/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and
 * HGST was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);

#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit.  That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u

#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)

#define SKD_SKCOMP_SIZE						\
	((sizeof(struct fit_completion_entry_v1) +		\
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define  SKD_ID_RW_REQUEST	(0u << 8u)
#define  SKD_ID_INTERNAL	(1u << 8u)
#define  SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu

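/*
 * In-flight commands are stamped with a counter that skd_timer_tick()
 * advances once per tick; the stamp folded through SKD_TIMEOUT_SLOT_MASK
 * selects one of SKD_N_TIMEOUT_SLOT per-slot counters, so a slot that is
 * still non-zero a full rotation later indicates overdue commands.
 */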
#define SKD_N_TIMEOUT_SLOT	4u
#define SKD_TIMEOUT_SLOT_MASK	3u

#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_DRAINING_TIMEOUT,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO	SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

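/*
 * Layout of a FIT message as sent to the device: one header followed by
 * up to SKD_MAX_REQ_PER_MSG coalesced SCSI requests.
 */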
struct skd_msg_buf {
	struct fit_msg_hdr	fmh;
	struct skd_scsi_request	scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

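/* Per-command state: ties a block layer request to its FIT/DMA resources. */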
struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	struct request *req;
	u8 flush_cmd;

	u32 timeout_stamp;
	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;
};

struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS	2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	atomic_t in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	atomic_t timeout_slot[SKD_N_TIMEOUT_SLOT];
	atomic_t timeout_stamp;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13];	/* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)	  skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

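/*
 * Register accessors. All FIT registers are reached through BAR 1
 * (mem_map[1]); accesses are traced when the debug level is >= 2.
 */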
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}


#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);

const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event);

/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
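
/* Fail every request still waiting in the block layer queue with an I/O error. */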
static void skd_fail_all_pending(struct skd_device *skdev)
{
	struct request_queue *q = skdev->queue;
	struct request *req;

	for (;;) {
		req = blk_peek_request(q);
		if (req == NULL)
			break;
		WARN_ON_ONCE(blk_queue_start_tag(q, req));
		__blk_end_request_all(req, BLK_STS_IOERR);
	}
}

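/* Build a 10-byte READ(10)/WRITE(10) CDB; LBA and count are big-endian. */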
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already.  They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}

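/*
 * Legacy (single-queue) request_fn: pulls requests off the queue, packs
 * them into FIT messages and rings the FIT doorbell. Runs with the queue
 * lock held.
 */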
static void skd_request_fn(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg = NULL;
	struct fit_msg_hdr *fmh = NULL;
	struct skd_request_context *skreq;
	struct request *req = NULL;
	struct skd_scsi_request *scsi_req;
	unsigned long io_flags;
	u32 lba;
	u32 count;
	int data_dir;
	__be64 be_dmaa;
	u64 cmdctxt;
	u32 timo_slot;
	int flush, fua;
	u32 tag;

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		if (skd_fail_all(q))
			skd_fail_all_pending(skdev);
		return;
	}

	if (blk_queue_stopped(skdev->queue)) {
		if (atomic_read(&skdev->in_flight) >=
		    skdev->queue_low_water_mark)
			/* There is still some kind of shortage */
			return;

		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
	}

	/*
	 * Stop conditions:
	 *  - There are no more native requests
	 *  - There are already the maximum number of requests in progress
	 *  - There are no more skd_request_context entries
	 *  - There are no more FIT msg buffers
	 */
	for (;;) {

		flush = fua = 0;

		req = blk_peek_request(q);

		/* Are there any native requests to start? */
		if (req == NULL)
			break;

		lba = (u32)blk_rq_pos(req);
		count = blk_rq_sectors(req);
		data_dir = rq_data_dir(req);
		io_flags = req->cmd_flags;

		if (req_op(req) == REQ_OP_FLUSH)
			flush++;

		if (io_flags & REQ_FUA)
			fua++;

		dev_dbg(&skdev->pdev->dev,
			"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			req, lba, lba, count, count, data_dir);

		/* At this point we know there is a request */

		/* Are too many requests already in progress? */
		if (atomic_read(&skdev->in_flight) >=
		    skdev->cur_max_queue_depth) {
			dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
				atomic_read(&skdev->in_flight),
				skdev->cur_max_queue_depth);
			break;
		}

		/*
		 * OK to now dequeue request from q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		WARN_ON_ONCE(blk_queue_start_tag(q, req));

		tag = blk_mq_unique_tag(req);
		WARN_ONCE(tag >= skd_max_queue_depth,
			  "%#x > %#x (nr_requests = %lu)\n", tag,
			  skd_max_queue_depth, q->nr_requests);

		skreq = &skdev->skreq_table[tag];
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		skreq->id = tag + SKD_ID_RW_REQUEST;
		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;

		skreq->req = req;
		skreq->fitmsg_id = 0;

		skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
			DMA_TO_DEVICE;

		if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
			dev_dbg(&skdev->pdev->dev, "error out\n");
			skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
			continue;
		}

		/* Either a FIT msg is in progress or we have to start one. */
		if (skmsg == NULL) {
			skmsg = &skdev->skmsg_table[tag];

			/* Initialize the FIT msg header */
			fmh = &skmsg->msg_buf->fmh;
			memset(fmh, 0, sizeof(*fmh));
			fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
			skmsg->length = sizeof(*fmh);
		}

		skreq->fitmsg_id = skmsg->id;

		scsi_req =
			&skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
		memset(scsi_req, 0, sizeof(*scsi_req));

		be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
		cmdctxt = skreq->id + SKD_ID_INCR;

		scsi_req->hdr.tag = cmdctxt;
		scsi_req->hdr.sg_list_dma_address = be_dmaa;

		if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
			skd_prep_zerosize_flush_cdb(scsi_req, skreq);
			SKD_ASSERT(skreq->flush_cmd == 1);
		} else {
			skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
		}

		if (fua)
			scsi_req->cdb[1] |= SKD_FUA_NV;

		scsi_req->hdr.sg_list_len_bytes =
			cpu_to_be32(skreq->sg_byte_count);

		/* Complete resource allocations. */
		skreq->state = SKD_REQ_STATE_BUSY;
		skreq->id += SKD_ID_INCR;

		skmsg->length += sizeof(struct skd_scsi_request);
		fmh->num_protocol_cmds_coalesced++;

		/*
		 * Update the active request counts.
		 * Capture the timeout timestamp.
		 */
		skreq->timeout_stamp = atomic_read(&skdev->timeout_stamp);
		timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
		atomic_inc(&skdev->timeout_slot[timo_slot]);
		atomic_inc(&skdev->in_flight);
		dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
			atomic_read(&skdev->in_flight));

		/*
		 * If the FIT msg buffer is full send it.
		 */
		if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skmsg = NULL;
			fmh = NULL;
		}
	}

	/* If the FIT msg buffer is not empty send what we got. */
	if (skmsg) {
		WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
		skd_send_fitmsg(skdev, skmsg);
		skmsg = NULL;
		fmh = NULL;
	}

	/*
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
	if (req)
		blk_stop_queue(skdev->queue);
}

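/* Complete a request back to the block layer, logging failed commands. */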
static void skd_end_request(struct skd_device *skdev, struct request *req,
			    blk_status_t error)
{
	if (unlikely(error)) {
		char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		dev_err(&skdev->pdev->dev,
			"Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
			count, req->tag);
	} else
		dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", req->tag,
			error);

	__blk_end_request_all(req, error);
}

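/*
 * Map the request's bio into skreq->sg, DMA-map that scatterlist, and
 * mirror it into the device-visible FIT SG descriptor list. Returns true
 * on success.
 */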
static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = skreq->req;
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}

static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;

	u32 timo_slot;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		skd_timer_tick_not_online(skdev);
		goto timer_func_out;
	}
	timo_slot = atomic_inc_return(&skdev->timeout_stamp) &
		SKD_TIMEOUT_SLOT_MASK;

	/*
	 * All requests that happened during the previous use of
	 * this slot should be done by now. The previous use was
	 * over 7 seconds ago.
	 */
	if (atomic_read(&skdev->timeout_slot[timo_slot]) == 0)
		goto timer_func_out;

	/* Something is overdue */
	dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
		atomic_read(&skdev->timeout_slot[timo_slot]),
		atomic_read(&skdev->in_flight));
	dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
		atomic_read(&skdev->timeout_slot[timo_slot]),
		atomic_read(&skdev->in_flight));

	skdev->timer_countdown = SKD_DRAINING_TIMO;
	skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
	skdev->timo_slot = timo_slot;
	blk_stop_queue(skdev->queue);

timer_func_out:
	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}

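/* Per-tick state machine for a device that is not (or no longer) online. */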
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		dev_dbg(&skdev->pdev->dev,
			"draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
			skdev->timo_slot, skdev->timer_countdown,
			atomic_read(&skdev->in_flight),
			atomic_read(&skdev->timeout_slot[skdev->timo_slot]));
		/* if the slot has cleared we can let the I/O continue */
		if (atomic_read(&skdev->timeout_slot[skdev->timo_slot]) == 0) {
			dev_dbg(&skdev->pdev->dev,
				"Slot drained, starting queue.\n");
			skdev->state = SKD_DRVR_STATE_ONLINE;
			blk_start_queue(skdev->queue);
			return;
		}
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after our timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

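/*
 * Pre-format the driver's internal special request: one FIT header, one
 * SCSI request and a single SG descriptor pointing at the internal data
 * buffer.
 */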
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

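/*
 * Issue one step of the internal device-connect sequence; on completion
 * skd_complete_internal() chains the next step.
 */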
static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;

	}
	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}

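/*
 * Completion handler for internal requests. Drives the connect state
 * machine: TEST UNIT READY -> WRITE/READ BUFFER -> READ CAPACITY ->
 * INQUIRY.
 */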
static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send any more, state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}

/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

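/*
 * Hand a finished FIT message to the device: fold the message size into
 * the low bits of the queue command and write it to the FIT_Q_COMMAND
 * doorbell.
 */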
static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, atomic_read(&skdev->in_flight));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};

/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */

38d4a1bb MS |
1454 | static enum skd_check_status_action |
1455 | skd_check_status(struct skd_device *skdev, | |
85e34112 | 1456 | u8 cmp_status, struct fit_comp_error_info *skerr) |
e67f86b3 | 1457 | { |
0b2e0c07 | 1458 | int i; |
e67f86b3 | 1459 | |
f98806d6 BVA |
1460 | dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", |
1461 | skerr->key, skerr->code, skerr->qual, skerr->fruc); | |
e67f86b3 | 1462 | |
f98806d6 BVA |
1463 | dev_dbg(&skdev->pdev->dev, |
1464 | "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n", | |
1465 | skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual, | |
1466 | skerr->fruc); | |
e67f86b3 AB |
1467 | |
1468 | /* Does the info match an entry in the good category? */ | |
0b2e0c07 | 1469 | for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) { |
e67f86b3 AB |
1470 | struct sns_info *sns = &skd_chkstat_table[i]; |
1471 | ||
1472 | if (sns->mask & 0x10) | |
1473 | if (skerr->type != sns->type) | |
1474 | continue; | |
1475 | ||
1476 | if (sns->mask & 0x08) | |
1477 | if (cmp_status != sns->stat) | |
1478 | continue; | |
1479 | ||
1480 | if (sns->mask & 0x04) | |
1481 | if (skerr->key != sns->key) | |
1482 | continue; | |
1483 | ||
1484 | if (sns->mask & 0x02) | |
1485 | if (skerr->code != sns->asc) | |
1486 | continue; | |
1487 | ||
1488 | if (sns->mask & 0x01) | |
1489 | if (skerr->qual != sns->ascq) | |
1490 | continue; | |
1491 | ||
1492 | if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { | |
f98806d6 BVA |
1493 | dev_err(&skdev->pdev->dev, |
1494 | "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n", | |
1495 | skerr->key, skerr->code, skerr->qual); | |
e67f86b3 AB |
1496 | } |
1497 | return sns->action; | |
1498 | } | |
1499 | ||
1500 | /* No other match, so nonzero status means error, | |
1501 | * zero status means good | |
1502 | */ | |
1503 | if (cmp_status) { | |
f98806d6 | 1504 | dev_dbg(&skdev->pdev->dev, "status check: error\n"); |
e67f86b3 AB |
1505 | return SKD_CHECK_STATUS_REPORT_ERROR; |
1506 | } | |
1507 | ||
f98806d6 | 1508 | dev_dbg(&skdev->pdev->dev, "status check good default\n"); |
e67f86b3 AB |
1509 | return SKD_CHECK_STATUS_REPORT_GOOD; |
1510 | } | |
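/*
 * Illustrative sketch, not part of the driver: the per-entry mask in
 * skd_chkstat_table selects which fields participate in the match.
 * Bit 0x10 gates the type check, 0x08 the completion status, 0x04 the
 * sense key, 0x02 the asc, and 0x01 the ascq. For example, the DMA
 * ERROR entry above uses mask 0x1C, so it compares type, stat, and key
 * and matches any asc/ascq. The hypothetical helper below restates the
 * matching loop of skd_check_status() as a single predicate.
 */
static bool sns_matches(const struct sns_info *sns, u8 type, u8 stat,
			u8 key, u8 asc, u8 ascq)
{
	/* Compare a field only when its mask bit is set. */
	if ((sns->mask & 0x10) && sns->type != type)
		return false;
	if ((sns->mask & 0x08) && sns->stat != stat)
		return false;
	if ((sns->mask & 0x04) && sns->key != key)
		return false;
	if ((sns->mask & 0x02) && sns->asc != asc)
		return false;
	if ((sns->mask & 0x01) && sns->ascq != ascq)
		return false;
	return true;
}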
1511 | ||
1512 | static void skd_resolve_req_exception(struct skd_device *skdev, | |
f18c17c8 BVA |
1513 | struct skd_request_context *skreq, |
1514 | struct request *req) | |
e67f86b3 AB |
1515 | { |
1516 | u8 cmp_status = skreq->completion.status; | |
1517 | ||
1518 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | |
1519 | case SKD_CHECK_STATUS_REPORT_GOOD: | |
1520 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | |
f18c17c8 | 1521 | skd_end_request(skdev, req, BLK_STS_OK); |
e67f86b3 AB |
1522 | break; |
1523 | ||
1524 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | |
1525 | skd_log_skreq(skdev, skreq, "retry(busy)"); | |
f18c17c8 | 1526 | blk_requeue_request(skdev->queue, req); |
f98806d6 | 1527 | dev_info(&skdev->pdev->dev, "drive BUSY imminent\n"); |
e67f86b3 AB |
1528 | skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; |
1529 | skdev->timer_countdown = SKD_TIMER_MINUTES(20); | |
1530 | skd_quiesce_dev(skdev); | |
1531 | break; | |
1532 | ||
1533 | case SKD_CHECK_STATUS_REQUEUE_REQUEST: | |
f18c17c8 | 1534 | if ((unsigned long) ++req->special < SKD_MAX_RETRIES) { |
fcd37eb3 | 1535 | skd_log_skreq(skdev, skreq, "retry"); |
f18c17c8 | 1536 | blk_requeue_request(skdev->queue, req); |
fcd37eb3 | 1537 | break; |
e67f86b3 | 1538 | } |
ce6882ba | 1539 | /* fall through */ |
e67f86b3 AB |
1540 | |
1541 | case SKD_CHECK_STATUS_REPORT_ERROR: | |
1542 | default: | |
f18c17c8 | 1543 | skd_end_request(skdev, req, BLK_STS_IOERR); |
e67f86b3 AB |
1544 | break; |
1545 | } | |
1546 | } | |
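/*
 * Illustrative note, not part of the driver: the REQUEUE case above
 * repurposes the otherwise unused req->special pointer as a retry
 * counter. "(unsigned long) ++req->special" pre-increments the pointer
 * value and compares it, as an integer, against SKD_MAX_RETRIES, so no
 * dedicated per-request retry field is needed. Once the limit is
 * reached, control falls through to the REPORT_ERROR case and the
 * request completes with BLK_STS_IOERR.
 */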
1547 | ||
e67f86b3 AB |
1548 | /* assume spinlock is already held */ |
1549 | static void skd_release_skreq(struct skd_device *skdev, | |
1550 | struct skd_request_context *skreq) | |
1551 | { | |
e67f86b3 AB |
1552 | u32 timo_slot; |
1553 | ||
e67f86b3 AB |
1554 | /* |
1555 | * Decrease the number of active requests. | |
1556 | * Also decrement the count in the timeout slot. | |
1557 | */ | |
6fbb2de5 BVA |
1558 | SKD_ASSERT(atomic_read(&skdev->in_flight) > 0); |
1559 | atomic_dec(&skdev->in_flight); | |
e67f86b3 AB |
1560 | |
1561 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
6fbb2de5 BVA |
1562 | SKD_ASSERT(atomic_read(&skdev->timeout_slot[timo_slot]) > 0); |
1563 | atomic_dec(&skdev->timeout_slot[timo_slot]); | |
e67f86b3 AB |
1564 | |
1565 | /* | |
1566 | * Reset backpointer | |
1567 | */ | |
fcd37eb3 | 1568 | skreq->req = NULL; |
e67f86b3 AB |
1569 | |
1570 | /* | |
1571 | * Reclaim the skd_request_context | |
1572 | */ | |
1573 | skreq->state = SKD_REQ_STATE_IDLE; | |
1574 | skreq->id += SKD_ID_INCR; | |
f18c17c8 BVA |
1575 | } |
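/*
 * Illustrative note, not part of the driver: bumping skreq->id by
 * SKD_ID_INCR on every reclaim acts as a generation counter. The low
 * bits of the id still locate the slot (SKD_ID_SLOT_AND_TABLE_MASK),
 * while the incremented high bits make a late completion that still
 * carries the old id fail the "skreq->id != req_id" check in
 * skd_isr_completion_posted(), so a stale completion for a reused slot
 * is logged and dropped instead of completing the wrong request.
 */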
1576 | ||
1577 | static struct skd_request_context *skd_skreq_from_rq(struct skd_device *skdev, | |
1578 | struct request *rq) | |
1579 | { | |
1580 | struct skd_request_context *skreq; | |
1581 | int i; | |
1582 | ||
1583 | for (i = 0, skreq = skdev->skreq_table; i < skdev->num_req_context; | |
1584 | i++, skreq++) | |
1585 | if (skreq->req == rq) | |
1586 | return skreq; | |
1587 | ||
1588 | return NULL; | |
e67f86b3 AB |
1589 | } |
1590 | ||
e67f86b3 AB |
1591 | static int skd_isr_completion_posted(struct skd_device *skdev, |
1592 | int limit, int *enqueued) | |
1593 | { | |
85e34112 BVA |
1594 | struct fit_completion_entry_v1 *skcmp; |
1595 | struct fit_comp_error_info *skerr; | |
e67f86b3 | 1596 | u16 req_id; |
f18c17c8 BVA |
1597 | u32 tag; |
1598 | struct request *rq; | |
e67f86b3 | 1599 | struct skd_request_context *skreq; |
c830da8c BVA |
1600 | u16 cmp_cntxt; |
1601 | u8 cmp_status; | |
1602 | u8 cmp_cycle; | |
1603 | u32 cmp_bytes; | |
1604 | int rc = 0; | |
e67f86b3 | 1605 | int processed = 0; |
e67f86b3 | 1606 | |
760b48ca BVA |
1607 | lockdep_assert_held(&skdev->lock); |
1608 | ||
e67f86b3 AB |
1609 | for (;; ) { |
1610 | SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); | |
1611 | ||
1612 | skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; | |
1613 | cmp_cycle = skcmp->cycle; | |
1614 | cmp_cntxt = skcmp->tag; | |
1615 | cmp_status = skcmp->status; | |
1616 | cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); | |
1617 | ||
1618 | skerr = &skdev->skerr_table[skdev->skcomp_ix]; | |
1619 | ||
f98806d6 BVA |
1620 | dev_dbg(&skdev->pdev->dev, |
1621 | "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n", | |
1622 | skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle, | |
6fbb2de5 BVA |
1623 | cmp_cntxt, cmp_status, atomic_read(&skdev->in_flight), |
1624 | cmp_bytes, skdev->proto_ver); | |
e67f86b3 AB |
1625 | |
1626 | if (cmp_cycle != skdev->skcomp_cycle) { | |
f98806d6 | 1627 | dev_dbg(&skdev->pdev->dev, "end of completions\n"); |
e67f86b3 AB |
1628 | break; |
1629 | } | |
1630 | /* | |
1631 | * Update the completion queue head index and possibly | |
1632 | * the completion cycle count. 8-bit wrap-around. | |
1633 | */ | |
1634 | skdev->skcomp_ix++; | |
1635 | if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { | |
1636 | skdev->skcomp_ix = 0; | |
1637 | skdev->skcomp_cycle++; | |
1638 | } | |
1639 | ||
1640 | /* | |
1641 | * The command context is a unique 32-bit ID. The low order | |
1642 | * bits help locate the request. The request is usually a | |
1643 | * r/w request (see skd_start() above) or a special request. | |
1644 | */ | |
1645 | req_id = cmp_cntxt; | |
f18c17c8 | 1646 | tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK; |
e67f86b3 AB |
1647 | |
1648 | /* Is this other than a r/w request? */ | |
f18c17c8 | 1649 | if (tag >= skdev->num_req_context) { |
e67f86b3 AB |
1650 | /* |
1651 | * This is not a completion for a r/w request. | |
1652 | */ | |
f18c17c8 BVA |
1653 | WARN_ON_ONCE(blk_map_queue_find_tag(skdev->queue-> |
1654 | queue_tags, tag)); | |
e67f86b3 AB |
1655 | skd_complete_other(skdev, skcmp, skerr); |
1656 | continue; | |
1657 | } | |
1658 | ||
f18c17c8 BVA |
1659 | rq = blk_map_queue_find_tag(skdev->queue->queue_tags, tag); |
1660 | if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt, | |
1661 | tag)) | |
1662 | continue; | |
1663 | skreq = skd_skreq_from_rq(skdev, rq); | |
e67f86b3 AB |
1664 | |
1665 | /* | |
1666 | * Make sure the request ID for the slot matches. | |
1667 | */ | |
1668 | if (skreq->id != req_id) { | |
f98806d6 BVA |
1669 | dev_dbg(&skdev->pdev->dev, |
1670 | "mismatch comp_id=0x%x req_id=0x%x\n", req_id, | |
1671 | skreq->id); | |
e67f86b3 AB |
1672 | { |
1673 | u16 new_id = cmp_cntxt; | |
f98806d6 BVA |
1674 | dev_err(&skdev->pdev->dev, |
1675 | "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n", | |
1676 | req_id, skreq->id, new_id); | |
e67f86b3 AB |
1677 | |
1678 | continue; | |
1679 | } | |
1680 | } | |
1681 | ||
1682 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); | |
1683 | ||
e67f86b3 AB |
1684 | skreq->completion = *skcmp; |
1685 | if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { | |
1686 | skreq->err_info = *skerr; | |
1687 | skd_log_check_status(skdev, cmp_status, skerr->key, | |
1688 | skerr->code, skerr->qual, | |
1689 | skerr->fruc); | |
1690 | } | |
1691 | /* Release DMA resources for the request. */ | |
1692 | if (skreq->n_sg > 0) | |
1693 | skd_postop_sg_list(skdev, skreq); | |
1694 | ||
f18c17c8 BVA |
1695 | /* Mark the FIT msg and timeout slot as free. */ |
1696 | skd_release_skreq(skdev, skreq); | |
e67f86b3 AB |
1697 | |
1698 | /* | |
f18c17c8 | 1699 | * Capture the outcome and post it back to the native request. |
e67f86b3 | 1700 | */ |
f18c17c8 BVA |
1701 | if (likely(cmp_status == SAM_STAT_GOOD)) |
1702 | skd_end_request(skdev, rq, BLK_STS_OK); | |
1703 | else | |
1704 | skd_resolve_req_exception(skdev, skreq, rq); | |
e67f86b3 AB |
1705 | |
1706 | /* skd_isr_comp_limit equal to zero means no limit */ | |
1707 | if (limit) { | |
1708 | if (++processed >= limit) { | |
1709 | rc = 1; | |
1710 | break; | |
1711 | } | |
1712 | } | |
1713 | } | |
1714 | ||
6fbb2de5 BVA |
1715 | if (skdev->state == SKD_DRVR_STATE_PAUSING && |
1716 | atomic_read(&skdev->in_flight) == 0) { | |
e67f86b3 AB |
1717 | skdev->state = SKD_DRVR_STATE_PAUSED; |
1718 | wake_up_interruptible(&skdev->waitq); | |
1719 | } | |
1720 | ||
1721 | return rc; | |
1722 | } | |
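/*
 * Illustrative sketch, not part of the driver: the completion queue is
 * consumed with a cycle tag rather than a hardware head pointer. The
 * device stamps each entry with its current pass over the ring; the
 * host stops as soon as an entry's tag differs from the pass it
 * expects, and advances its expected tag each time its index wraps.
 * All names below are local to this sketch.
 */
struct demo_cmpl {
	u8 cycle;
	/* ... payload ... */
};

static int demo_consume(struct demo_cmpl *ring, unsigned int nentries,
			unsigned int *ix, u8 *expect_cycle)
{
	int processed = 0;

	while (ring[*ix].cycle == *expect_cycle) {
		/* handle ring[*ix] here */
		processed++;
		if (++*ix >= nentries) {
			*ix = 0;
			++*expect_cycle;	/* 8-bit wrap-around */
		}
	}
	return processed;
}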
1723 | ||
1724 | static void skd_complete_other(struct skd_device *skdev, | |
85e34112 BVA |
1725 | struct fit_completion_entry_v1 *skcomp, |
1726 | struct fit_comp_error_info *skerr) | |
e67f86b3 AB |
1727 | { |
1728 | u32 req_id = 0; | |
1729 | u32 req_table; | |
1730 | u32 req_slot; | |
1731 | struct skd_special_context *skspcl; | |
1732 | ||
760b48ca BVA |
1733 | lockdep_assert_held(&skdev->lock); |
1734 | ||
e67f86b3 AB |
1735 | req_id = skcomp->tag; |
1736 | req_table = req_id & SKD_ID_TABLE_MASK; | |
1737 | req_slot = req_id & SKD_ID_SLOT_MASK; | |
1738 | ||
f98806d6 BVA |
1739 | dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table, |
1740 | req_id, req_slot); | |
e67f86b3 AB |
1741 | |
1742 | /* | |
1743 | * Based on the request id, determine how to dispatch this completion. | |
1744 | * This switch/case finds the good cases and forwards the | |
1745 | * completion entry. Errors are reported below the switch. | |
1746 | */ | |
1747 | switch (req_table) { | |
1748 | case SKD_ID_RW_REQUEST: | |
1749 | /* | |
e1d06f2d | 1750 | * The caller, skd_isr_completion_posted() above, |
e67f86b3 AB |
1751 | * handles r/w requests. The only way we get here |
1752 | * is if the req_slot is out of bounds. | |
1753 | */ | |
1754 | break; | |
1755 | ||
e67f86b3 AB |
1756 | case SKD_ID_INTERNAL: |
1757 | if (req_slot == 0) { | |
1758 | skspcl = &skdev->internal_skspcl; | |
1759 | if (skspcl->req.id == req_id && | |
1760 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
1761 | skd_complete_internal(skdev, | |
1762 | skcomp, skerr, skspcl); | |
1763 | return; | |
1764 | } | |
1765 | } | |
1766 | break; | |
1767 | ||
1768 | case SKD_ID_FIT_MSG: | |
1769 | /* | |
1770 | * These IDs should never appear in a completion record. | |
1771 | */ | |
1772 | break; | |
1773 | ||
1774 | default: | |
1775 | /* | |
1776 | * These IDs should never appear anywhere. | |
1777 | */ | |
1778 | break; | |
1779 | } | |
1780 | ||
1781 | /* | |
1782 | * If we get here, it is a bad or stale ID. | |
1783 | */ | |
1784 | } | |
1785 | ||
e67f86b3 AB |
1786 | static void skd_reset_skcomp(struct skd_device *skdev) |
1787 | { | |
6f7c7675 | 1788 | memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE); |
e67f86b3 AB |
1789 | |
1790 | skdev->skcomp_ix = 0; | |
1791 | skdev->skcomp_cycle = 1; | |
1792 | } | |
1793 | ||
1794 | /* | |
1795 | ***************************************************************************** | |
1796 | * INTERRUPTS | |
1797 | ***************************************************************************** | |
1798 | */ | |
1799 | static void skd_completion_worker(struct work_struct *work) | |
1800 | { | |
1801 | struct skd_device *skdev = | |
1802 | container_of(work, struct skd_device, completion_worker); | |
1803 | unsigned long flags; | |
1804 | int flush_enqueued = 0; | |
1805 | ||
1806 | spin_lock_irqsave(&skdev->lock, flags); | |
1807 | ||
1808 | /* | |
1809 | * pass in limit=0, which means no limit: | |
1810 | * process everything in the completion queue | |
1811 | */ | |
1812 | skd_isr_completion_posted(skdev, 0, &flush_enqueued); | |
8fe70065 | 1813 | blk_run_queue_async(skdev->queue); |
e67f86b3 AB |
1814 | |
1815 | spin_unlock_irqrestore(&skdev->lock, flags); | |
1816 | } | |
1817 | ||
1818 | static void skd_isr_msg_from_dev(struct skd_device *skdev); | |
1819 | ||
41c9499b AB |
1820 | static irqreturn_t |
1821 | skd_isr(int irq, void *ptr) | |
e67f86b3 | 1822 | { |
1cd3c1ab | 1823 | struct skd_device *skdev = ptr; |
e67f86b3 AB |
1824 | u32 intstat; |
1825 | u32 ack; | |
1826 | int rc = 0; | |
1827 | int deferred = 0; | |
1828 | int flush_enqueued = 0; | |
1829 | ||
e67f86b3 AB |
1830 | spin_lock(&skdev->lock); |
1831 | ||
1832 | for (;; ) { | |
1833 | intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
1834 | ||
1835 | ack = FIT_INT_DEF_MASK; | |
1836 | ack &= intstat; | |
1837 | ||
f98806d6 BVA |
1838 | dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat, |
1839 | ack); | |
e67f86b3 AB |
1840 | |
1841 | /* As long as an interrupt is pending on the device, keep | |
1842 | * running the loop. When none remain, get out; if we never | |
1843 | * processed anything, defer to the completion handler anyway. | |
1844 | */ | |
1845 | if (ack == 0) { | |
1846 | /* No interrupts on the device, but run the completion | |
1847 | * processor anyway in case work is pending. | |
1848 | */ | |
1849 | if (rc == 0) | |
1850 | if (likely (skdev->state | |
1851 | == SKD_DRVR_STATE_ONLINE)) | |
1852 | deferred = 1; | |
1853 | break; | |
1854 | } | |
1855 | ||
1856 | rc = IRQ_HANDLED; | |
1857 | ||
1858 | SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); | |
1859 | ||
1860 | if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && | |
1861 | (skdev->state != SKD_DRVR_STATE_STOPPING))) { | |
1862 | if (intstat & FIT_ISH_COMPLETION_POSTED) { | |
1863 | /* | |
1864 | * If we have already deferred completion | |
1865 | * processing, don't bother running it again | |
1866 | */ | |
1867 | if (deferred == 0) | |
1868 | deferred = | |
1869 | skd_isr_completion_posted(skdev, | |
1870 | skd_isr_comp_limit, &flush_enqueued); | |
1871 | } | |
1872 | ||
1873 | if (intstat & FIT_ISH_FW_STATE_CHANGE) { | |
1874 | skd_isr_fwstate(skdev); | |
1875 | if (skdev->state == SKD_DRVR_STATE_FAULT || | |
1876 | skdev->state == | |
1877 | SKD_DRVR_STATE_DISAPPEARED) { | |
1878 | spin_unlock(&skdev->lock); | |
1879 | return rc; | |
1880 | } | |
1881 | } | |
1882 | ||
1883 | if (intstat & FIT_ISH_MSG_FROM_DEV) | |
1884 | skd_isr_msg_from_dev(skdev); | |
1885 | } | |
1886 | } | |
1887 | ||
1888 | if (unlikely(flush_enqueued)) | |
8fe70065 | 1889 | blk_run_queue_async(skdev->queue); |
e67f86b3 AB |
1890 | |
1891 | if (deferred) | |
1892 | schedule_work(&skdev->completion_worker); | |
1893 | else if (!flush_enqueued) | |
8fe70065 | 1894 | blk_run_queue_async(skdev->queue); |
e67f86b3 AB |
1895 | |
1896 | spin_unlock(&skdev->lock); | |
1897 | ||
1898 | return rc; | |
1899 | } | |
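/*
 * Illustrative note, not part of the driver: skd_isr() is a classic
 * bounded top half. It loops acking interrupt sources until none are
 * pending, processes at most skd_isr_comp_limit completions inline,
 * and sets "deferred" when more work remains (or when nothing was
 * pending at all while online), so the rest runs later in process
 * context via skd_completion_worker(). This keeps the time spent in
 * hard-irq context with the device lock held bounded.
 */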
1900 | ||
e67f86b3 AB |
1901 | static void skd_drive_fault(struct skd_device *skdev) |
1902 | { | |
1903 | skdev->state = SKD_DRVR_STATE_FAULT; | |
f98806d6 | 1904 | dev_err(&skdev->pdev->dev, "Drive FAULT\n"); |
e67f86b3 AB |
1905 | } |
1906 | ||
1907 | static void skd_drive_disappeared(struct skd_device *skdev) | |
1908 | { | |
1909 | skdev->state = SKD_DRVR_STATE_DISAPPEARED; | |
f98806d6 | 1910 | dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n"); |
e67f86b3 AB |
1911 | } |
1912 | ||
1913 | static void skd_isr_fwstate(struct skd_device *skdev) | |
1914 | { | |
1915 | u32 sense; | |
1916 | u32 state; | |
1917 | u32 mtd; | |
1918 | int prev_driver_state = skdev->state; | |
1919 | ||
1920 | sense = SKD_READL(skdev, FIT_STATUS); | |
1921 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
1922 | ||
f98806d6 BVA |
1923 | dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n", |
1924 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | |
1925 | skd_drive_state_to_str(state), state); | |
e67f86b3 AB |
1926 | |
1927 | skdev->drive_state = state; | |
1928 | ||
1929 | switch (skdev->drive_state) { | |
1930 | case FIT_SR_DRIVE_INIT: | |
1931 | if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { | |
1932 | skd_disable_interrupts(skdev); | |
1933 | break; | |
1934 | } | |
1935 | if (skdev->state == SKD_DRVR_STATE_RESTARTING) | |
79ce12a8 | 1936 | skd_recover_requests(skdev); |
e67f86b3 AB |
1937 | if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { |
1938 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
1939 | skdev->state = SKD_DRVR_STATE_STARTING; | |
1940 | skd_soft_reset(skdev); | |
1941 | break; | |
1942 | } | |
1943 | mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); | |
1944 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
1945 | skdev->last_mtd = mtd; | |
1946 | break; | |
1947 | ||
1948 | case FIT_SR_DRIVE_ONLINE: | |
1949 | skdev->cur_max_queue_depth = skd_max_queue_depth; | |
1950 | if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) | |
1951 | skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; | |
1952 | ||
1953 | skdev->queue_low_water_mark = | |
1954 | skdev->cur_max_queue_depth * 2 / 3 + 1; | |
1955 | if (skdev->queue_low_water_mark < 1) | |
1956 | skdev->queue_low_water_mark = 1; | |
f98806d6 BVA |
1957 | dev_info(&skdev->pdev->dev, |
1958 | "Queue depth limit=%d dev=%d lowat=%d\n", | |
1959 | skdev->cur_max_queue_depth, | |
1960 | skdev->dev_max_queue_depth, | |
1961 | skdev->queue_low_water_mark); | |
e67f86b3 AB |
1962 | |
1963 | skd_refresh_device_data(skdev); | |
1964 | break; | |
1965 | ||
1966 | case FIT_SR_DRIVE_BUSY: | |
1967 | skdev->state = SKD_DRVR_STATE_BUSY; | |
1968 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
1969 | skd_quiesce_dev(skdev); | |
1970 | break; | |
1971 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
1972 | /* set timer for 3 seconds, we'll abort any unfinished | |
1973 | * commands after that expires | |
1974 | */ | |
1975 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | |
1976 | skdev->timer_countdown = SKD_TIMER_SECONDS(3); | |
6a5ec65b | 1977 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
1978 | break; |
1979 | case FIT_SR_DRIVE_BUSY_ERASE: | |
1980 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | |
1981 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
1982 | break; | |
1983 | case FIT_SR_DRIVE_OFFLINE: | |
1984 | skdev->state = SKD_DRVR_STATE_IDLE; | |
1985 | break; | |
1986 | case FIT_SR_DRIVE_SOFT_RESET: | |
1987 | switch (skdev->state) { | |
1988 | case SKD_DRVR_STATE_STARTING: | |
1989 | case SKD_DRVR_STATE_RESTARTING: | |
1990 | /* Expected by a caller of skd_soft_reset() */ | |
1991 | break; | |
1992 | default: | |
1993 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
1994 | break; | |
1995 | } | |
1996 | break; | |
1997 | case FIT_SR_DRIVE_FW_BOOTING: | |
f98806d6 | 1998 | dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n"); |
e67f86b3 AB |
1999 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; |
2000 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
2001 | break; | |
2002 | ||
2003 | case FIT_SR_DRIVE_DEGRADED: | |
2004 | case FIT_SR_PCIE_LINK_DOWN: | |
2005 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | |
2006 | break; | |
2007 | ||
2008 | case FIT_SR_DRIVE_FAULT: | |
2009 | skd_drive_fault(skdev); | |
79ce12a8 | 2010 | skd_recover_requests(skdev); |
6a5ec65b | 2011 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
2012 | break; |
2013 | ||
2014 | /* PCIe bus returned all Fs? */ | |
2015 | case 0xFF: | |
f98806d6 BVA |
2016 | dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state, |
2017 | sense); | |
e67f86b3 | 2018 | skd_drive_disappeared(skdev); |
79ce12a8 | 2019 | skd_recover_requests(skdev); |
6a5ec65b | 2020 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
2021 | break; |
2022 | default: | |
2023 | /* | |
2024 | * Unknown FW state. Wait for a state we recognize. | |
2025 | */ | |
2026 | break; | |
2027 | } | |
f98806d6 BVA |
2028 | dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", |
2029 | skd_skdev_state_to_str(prev_driver_state), prev_driver_state, | |
2030 | skd_skdev_state_to_str(skdev->state), skdev->state); | |
e67f86b3 AB |
2031 | } |
2032 | ||
79ce12a8 | 2033 | static void skd_recover_requests(struct skd_device *skdev) |
e67f86b3 AB |
2034 | { |
2035 | int i; | |
2036 | ||
2037 | for (i = 0; i < skdev->num_req_context; i++) { | |
2038 | struct skd_request_context *skreq = &skdev->skreq_table[i]; | |
f18c17c8 | 2039 | struct request *req = skreq->req; |
e67f86b3 AB |
2040 | |
2041 | if (skreq->state == SKD_REQ_STATE_BUSY) { | |
2042 | skd_log_skreq(skdev, skreq, "recover"); | |
2043 | ||
2044 | SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); | |
f18c17c8 | 2045 | SKD_ASSERT(req != NULL); |
e67f86b3 AB |
2046 | |
2047 | /* Release DMA resources for the request. */ | |
2048 | if (skreq->n_sg > 0) | |
2049 | skd_postop_sg_list(skdev, skreq); | |
2050 | ||
fcd37eb3 | 2051 | skreq->req = NULL; |
e67f86b3 AB |
2052 | |
2053 | skreq->state = SKD_REQ_STATE_IDLE; | |
2054 | skreq->id += SKD_ID_INCR; | |
e67f86b3 | 2055 | |
f18c17c8 | 2056 | skd_end_request(skdev, req, BLK_STS_IOERR); |
e67f86b3 | 2057 | } |
e67f86b3 | 2058 | } |
e67f86b3 | 2059 | |
e67f86b3 | 2060 | for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) |
6fbb2de5 | 2061 | atomic_set(&skdev->timeout_slot[i], 0); |
e67f86b3 | 2062 | |
6fbb2de5 | 2063 | atomic_set(&skdev->in_flight, 0); |
e67f86b3 AB |
2064 | } |
2065 | ||
2066 | static void skd_isr_msg_from_dev(struct skd_device *skdev) | |
2067 | { | |
2068 | u32 mfd; | |
2069 | u32 mtd; | |
2070 | u32 data; | |
2071 | ||
2072 | mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
2073 | ||
f98806d6 BVA |
2074 | dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd, |
2075 | skdev->last_mtd); | |
e67f86b3 AB |
2076 | |
2077 | /* ignore any mtd that is an ack for something we didn't send */ | |
2078 | if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) | |
2079 | return; | |
2080 | ||
2081 | switch (FIT_MXD_TYPE(mfd)) { | |
2082 | case FIT_MTD_FITFW_INIT: | |
2083 | skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); | |
2084 | ||
2085 | if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { | |
f98806d6 BVA |
2086 | dev_err(&skdev->pdev->dev, "protocol mismatch\n"); |
2087 | dev_err(&skdev->pdev->dev, " got=%d support=%d\n", | |
2088 | skdev->proto_ver, FIT_PROTOCOL_VERSION_1); | |
2089 | dev_err(&skdev->pdev->dev, " please upgrade driver\n"); | |
e67f86b3 AB |
2090 | skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; |
2091 | skd_soft_reset(skdev); | |
2092 | break; | |
2093 | } | |
2094 | mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); | |
2095 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2096 | skdev->last_mtd = mtd; | |
2097 | break; | |
2098 | ||
2099 | case FIT_MTD_GET_CMDQ_DEPTH: | |
2100 | skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); | |
2101 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, | |
2102 | SKD_N_COMPLETION_ENTRY); | |
2103 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2104 | skdev->last_mtd = mtd; | |
2105 | break; | |
2106 | ||
2107 | case FIT_MTD_SET_COMPQ_DEPTH: | |
2108 | SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); | |
2109 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); | |
2110 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2111 | skdev->last_mtd = mtd; | |
2112 | break; | |
2113 | ||
2114 | case FIT_MTD_SET_COMPQ_ADDR: | |
2115 | skd_reset_skcomp(skdev); | |
2116 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); | |
2117 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2118 | skdev->last_mtd = mtd; | |
2119 | break; | |
2120 | ||
2121 | case FIT_MTD_CMD_LOG_HOST_ID: | |
2122 | skdev->connect_time_stamp = get_seconds(); | |
2123 | data = skdev->connect_time_stamp & 0xFFFF; | |
2124 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); | |
2125 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2126 | skdev->last_mtd = mtd; | |
2127 | break; | |
2128 | ||
2129 | case FIT_MTD_CMD_LOG_TIME_STAMP_LO: | |
2130 | skdev->drive_jiffies = FIT_MXD_DATA(mfd); | |
2131 | data = (skdev->connect_time_stamp >> 16) & 0xFFFF; | |
2132 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); | |
2133 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2134 | skdev->last_mtd = mtd; | |
2135 | break; | |
2136 | ||
2137 | case FIT_MTD_CMD_LOG_TIME_STAMP_HI: | |
2138 | skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); | |
2139 | mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); | |
2140 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
2141 | skdev->last_mtd = mtd; | |
2142 | ||
f98806d6 BVA |
2143 | dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n", |
2144 | skdev->connect_time_stamp, skdev->drive_jiffies); | |
e67f86b3 AB |
2145 | break; |
2146 | ||
2147 | case FIT_MTD_ARM_QUEUE: | |
2148 | skdev->last_mtd = 0; | |
2149 | /* | |
2150 | * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. | |
2151 | */ | |
2152 | break; | |
2153 | ||
2154 | default: | |
2155 | break; | |
2156 | } | |
2157 | } | |
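/*
 * Illustrative summary, not part of the driver: skd_isr_msg_from_dev()
 * implements a strictly ordered handshake in which every
 * FIT_MSG_TO_DEVICE write is acknowledged by the next message-from-
 * device interrupt:
 *
 *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR
 *     -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO
 *     -> CMD_LOG_TIME_STAMP_HI -> ARM_QUEUE
 *
 * skdev->last_mtd records the step in flight so acks for anything else
 * are ignored, and the ARM_QUEUE ack clears it to terminate the chain.
 */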
2158 | ||
2159 | static void skd_disable_interrupts(struct skd_device *skdev) | |
2160 | { | |
2161 | u32 sense; | |
2162 | ||
2163 | sense = SKD_READL(skdev, FIT_CONTROL); | |
2164 | sense &= ~FIT_CR_ENABLE_INTERRUPTS; | |
2165 | SKD_WRITEL(skdev, sense, FIT_CONTROL); | |
f98806d6 | 2166 | dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense); |
e67f86b3 AB |
2167 | |
2168 | /* Note that all 1s are written. A 1-bit means | |
2169 | * disable, a 0 means enable. | |
2170 | */ | |
2171 | SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); | |
2172 | } | |
2173 | ||
2174 | static void skd_enable_interrupts(struct skd_device *skdev) | |
2175 | { | |
2176 | u32 val; | |
2177 | ||
2178 | /* unmask interrupts first */ | |
2179 | val = FIT_ISH_FW_STATE_CHANGE + | |
2180 | FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; | |
2181 | ||
2182 | /* Note that the complement of the mask is written. A 1-bit means | |
2183 | * disable, a 0 means enable. */ | |
2184 | SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); | |
f98806d6 | 2185 | dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val); |
e67f86b3 AB |
2186 | |
2187 | val = SKD_READL(skdev, FIT_CONTROL); | |
2188 | val |= FIT_CR_ENABLE_INTERRUPTS; | |
f98806d6 | 2189 | dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); |
e67f86b3 AB |
2190 | SKD_WRITEL(skdev, val, FIT_CONTROL); |
2191 | } | |
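/*
 * Illustrative note, not part of the driver: FIT_INT_MASK_HOST is a
 * disable mask, so the complement of the wanted sources is written.
 * With val = FIT_ISH_FW_STATE_CHANGE | FIT_ISH_COMPLETION_POSTED |
 * FIT_ISH_MSG_FROM_DEV, writing ~val leaves exactly those three bits
 * at 0 (enabled) and every other source at 1 (disabled), while
 * skd_disable_interrupts() writes ~0 to mask everything.
 */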
2192 | ||
2193 | /* | |
2194 | ***************************************************************************** | |
2195 | * START, STOP, RESTART, QUIESCE, UNQUIESCE | |
2196 | ***************************************************************************** | |
2197 | */ | |
2198 | ||
2199 | static void skd_soft_reset(struct skd_device *skdev) | |
2200 | { | |
2201 | u32 val; | |
2202 | ||
2203 | val = SKD_READL(skdev, FIT_CONTROL); | |
2204 | val |= (FIT_CR_SOFT_RESET); | |
f98806d6 | 2205 | dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val); |
e67f86b3 AB |
2206 | SKD_WRITEL(skdev, val, FIT_CONTROL); |
2207 | } | |
2208 | ||
2209 | static void skd_start_device(struct skd_device *skdev) | |
2210 | { | |
2211 | unsigned long flags; | |
2212 | u32 sense; | |
2213 | u32 state; | |
2214 | ||
2215 | spin_lock_irqsave(&skdev->lock, flags); | |
2216 | ||
2217 | /* ack all ghost interrupts */ | |
2218 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
2219 | ||
2220 | sense = SKD_READL(skdev, FIT_STATUS); | |
2221 | ||
f98806d6 | 2222 | dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense); |
e67f86b3 AB |
2223 | |
2224 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
2225 | skdev->drive_state = state; | |
2226 | skdev->last_mtd = 0; | |
2227 | ||
2228 | skdev->state = SKD_DRVR_STATE_STARTING; | |
2229 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
2230 | ||
2231 | skd_enable_interrupts(skdev); | |
2232 | ||
2233 | switch (skdev->drive_state) { | |
2234 | case FIT_SR_DRIVE_OFFLINE: | |
f98806d6 | 2235 | dev_err(&skdev->pdev->dev, "Drive offline...\n"); |
e67f86b3 AB |
2236 | break; |
2237 | ||
2238 | case FIT_SR_DRIVE_FW_BOOTING: | |
f98806d6 | 2239 | dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n"); |
e67f86b3 AB |
2240 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; |
2241 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
2242 | break; | |
2243 | ||
2244 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
f98806d6 | 2245 | dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n"); |
e67f86b3 AB |
2246 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; |
2247 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
2248 | break; | |
2249 | ||
2250 | case FIT_SR_DRIVE_BUSY_ERASE: | |
f98806d6 | 2251 | dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n"); |
e67f86b3 AB |
2252 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; |
2253 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
2254 | break; | |
2255 | ||
2256 | case FIT_SR_DRIVE_INIT: | |
2257 | case FIT_SR_DRIVE_ONLINE: | |
2258 | skd_soft_reset(skdev); | |
2259 | break; | |
2260 | ||
2261 | case FIT_SR_DRIVE_BUSY: | |
f98806d6 | 2262 | dev_err(&skdev->pdev->dev, "Drive Busy...\n"); |
e67f86b3 AB |
2263 | skdev->state = SKD_DRVR_STATE_BUSY; |
2264 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
2265 | break; | |
2266 | ||
2267 | case FIT_SR_DRIVE_SOFT_RESET: | |
f98806d6 | 2268 | dev_err(&skdev->pdev->dev, "drive soft reset in prog\n"); |
e67f86b3 AB |
2269 | break; |
2270 | ||
2271 | case FIT_SR_DRIVE_FAULT: | |
2272 | /* The fault state is bad: a soft reset won't clear it, and a | |
2273 | * hard reset may or may not work on this device. | |
2274 | * For now, just fault so the system doesn't hang. | |
2275 | */ | |
2276 | skd_drive_fault(skdev); | |
2277 | /* start the queue so we can respond to requests with an error */ | |
f98806d6 | 2278 | dev_dbg(&skdev->pdev->dev, "starting queue\n"); |
6a5ec65b | 2279 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
2280 | skdev->gendisk_on = -1; |
2281 | wake_up_interruptible(&skdev->waitq); | |
2282 | break; | |
2283 | ||
2284 | case 0xFF: | |
2285 | /* Most likely the device isn't there or isn't responding | |
2286 | * to the BAR1 addresses. */ | |
2287 | skd_drive_disappeared(skdev); | |
2288 | /* start the queue so we can respond to requests with an error */ | |
f98806d6 BVA |
2289 | dev_dbg(&skdev->pdev->dev, |
2290 | "starting queue to error-out reqs\n"); | |
6a5ec65b | 2291 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
2292 | skdev->gendisk_on = -1; |
2293 | wake_up_interruptible(&skdev->waitq); | |
2294 | break; | |
2295 | ||
2296 | default: | |
f98806d6 BVA |
2297 | dev_err(&skdev->pdev->dev, "Start: unknown state %x\n", |
2298 | skdev->drive_state); | |
e67f86b3 AB |
2299 | break; |
2300 | } | |
2301 | ||
2302 | state = SKD_READL(skdev, FIT_CONTROL); | |
f98806d6 | 2303 | dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state); |
e67f86b3 AB |
2304 | |
2305 | state = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
f98806d6 | 2306 | dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state); |
e67f86b3 AB |
2307 | |
2308 | state = SKD_READL(skdev, FIT_INT_MASK_HOST); | |
f98806d6 | 2309 | dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state); |
e67f86b3 AB |
2310 | |
2311 | state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
f98806d6 | 2312 | dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state); |
e67f86b3 AB |
2313 | |
2314 | state = SKD_READL(skdev, FIT_HW_VERSION); | |
f98806d6 | 2315 | dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state); |
e67f86b3 AB |
2316 | |
2317 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2318 | } | |
2319 | ||
2320 | static void skd_stop_device(struct skd_device *skdev) | |
2321 | { | |
2322 | unsigned long flags; | |
2323 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
2324 | u32 dev_state; | |
2325 | int i; | |
2326 | ||
2327 | spin_lock_irqsave(&skdev->lock, flags); | |
2328 | ||
2329 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
f98806d6 | 2330 | dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__); |
e67f86b3 AB |
2331 | goto stop_out; |
2332 | } | |
2333 | ||
2334 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) { | |
f98806d6 | 2335 | dev_err(&skdev->pdev->dev, "%s no special\n", __func__); |
e67f86b3 AB |
2336 | goto stop_out; |
2337 | } | |
2338 | ||
2339 | skdev->state = SKD_DRVR_STATE_SYNCING; | |
2340 | skdev->sync_done = 0; | |
2341 | ||
2342 | skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); | |
2343 | ||
2344 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2345 | ||
2346 | wait_event_interruptible_timeout(skdev->waitq, | |
2347 | (skdev->sync_done), (10 * HZ)); | |
2348 | ||
2349 | spin_lock_irqsave(&skdev->lock, flags); | |
2350 | ||
2351 | switch (skdev->sync_done) { | |
2352 | case 0: | |
f98806d6 | 2353 | dev_err(&skdev->pdev->dev, "%s no sync\n", __func__); |
e67f86b3 AB |
2354 | break; |
2355 | case 1: | |
f98806d6 | 2356 | dev_err(&skdev->pdev->dev, "%s sync done\n", __func__); |
e67f86b3 AB |
2357 | break; |
2358 | default: | |
f98806d6 | 2359 | dev_err(&skdev->pdev->dev, "%s sync error\n", __func__); |
e67f86b3 AB |
2360 | } |
2361 | ||
2362 | stop_out: | |
2363 | skdev->state = SKD_DRVR_STATE_STOPPING; | |
2364 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2365 | ||
2366 | skd_kill_timer(skdev); | |
2367 | ||
2368 | spin_lock_irqsave(&skdev->lock, flags); | |
2369 | skd_disable_interrupts(skdev); | |
2370 | ||
2371 | /* ensure all ints on device are cleared */ | |
2372 | /* soft reset the device to unload with a clean slate */ | |
2373 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
2374 | SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); | |
2375 | ||
2376 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2377 | ||
2378 | /* poll every 100ms, 1 second timeout */ | |
2379 | for (i = 0; i < 10; i++) { | |
2380 | dev_state = | |
2381 | SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; | |
2382 | if (dev_state == FIT_SR_DRIVE_INIT) | |
2383 | break; | |
2384 | set_current_state(TASK_INTERRUPTIBLE); | |
2385 | schedule_timeout(msecs_to_jiffies(100)); | |
2386 | } | |
2387 | ||
2388 | if (dev_state != FIT_SR_DRIVE_INIT) | |
f98806d6 BVA |
2389 | dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__, |
2390 | dev_state); | |
e67f86b3 AB |
2391 | } |
2392 | ||
2393 | /* assume spinlock is held */ | |
2394 | static void skd_restart_device(struct skd_device *skdev) | |
2395 | { | |
2396 | u32 state; | |
2397 | ||
2398 | /* ack all ghost interrupts */ | |
2399 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
2400 | ||
2401 | state = SKD_READL(skdev, FIT_STATUS); | |
2402 | ||
f98806d6 | 2403 | dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state); |
e67f86b3 AB |
2404 | |
2405 | state &= FIT_SR_DRIVE_STATE_MASK; | |
2406 | skdev->drive_state = state; | |
2407 | skdev->last_mtd = 0; | |
2408 | ||
2409 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
2410 | skdev->timer_countdown = SKD_RESTARTING_TIMO; | |
2411 | ||
2412 | skd_soft_reset(skdev); | |
2413 | } | |
2414 | ||
2415 | /* assume spinlock is held */ | |
2416 | static int skd_quiesce_dev(struct skd_device *skdev) | |
2417 | { | |
2418 | int rc = 0; | |
2419 | ||
2420 | switch (skdev->state) { | |
2421 | case SKD_DRVR_STATE_BUSY: | |
2422 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
f98806d6 | 2423 | dev_dbg(&skdev->pdev->dev, "stopping queue\n"); |
6a5ec65b | 2424 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
2425 | break; |
2426 | case SKD_DRVR_STATE_ONLINE: | |
2427 | case SKD_DRVR_STATE_STOPPING: | |
2428 | case SKD_DRVR_STATE_SYNCING: | |
2429 | case SKD_DRVR_STATE_PAUSING: | |
2430 | case SKD_DRVR_STATE_PAUSED: | |
2431 | case SKD_DRVR_STATE_STARTING: | |
2432 | case SKD_DRVR_STATE_RESTARTING: | |
2433 | case SKD_DRVR_STATE_RESUMING: | |
2434 | default: | |
2435 | rc = -EINVAL; | |
f98806d6 BVA |
2436 | dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n", |
2437 | skdev->state); | |
e67f86b3 AB |
2438 | } |
2439 | return rc; | |
2440 | } | |
2441 | ||
2442 | /* assume spinlock is held */ | |
2443 | static int skd_unquiesce_dev(struct skd_device *skdev) | |
2444 | { | |
2445 | int prev_driver_state = skdev->state; | |
2446 | ||
2447 | skd_log_skdev(skdev, "unquiesce"); | |
2448 | if (skdev->state == SKD_DRVR_STATE_ONLINE) { | |
f98806d6 | 2449 | dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n"); |
e67f86b3 AB |
2450 | return 0; |
2451 | } | |
2452 | if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { | |
2453 | /* | |
2454 | * If there has been a state change to other than | |
2455 | * ONLINE, we will rely on controller state change | |
2456 | * to come back online and restart the queue. | |
2457 | * The BUSY state means that the driver is ready to | |
2458 | * continue normal processing but is waiting for the controller | |
2459 | * to become available. | |
2460 | */ | |
2461 | skdev->state = SKD_DRVR_STATE_BUSY; | |
f98806d6 | 2462 | dev_dbg(&skdev->pdev->dev, "drive BUSY state\n"); |
e67f86b3 AB |
2463 | return 0; |
2464 | } | |
2465 | ||
2466 | /* | |
2467 | * The drive has just come online; the driver is either in startup, | |
2468 | * paused performing a task, or busy waiting for hardware. | |
2469 | */ | |
2470 | switch (skdev->state) { | |
2471 | case SKD_DRVR_STATE_PAUSED: | |
2472 | case SKD_DRVR_STATE_BUSY: | |
2473 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
2474 | case SKD_DRVR_STATE_BUSY_ERASE: | |
2475 | case SKD_DRVR_STATE_STARTING: | |
2476 | case SKD_DRVR_STATE_RESTARTING: | |
2477 | case SKD_DRVR_STATE_FAULT: | |
2478 | case SKD_DRVR_STATE_IDLE: | |
2479 | case SKD_DRVR_STATE_LOAD: | |
2480 | skdev->state = SKD_DRVR_STATE_ONLINE; | |
f98806d6 BVA |
2481 | dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n", |
2482 | skd_skdev_state_to_str(prev_driver_state), | |
2483 | prev_driver_state, skd_skdev_state_to_str(skdev->state), | |
2484 | skdev->state); | |
2485 | dev_dbg(&skdev->pdev->dev, | |
2486 | "**** device ONLINE...starting block queue\n"); | |
2487 | dev_dbg(&skdev->pdev->dev, "starting queue\n"); | |
2488 | dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n"); | |
6a5ec65b | 2489 | blk_start_queue(skdev->queue); |
e67f86b3 AB |
2490 | skdev->gendisk_on = 1; |
2491 | wake_up_interruptible(&skdev->waitq); | |
2492 | break; | |
2493 | ||
2494 | case SKD_DRVR_STATE_DISAPPEARED: | |
2495 | default: | |
f98806d6 BVA |
2496 | dev_dbg(&skdev->pdev->dev, |
2497 | "**** driver state %d, not implemented\n", | |
2498 | skdev->state); | |
e67f86b3 AB |
2499 | return -EBUSY; |
2500 | } | |
2501 | return 0; | |
2502 | } | |
2503 | ||
2504 | /* | |
2505 | ***************************************************************************** | |
2506 | * PCIe MSI/MSI-X INTERRUPT HANDLERS | |
2507 | ***************************************************************************** | |
2508 | */ | |
2509 | ||
2510 | static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) | |
2511 | { | |
2512 | struct skd_device *skdev = skd_host_data; | |
2513 | unsigned long flags; | |
2514 | ||
2515 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 BVA |
2516 | dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", |
2517 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
2518 | dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq, | |
2519 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
2520 | SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); |
2521 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2522 | return IRQ_HANDLED; | |
2523 | } | |
2524 | ||
2525 | static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) | |
2526 | { | |
2527 | struct skd_device *skdev = skd_host_data; | |
2528 | unsigned long flags; | |
2529 | ||
2530 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 BVA |
2531 | dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", |
2532 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
2533 | SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); |
2534 | skd_isr_fwstate(skdev); | |
2535 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2536 | return IRQ_HANDLED; | |
2537 | } | |
2538 | ||
2539 | static irqreturn_t skd_comp_q(int irq, void *skd_host_data) | |
2540 | { | |
2541 | struct skd_device *skdev = skd_host_data; | |
2542 | unsigned long flags; | |
2543 | int flush_enqueued = 0; | |
2544 | int deferred; | |
2545 | ||
2546 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 BVA |
2547 | dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", |
2548 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
2549 | SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); |
2550 | deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, | |
2551 | &flush_enqueued); | |
e67f86b3 | 2552 | if (flush_enqueued) |
8fe70065 | 2553 | blk_run_queue_async(skdev->queue); |
e67f86b3 AB |
2554 | |
2555 | if (deferred) | |
2556 | schedule_work(&skdev->completion_worker); | |
2557 | else if (!flush_enqueued) | |
8fe70065 | 2558 | blk_run_queue_async(skdev->queue); |
e67f86b3 AB |
2559 | |
2560 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2561 | ||
2562 | return IRQ_HANDLED; | |
2563 | } | |
2564 | ||
2565 | static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) | |
2566 | { | |
2567 | struct skd_device *skdev = skd_host_data; | |
2568 | unsigned long flags; | |
2569 | ||
2570 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 BVA |
2571 | dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", |
2572 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
2573 | SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); |
2574 | skd_isr_msg_from_dev(skdev); | |
2575 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2576 | return IRQ_HANDLED; | |
2577 | } | |
2578 | ||
2579 | static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) | |
2580 | { | |
2581 | struct skd_device *skdev = skd_host_data; | |
2582 | unsigned long flags; | |
2583 | ||
2584 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 BVA |
2585 | dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n", |
2586 | SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
e67f86b3 AB |
2587 | SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); |
2588 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2589 | return IRQ_HANDLED; | |
2590 | } | |
2591 | ||
2592 | /* | |
2593 | ***************************************************************************** | |
2594 | * PCIe MSI/MSI-X SETUP | |
2595 | ***************************************************************************** | |
2596 | */ | |
2597 | ||
2598 | struct skd_msix_entry { | |
e67f86b3 AB |
2599 | char isr_name[30]; |
2600 | }; | |
2601 | ||
2602 | struct skd_init_msix_entry { | |
2603 | const char *name; | |
2604 | irq_handler_t handler; | |
2605 | }; | |
2606 | ||
2607 | #define SKD_MAX_MSIX_COUNT 13 | |
2608 | #define SKD_MIN_MSIX_COUNT 7 | |
2609 | #define SKD_BASE_MSIX_IRQ 4 | |
2610 | ||
2611 | static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { | |
2612 | { "(DMA 0)", skd_reserved_isr }, | |
2613 | { "(DMA 1)", skd_reserved_isr }, | |
2614 | { "(DMA 2)", skd_reserved_isr }, | |
2615 | { "(DMA 3)", skd_reserved_isr }, | |
2616 | { "(State Change)", skd_statec_isr }, | |
2617 | { "(COMPL_Q)", skd_comp_q }, | |
2618 | { "(MSG)", skd_msg_isr }, | |
2619 | { "(Reserved)", skd_reserved_isr }, | |
2620 | { "(Reserved)", skd_reserved_isr }, | |
2621 | { "(Queue Full 0)", skd_qfull_isr }, | |
2622 | { "(Queue Full 1)", skd_qfull_isr }, | |
2623 | { "(Queue Full 2)", skd_qfull_isr }, | |
2624 | { "(Queue Full 3)", skd_qfull_isr }, | |
2625 | }; | |
2626 | ||
e67f86b3 AB |
2627 | static int skd_acquire_msix(struct skd_device *skdev) |
2628 | { | |
a9df8625 | 2629 | int i, rc; |
46817769 | 2630 | struct pci_dev *pdev = skdev->pdev; |
e67f86b3 | 2631 | |
180b0ae7 CH |
2632 | rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT, |
2633 | PCI_IRQ_MSIX); | |
2634 | if (rc < 0) { | |
f98806d6 | 2635 | dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc); |
3bc8492f | 2636 | goto out; |
e67f86b3 | 2637 | } |
46817769 | 2638 | |
180b0ae7 CH |
2639 | skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT, |
2640 | sizeof(struct skd_msix_entry), GFP_KERNEL); | |
e67f86b3 AB |
2641 | if (!skdev->msix_entries) { |
2642 | rc = -ENOMEM; | |
f98806d6 | 2643 | dev_err(&skdev->pdev->dev, "msix table allocation error\n"); |
3bc8492f | 2644 | goto out; |
e67f86b3 AB |
2645 | } |
2646 | ||
e67f86b3 | 2647 | /* Enable MSI-X vectors for the base queue */ |
180b0ae7 CH |
2648 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { |
2649 | struct skd_msix_entry *qentry = &skdev->msix_entries[i]; | |
2650 | ||
e67f86b3 AB |
2651 | snprintf(qentry->isr_name, sizeof(qentry->isr_name), |
2652 | "%s%d-msix %s", DRV_NAME, skdev->devno, | |
2653 | msix_entries[i].name); | |
180b0ae7 CH |
2654 | |
2655 | rc = devm_request_irq(&skdev->pdev->dev, | |
2656 | pci_irq_vector(skdev->pdev, i), | |
2657 | msix_entries[i].handler, 0, | |
2658 | qentry->isr_name, skdev); | |
e67f86b3 | 2659 | if (rc) { |
f98806d6 BVA |
2660 | dev_err(&skdev->pdev->dev, |
2661 | "Unable to register(%d) MSI-X handler %d: %s\n", | |
2662 | rc, i, qentry->isr_name); | |
e67f86b3 | 2663 | goto msix_out; |
e67f86b3 AB |
2664 | } |
2665 | } | |
180b0ae7 | 2666 | |
f98806d6 BVA |
2667 | dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n", |
2668 | SKD_MAX_MSIX_COUNT); | |
e67f86b3 AB |
2669 | return 0; |
2670 | ||
2671 | msix_out: | |
180b0ae7 CH |
2672 | while (--i >= 0) |
2673 | devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev); | |
3bc8492f | 2674 | out: |
180b0ae7 CH |
2675 | kfree(skdev->msix_entries); |
2676 | skdev->msix_entries = NULL; | |
e67f86b3 AB |
2677 | return rc; |
2678 | } | |
2679 | ||
2680 | static int skd_acquire_irq(struct skd_device *skdev) | |
2681 | { | |
180b0ae7 CH |
2682 | struct pci_dev *pdev = skdev->pdev; |
2683 | unsigned int irq_flag = PCI_IRQ_LEGACY; | |
e67f86b3 | 2684 | int rc; |
e67f86b3 | 2685 | |
180b0ae7 | 2686 | if (skd_isr_type == SKD_IRQ_MSIX) { |
e67f86b3 AB |
2687 | rc = skd_acquire_msix(skdev); |
2688 | if (!rc) | |
180b0ae7 CH |
2689 | return 0; |
2690 | ||
f98806d6 BVA |
2691 | dev_err(&skdev->pdev->dev, |
2692 | "failed to enable MSI-X, re-trying with MSI %d\n", rc); | |
e67f86b3 | 2693 | } |
180b0ae7 CH |
2694 | |
2695 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME, | |
2696 | skdev->devno); | |
2697 | ||
2698 | if (skd_isr_type != SKD_IRQ_LEGACY) | |
2699 | irq_flag |= PCI_IRQ_MSI; | |
2700 | rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag); | |
2701 | if (rc < 0) { | |
f98806d6 BVA |
2702 | dev_err(&skdev->pdev->dev, |
2703 | "failed to allocate the MSI interrupt %d\n", rc); | |
180b0ae7 CH |
2704 | return rc; |
2705 | } | |
2706 | ||
2707 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, | |
2708 | pdev->msi_enabled ? 0 : IRQF_SHARED, | |
2709 | skdev->isr_name, skdev); | |
2710 | if (rc) { | |
2711 | pci_free_irq_vectors(pdev); | |
f98806d6 BVA |
2712 | dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n", |
2713 | rc); | |
180b0ae7 CH |
2714 | return rc; |
2715 | } | |
2716 | ||
2717 | return 0; | |
e67f86b3 AB |
2718 | } |
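/*
 * Illustrative alternative, not part of the driver: when distinct
 * per-vector handlers are not needed, the explicit MSI-X -> MSI ->
 * legacy fallback above can be collapsed by letting the PCI core try
 * each type in turn, e.g.:
 *
 *	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 *	if (rc < 0)
 *		return rc;
 *	rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
 *			      skd_isr, pdev->msi_enabled ? 0 : IRQF_SHARED,
 *			      skdev->isr_name, skdev);
 *
 * The driver keeps the two-step path because MSI-X mode registers a
 * dedicated handler per vector (see msix_entries[] above).
 */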
2719 | ||
2720 | static void skd_release_irq(struct skd_device *skdev) | |
2721 | { | |
180b0ae7 CH |
2722 | struct pci_dev *pdev = skdev->pdev; |
2723 | ||
2724 | if (skdev->msix_entries) { | |
2725 | int i; | |
2726 | ||
2727 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { | |
2728 | devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), | |
2729 | skdev); | |
2730 | } | |
2731 | ||
2732 | kfree(skdev->msix_entries); | |
2733 | skdev->msix_entries = NULL; | |
2734 | } else { | |
2735 | devm_free_irq(&pdev->dev, pdev->irq, skdev); | |
e67f86b3 | 2736 | } |
180b0ae7 CH |
2737 | |
2738 | pci_free_irq_vectors(pdev); | |
e67f86b3 AB |
2739 | } |
2740 | ||
2741 | /* | |
2742 | ***************************************************************************** | |
2743 | * CONSTRUCT | |
2744 | ***************************************************************************** | |
2745 | */ | |
2746 | ||
e67f86b3 AB |
2747 | static int skd_cons_skcomp(struct skd_device *skdev) |
2748 | { | |
2749 | int rc = 0; | |
2750 | struct fit_completion_entry_v1 *skcomp; | |
e67f86b3 | 2751 | |
f98806d6 | 2752 | dev_dbg(&skdev->pdev->dev, |
6f7c7675 BVA |
2753 | "comp pci_alloc, total bytes %zd entries %d\n", |
2754 | SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY); | |
e67f86b3 | 2755 | |
6f7c7675 | 2756 | skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE, |
a5bbf616 | 2757 | &skdev->cq_dma_address); |
e67f86b3 AB |
2758 | |
2759 | if (skcomp == NULL) { | |
2760 | rc = -ENOMEM; | |
2761 | goto err_out; | |
2762 | } | |
2763 | ||
e67f86b3 AB |
2764 | skdev->skcomp_table = skcomp; |
2765 | skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + | |
2766 | sizeof(*skcomp) * | |
2767 | SKD_N_COMPLETION_ENTRY); | |
2768 | ||
2769 | err_out: | |
2770 | return rc; | |
2771 | } | |
2772 | ||
2773 | static int skd_cons_skmsg(struct skd_device *skdev) | |
2774 | { | |
2775 | int rc = 0; | |
2776 | u32 i; | |
2777 | ||
f98806d6 | 2778 | dev_dbg(&skdev->pdev->dev, |
01433d0d | 2779 | "skmsg_table kcalloc, struct %lu, count %u total %lu\n", |
f98806d6 BVA |
2780 | sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context, |
2781 | sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); | |
e67f86b3 | 2782 | |
01433d0d BVA |
2783 | skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context, |
2784 | sizeof(struct skd_fitmsg_context), | |
2785 | GFP_KERNEL); | |
e67f86b3 AB |
2786 | if (skdev->skmsg_table == NULL) { |
2787 | rc = -ENOMEM; | |
2788 | goto err_out; | |
2789 | } | |
2790 | ||
2791 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
2792 | struct skd_fitmsg_context *skmsg; | |
2793 | ||
2794 | skmsg = &skdev->skmsg_table[i]; | |
2795 | ||
2796 | skmsg->id = i + SKD_ID_FIT_MSG; | |
2797 | ||
e67f86b3 | 2798 | skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, |
6507f436 | 2799 | SKD_N_FITMSG_BYTES, |
e67f86b3 AB |
2800 | &skmsg->mb_dma_address); |
2801 | ||
2802 | if (skmsg->msg_buf == NULL) { | |
2803 | rc = -ENOMEM; | |
2804 | goto err_out; | |
2805 | } | |
2806 | ||
6507f436 BVA |
2807 | WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & |
2808 | (FIT_QCMD_ALIGN - 1), | |
2809 | "not aligned: msg_buf %p mb_dma_address %#llx\n", | |
2810 | skmsg->msg_buf, skmsg->mb_dma_address); | |
e67f86b3 | 2811 | memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); |
e67f86b3 AB |
2812 | } |
2813 | ||
e67f86b3 AB |
2814 | err_out: |
2815 | return rc; | |
2816 | } | |
2817 | ||
542d7b00 BZ |
2818 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, |
2819 | u32 n_sg, | |
2820 | dma_addr_t *ret_dma_addr) | |
2821 | { | |
2822 | struct fit_sg_descriptor *sg_list; | |
2823 | u32 nbytes; | |
2824 | ||
2825 | nbytes = sizeof(*sg_list) * n_sg; | |
2826 | ||
2827 | sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); | |
2828 | ||
2829 | if (sg_list != NULL) { | |
2830 | uint64_t dma_address = *ret_dma_addr; | |
2831 | u32 i; | |
2832 | ||
2833 | memset(sg_list, 0, nbytes); | |
2834 | ||
2835 | for (i = 0; i < n_sg - 1; i++) { | |
2836 | uint64_t ndp_off; | |
2837 | ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); | |
2838 | ||
2839 | sg_list[i].next_desc_ptr = dma_address + ndp_off; | |
2840 | } | |
2841 | sg_list[i].next_desc_ptr = 0LL; | |
2842 | } | |
2843 | ||
2844 | return sg_list; | |
2845 | } | |
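/*
 * Illustrative note, not part of the driver: skd_cons_sg_list() lays
 * the whole chain out in one coherent DMA allocation, so each
 * descriptor's next_desc_ptr is simply the base bus address plus the
 * byte offset of the following element:
 *
 *	sg_list[0].next_desc_ptr = dma_addr + 1 * sizeof(*sg_list)
 *	sg_list[1].next_desc_ptr = dma_addr + 2 * sizeof(*sg_list)
 *	...
 *	sg_list[n_sg - 1].next_desc_ptr = 0	(end of chain)
 *
 * The device can then walk the scatter/gather list on its own without
 * any further address translation help from the host.
 */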
2846 | ||
e67f86b3 AB |
2847 | static int skd_cons_skreq(struct skd_device *skdev) |
2848 | { | |
2849 | int rc = 0; | |
2850 | u32 i; | |
2851 | ||
f98806d6 | 2852 | dev_dbg(&skdev->pdev->dev, |
01433d0d | 2853 | "skreq_table kcalloc, struct %lu, count %u total %lu\n", |
f98806d6 BVA |
2854 | sizeof(struct skd_request_context), skdev->num_req_context, |
2855 | sizeof(struct skd_request_context) * skdev->num_req_context); | |
e67f86b3 | 2856 | |
01433d0d BVA |
2857 | skdev->skreq_table = kcalloc(skdev->num_req_context, |
2858 | sizeof(struct skd_request_context), | |
2859 | GFP_KERNEL); | |
e67f86b3 AB |
2860 | if (skdev->skreq_table == NULL) { |
2861 | rc = -ENOMEM; | |
2862 | goto err_out; | |
2863 | } | |
2864 | ||
f98806d6 BVA |
2865 | dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n", |
2866 | skdev->sgs_per_request, sizeof(struct scatterlist), | |
2867 | skdev->sgs_per_request * sizeof(struct scatterlist)); | |
e67f86b3 AB |
2868 | |
2869 | for (i = 0; i < skdev->num_req_context; i++) { | |
2870 | struct skd_request_context *skreq; | |
2871 | ||
2872 | skreq = &skdev->skreq_table[i]; | |
e67f86b3 | 2873 | skreq->state = SKD_REQ_STATE_IDLE; |
01433d0d BVA |
2874 | skreq->sg = kcalloc(skdev->sgs_per_request, |
2875 | sizeof(struct scatterlist), GFP_KERNEL); | |
e67f86b3 AB |
2876 | if (skreq->sg == NULL) { |
2877 | rc = -ENOMEM; | |
2878 | goto err_out; | |
2879 | } | |
2880 | sg_init_table(skreq->sg, skdev->sgs_per_request); | |
2881 | ||
2882 | skreq->sksg_list = skd_cons_sg_list(skdev, | |
2883 | skdev->sgs_per_request, | |
2884 | &skreq->sksg_dma_address); | |
2885 | ||
2886 | if (skreq->sksg_list == NULL) { | |
2887 | rc = -ENOMEM; | |
2888 | goto err_out; | |
2889 | } | |
e67f86b3 AB |
2890 | } |
2891 | ||
e67f86b3 AB |
2892 | err_out: |
2893 | return rc; | |
2894 | } | |
2895 | ||
e67f86b3 AB |
2896 | static int skd_cons_sksb(struct skd_device *skdev) |
2897 | { | |
2898 | int rc = 0; | |
2899 | struct skd_special_context *skspcl; | |
2900 | u32 nbytes; | |
2901 | ||
2902 | skspcl = &skdev->internal_skspcl; | |
2903 | ||
2904 | skspcl->req.id = 0 + SKD_ID_INTERNAL; | |
2905 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
2906 | ||
2907 | nbytes = SKD_N_INTERNAL_BYTES; | |
2908 | ||
a5bbf616 JP |
2909 | skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes, |
2910 | &skspcl->db_dma_address); | |
e67f86b3 AB |
2911 | if (skspcl->data_buf == NULL) { |
2912 | rc = -ENOMEM; | |
2913 | goto err_out; | |
2914 | } | |
2915 | ||
e67f86b3 | 2916 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; |
a5bbf616 JP |
2917 | skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes, |
2918 | &skspcl->mb_dma_address); | |
e67f86b3 AB |
2919 | if (skspcl->msg_buf == NULL) { |
2920 | rc = -ENOMEM; | |
2921 | goto err_out; | |
2922 | } | |
2923 | ||
e67f86b3 AB |
2924 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, |
2925 | &skspcl->req.sksg_dma_address); | |
2926 | if (skspcl->req.sksg_list == NULL) { | |
2927 | rc = -ENOMEM; | |
2928 | goto err_out; | |
2929 | } | |
2930 | ||
2931 | if (!skd_format_internal_skspcl(skdev)) { | |
2932 | rc = -EINVAL; | |
2933 | goto err_out; | |
2934 | } | |
2935 | ||
2936 | err_out: | |
2937 | return rc; | |
2938 | } | |
2939 | ||
e67f86b3 AB |
2940 | static int skd_cons_disk(struct skd_device *skdev) |
2941 | { | |
2942 | int rc = 0; | |
2943 | struct gendisk *disk; | |
2944 | struct request_queue *q; | |
2945 | unsigned long flags; | |
2946 | ||
2947 | disk = alloc_disk(SKD_MINORS_PER_DEVICE); | |
2948 | if (!disk) { | |
2949 | rc = -ENOMEM; | |
2950 | goto err_out; | |
2951 | } | |
2952 | ||
2953 | skdev->disk = disk; | |
2954 | sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); | |
2955 | ||
2956 | disk->major = skdev->major; | |
2957 | disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; | |
2958 | disk->fops = &skd_blockdev_ops; | |
2959 | disk->private_data = skdev; | |
2960 | ||
fcd37eb3 | 2961 | q = blk_init_queue(skd_request_fn, &skdev->lock); |
e67f86b3 AB |
2962 | if (!q) { |
2963 | rc = -ENOMEM; | |
2964 | goto err_out; | |
2965 | } | |
8fc45044 | 2966 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
f18c17c8 BVA |
2967 | q->nr_requests = skd_max_queue_depth / 2; |
2968 | blk_queue_init_tags(q, skd_max_queue_depth, NULL, BLK_TAG_ALLOC_FIFO); | |
e67f86b3 AB |
2969 | |
2970 | skdev->queue = q; | |
2971 | disk->queue = q; | |
2972 | q->queuedata = skdev; | |
2973 | ||
6975f732 | 2974 | blk_queue_write_cache(q, true, true); |
e67f86b3 AB |
2975 | blk_queue_max_segments(q, skdev->sgs_per_request); |
2976 | blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); | |
2977 | ||
a5c5b392 | 2978 | /* set optimal I/O size to 8KB */ |
e67f86b3 AB |
2979 | blk_queue_io_opt(q, 8192); |
2980 | ||
e67f86b3 | 2981 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); |
b277da0a | 2982 | queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); |
e67f86b3 AB |
2983 | |
2984 | spin_lock_irqsave(&skdev->lock, flags); | |
f98806d6 | 2985 | dev_dbg(&skdev->pdev->dev, "stopping queue\n"); |
6a5ec65b | 2986 | blk_stop_queue(skdev->queue); |
e67f86b3 AB |
2987 | spin_unlock_irqrestore(&skdev->lock, flags); |
2988 | ||
2989 | err_out: | |
2990 | return rc; | |
2991 | } | |
2992 | ||
542d7b00 BZ |
2993 | #define SKD_N_DEV_TABLE 16u |
2994 | static u32 skd_next_devno; | |
e67f86b3 | 2995 | |
542d7b00 | 2996 | static struct skd_device *skd_construct(struct pci_dev *pdev) |
e67f86b3 | 2997 | { |
542d7b00 BZ |
2998 | struct skd_device *skdev; |
2999 | int blk_major = skd_major; | |
3000 | int rc; | |
e67f86b3 | 3001 | |
542d7b00 | 3002 | skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); |
e67f86b3 | 3003 | |
542d7b00 | 3004 | if (!skdev) { |
f98806d6 | 3005 | dev_err(&pdev->dev, "memory alloc failure\n"); |
542d7b00 BZ |
3006 | return NULL; |
3007 | } | |
e67f86b3 | 3008 | |
542d7b00 BZ |
3009 | skdev->state = SKD_DRVR_STATE_LOAD; |
3010 | skdev->pdev = pdev; | |
3011 | skdev->devno = skd_next_devno++; | |
3012 | skdev->major = blk_major; | |
542d7b00 | 3013 | skdev->dev_max_queue_depth = 0; |
e67f86b3 | 3014 | |
542d7b00 BZ |
3015 | skdev->num_req_context = skd_max_queue_depth; |
3016 | skdev->num_fitmsg_context = skd_max_queue_depth; | |
542d7b00 BZ |
3017 | skdev->cur_max_queue_depth = 1; |
3018 | skdev->queue_low_water_mark = 1; | |
3019 | skdev->proto_ver = 99; | |
3020 | skdev->sgs_per_request = skd_sgs_per_request; | |
3021 | skdev->dbg_level = skd_dbg_level; | |
e67f86b3 | 3022 | |
542d7b00 BZ |
3023 | spin_lock_init(&skdev->lock); |
3024 | ||
3025 | INIT_WORK(&skdev->completion_worker, skd_completion_worker); | |
e67f86b3 | 3026 | |
f98806d6 | 3027 | dev_dbg(&skdev->pdev->dev, "skcomp\n"); |
542d7b00 BZ |
3028 | rc = skd_cons_skcomp(skdev); |
3029 | if (rc < 0) | |
3030 | goto err_out; | |
e67f86b3 | 3031 | |
f98806d6 | 3032 | dev_dbg(&skdev->pdev->dev, "skmsg\n"); |
542d7b00 BZ |
3033 | rc = skd_cons_skmsg(skdev); |
3034 | if (rc < 0) | |
3035 | goto err_out; | |
3036 | ||
f98806d6 | 3037 | dev_dbg(&skdev->pdev->dev, "skreq\n"); |
542d7b00 BZ |
3038 | rc = skd_cons_skreq(skdev); |
3039 | if (rc < 0) | |
3040 | goto err_out; | |
3041 | ||
f98806d6 | 3042 | dev_dbg(&skdev->pdev->dev, "sksb\n"); |
542d7b00 BZ |
3043 | rc = skd_cons_sksb(skdev); |
3044 | if (rc < 0) | |
3045 | goto err_out; | |
3046 | ||
f98806d6 | 3047 | dev_dbg(&skdev->pdev->dev, "disk\n"); |
542d7b00 BZ |
3048 | rc = skd_cons_disk(skdev); |
3049 | if (rc < 0) | |
3050 | goto err_out; | |
3051 | ||
f98806d6 | 3052 | dev_dbg(&skdev->pdev->dev, "VICTORY\n"); |
542d7b00 BZ |
3053 | return skdev; |
3054 | ||
3055 | err_out: | |
f98806d6 | 3056 | dev_dbg(&skdev->pdev->dev, "construct failed\n"); |
542d7b00 BZ |
3057 | skd_destruct(skdev); |
3058 | return NULL; | |
e67f86b3 AB |
3059 | } |
3060 | ||
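/*
 * Construction is staged: any skd_cons_*() failure above funnels into
 * err_out and skd_destruct(), so each skd_free_*() routine in the next
 * section must tolerate a partially constructed device; hence the NULL
 * checks before every free.
 */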
542d7b00 BZ |
3061 | /* |
3062 | ***************************************************************************** | |
3063 | * DESTRUCT (FREE) | |
3064 | ***************************************************************************** | |
3065 | */ | |
3066 | ||
e67f86b3 AB |
3067 | static void skd_free_skcomp(struct skd_device *skdev) |
3068 | { | |
7f13bdad BVA |
3069 | if (skdev->skcomp_table) |
3070 | pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE, | |
e67f86b3 | 3071 | skdev->skcomp_table, skdev->cq_dma_address); |
e67f86b3 AB |
3072 | |
3073 | skdev->skcomp_table = NULL; | |
3074 | skdev->cq_dma_address = 0; | |
3075 | } | |
3076 | ||
3077 | static void skd_free_skmsg(struct skd_device *skdev) | |
3078 | { | |
3079 | u32 i; | |
3080 | ||
3081 | if (skdev->skmsg_table == NULL) | |
3082 | return; | |
3083 | ||
3084 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
3085 | struct skd_fitmsg_context *skmsg; | |
3086 | ||
3087 | skmsg = &skdev->skmsg_table[i]; | |
3088 | ||
3089 | if (skmsg->msg_buf != NULL) { | |
e67f86b3 AB |
3090 | pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES, |
3091 | skmsg->msg_buf, | |
3092 | skmsg->mb_dma_address); | |
3093 | } | |
3094 | skmsg->msg_buf = NULL; | |
3095 | skmsg->mb_dma_address = 0; | |
3096 | } | |
3097 | ||
3098 | kfree(skdev->skmsg_table); | |
3099 | skdev->skmsg_table = NULL; | |
3100 | } | |
3101 | ||
542d7b00 BZ |
3102 | static void skd_free_sg_list(struct skd_device *skdev, |
3103 | struct fit_sg_descriptor *sg_list, | |
3104 | u32 n_sg, dma_addr_t dma_addr) | |
3105 | { | |
3106 | if (sg_list != NULL) { | |
3107 | u32 nbytes; | |
3108 | ||
3109 | nbytes = sizeof(*sg_list) * n_sg; | |
3110 | ||
3111 | pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr); | |
3112 | } | |
3113 | } | |
3114 | ||
e67f86b3 AB |
3115 | static void skd_free_skreq(struct skd_device *skdev) |
3116 | { | |
3117 | u32 i; | |
3118 | ||
3119 | if (skdev->skreq_table == NULL) | |
3120 | return; | |
3121 | ||
3122 | for (i = 0; i < skdev->num_req_context; i++) { | |
3123 | struct skd_request_context *skreq; | |
3124 | ||
3125 | skreq = &skdev->skreq_table[i]; | |
3126 | ||
3127 | skd_free_sg_list(skdev, skreq->sksg_list, | |
3128 | skdev->sgs_per_request, | |
3129 | skreq->sksg_dma_address); | |
3130 | ||
3131 | skreq->sksg_list = NULL; | |
3132 | skreq->sksg_dma_address = 0; | |
3133 | ||
3134 | kfree(skreq->sg); | |
3135 | } | |
3136 | ||
3137 | kfree(skdev->skreq_table); | |
3138 | skdev->skreq_table = NULL; | |
3139 | } | |
3140 | ||
e67f86b3 AB |
3141 | static void skd_free_sksb(struct skd_device *skdev) |
3142 | { | |
3143 | struct skd_special_context *skspcl; | |
3144 | u32 nbytes; | |
3145 | ||
3146 | skspcl = &skdev->internal_skspcl; | |
3147 | ||
3148 | if (skspcl->data_buf != NULL) { | |
3149 | nbytes = SKD_N_INTERNAL_BYTES; | |
3150 | ||
3151 | pci_free_consistent(skdev->pdev, nbytes, | |
3152 | skspcl->data_buf, skspcl->db_dma_address); | |
3153 | } | |
3154 | ||
3155 | skspcl->data_buf = NULL; | |
3156 | skspcl->db_dma_address = 0; | |
3157 | ||
3158 | if (skspcl->msg_buf != NULL) { | |
3159 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
3160 | pci_free_consistent(skdev->pdev, nbytes, | |
3161 | skspcl->msg_buf, skspcl->mb_dma_address); | |
3162 | } | |
3163 | ||
3164 | skspcl->msg_buf = NULL; | |
3165 | skspcl->mb_dma_address = 0; | |
3166 | ||
3167 | skd_free_sg_list(skdev, skspcl->req.sksg_list, 1, | |
3168 | skspcl->req.sksg_dma_address); | |
3169 | ||
3170 | skspcl->req.sksg_list = NULL; | |
3171 | skspcl->req.sksg_dma_address = 0; | |
3172 | } | |
3173 | ||
e67f86b3 AB |
3174 | static void skd_free_disk(struct skd_device *skdev) |
3175 | { | |
3176 | struct gendisk *disk = skdev->disk; | |
3177 | ||
7277cc67 BVA |
3178 | if (disk && (disk->flags & GENHD_FL_UP)) |
3179 | del_gendisk(disk); | |
3180 | ||
3181 | if (skdev->queue) { | |
3182 | blk_cleanup_queue(skdev->queue); | |
3183 | skdev->queue = NULL; | |
3184 | disk->queue = NULL; | |
e67f86b3 | 3185 | } |
7277cc67 BVA |
3186 | |
3187 | put_disk(disk); | |
e67f86b3 AB |
3188 | skdev->disk = NULL; |
3189 | } | |
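/*
 * Note: del_gendisk() is only valid for a disk that was actually added,
 * hence the GENHD_FL_UP check above; put_disk() then drops the reference
 * taken by alloc_disk() and is a no-op for a NULL disk.
 */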
3190 | ||
542d7b00 BZ |
3191 | static void skd_destruct(struct skd_device *skdev) |
3192 | { | |
3193 | if (skdev == NULL) | |
3194 | return; | |
3195 | ||
f98806d6 | 3196 | dev_dbg(&skdev->pdev->dev, "disk\n"); |
542d7b00 BZ |
3197 | skd_free_disk(skdev); |
3198 | ||
f98806d6 | 3199 | dev_dbg(&skdev->pdev->dev, "sksb\n"); |
542d7b00 BZ |
3200 | skd_free_sksb(skdev); |
3201 | ||
f98806d6 | 3202 | dev_dbg(&skdev->pdev->dev, "skreq\n"); |
542d7b00 BZ |
3203 | skd_free_skreq(skdev); |
3204 | ||
f98806d6 | 3205 | dev_dbg(&skdev->pdev->dev, "skmsg\n"); |
542d7b00 | 3206 | skd_free_skmsg(skdev); |
e67f86b3 | 3207 | |
f98806d6 | 3208 | dev_dbg(&skdev->pdev->dev, "skcomp\n"); |
542d7b00 BZ |
3209 | skd_free_skcomp(skdev); |
3210 | ||
f98806d6 | 3211 | dev_dbg(&skdev->pdev->dev, "skdev\n"); |
542d7b00 BZ |
3212 | kfree(skdev); |
3213 | } | |
e67f86b3 AB |
3214 | |
3215 | /* | |
3216 | ***************************************************************************** | |
3217 | * BLOCK DEVICE (BDEV) GLUE | |
3218 | ***************************************************************************** | |
3219 | */ | |
3220 | ||
3221 | static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
3222 | { | |
3223 | struct skd_device *skdev; | |
3224 | u64 capacity; | |
3225 | ||
3226 | skdev = bdev->bd_disk->private_data; | |
3227 | ||
f98806d6 BVA |
3228 | dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n", |
3229 | bdev->bd_disk->disk_name, current->comm); | |
e67f86b3 AB |
3230 | |
3231 | if (skdev->read_cap_is_valid) { | |
3232 | capacity = get_capacity(skdev->disk); | |
3233 | geo->heads = 64; | |
3234 | geo->sectors = 255; | |
3235 | geo->cylinders = (capacity) / (255 * 64); | |
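/*
 * Worked example: a 976773168-sector (~465 GiB) device reports
 * cylinders = 976773168 / 16320 = 59851 under this fixed
 * 64-head, 255-sector translation.
 */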
3236 | ||
3237 | return 0; | |
3238 | } | |
3239 | return -EIO; | |
3240 | } | |
3241 | ||
0d52c756 | 3242 | static int skd_bdev_attach(struct device *parent, struct skd_device *skdev) |
e67f86b3 | 3243 | { |
f98806d6 | 3244 | dev_dbg(&skdev->pdev->dev, "add_disk\n"); |
0d52c756 | 3245 | device_add_disk(parent, skdev->disk); |
e67f86b3 AB |
3246 | return 0; |
3247 | } | |
3248 | ||
3249 | static const struct block_device_operations skd_blockdev_ops = { | |
3250 | .owner = THIS_MODULE, | |
e67f86b3 AB |
3251 | .getgeo = skd_bdev_getgeo, |
3252 | }; | |
3253 | ||
e67f86b3 AB |
3254 | /* |
3255 | ***************************************************************************** | |
3256 | * PCIe DRIVER GLUE | |
3257 | ***************************************************************************** | |
3258 | */ | |
3259 | ||
9baa3c34 | 3260 | static const struct pci_device_id skd_pci_tbl[] = { |
e67f86b3 AB |
3261 | { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120, |
3262 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | |
3263 | { 0 } /* terminate list */ | |
3264 | }; | |
3265 | ||
3266 | MODULE_DEVICE_TABLE(pci, skd_pci_tbl); | |
3267 | ||
3268 | static char *skd_pci_info(struct skd_device *skdev, char *str) | |
3269 | { | |
3270 | int pcie_reg; | |
3271 | ||
3272 | strcpy(str, "PCIe ("); | |
3273 | pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP); | |
3274 | ||
3275 | if (pcie_reg) { | |
3276 | ||
3277 | char lwstr[6]; | |
3278 | uint16_t pcie_lstat, lspeed, lwidth; | |
3279 | ||
3280 | pcie_reg += 0x12; | |
3281 | pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat); | |
3282 | lspeed = pcie_lstat & (0xF); | |
3283 | lwidth = (pcie_lstat & 0x3F0) >> 4; | |
3284 | ||
3285 | if (lspeed == 1) | |
3286 | strcat(str, "2.5GT/s "); | |
3287 | else if (lspeed == 2) | |
3288 | strcat(str, "5.0GT/s "); | |
3289 | else | |
3290 | strcat(str, "<unknown> "); | |
3291 | snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth); | |
3292 | strcat(str, lwstr); | |
3293 | } | |
3294 | return str; | |
3295 | } | |
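/*
 * The config word read above is the PCIe Link Status register (offset
 * 0x12 into the express capability).  Example decode: a value of 0x0042
 * gives lspeed = 0x0042 & 0xF = 2 (5.0GT/s) and lwidth = (0x0042 &
 * 0x3F0) >> 4 = 4, so the reported string is "PCIe (5.0GT/s 4X)".
 */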
3296 | ||
3297 | static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |
3298 | { | |
3299 | int i; | |
3300 | int rc = 0; | |
3301 | char pci_str[32]; | |
3302 | struct skd_device *skdev; | |
3303 | ||
f98806d6 BVA |
3304 | dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n", |
3305 | DRV_NAME, DRV_VERSION, DRV_BUILD_ID); | |
3306 | dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor, | |
3307 | pdev->device); | |
e67f86b3 AB |
3308 | |
3309 | rc = pci_enable_device(pdev); | |
3310 | if (rc) | |
3311 | return rc; | |
3312 | rc = pci_request_regions(pdev, DRV_NAME); | |
3313 | if (rc) | |
3314 | goto err_out; | |
3315 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
3316 | if (!rc) { | |
3317 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | |
f98806d6 BVA |
3318 | dev_err(&pdev->dev, "consistent DMA mask error\n"); | |
e67f86b3 AB |
3320 | } |
3321 | } else { | |
f98806d6 | 3322 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
e67f86b3 | 3323 | if (rc) { |
f98806d6 | 3324 | dev_err(&pdev->dev, "DMA mask error %d\n", rc); |
e67f86b3 AB |
3325 | goto err_out_regions; |
3326 | } | |
3327 | } | |
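/*
 * The 64-bit mask with a 32-bit fallback above is the classic DMA setup
 * pattern; on later kernels it reduces to a single
 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) call.
 */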
3328 | ||
b8df6647 BZ |
3329 | if (!skd_major) { |
3330 | rc = register_blkdev(0, DRV_NAME); | |
3331 | if (rc < 0) | |
3332 | goto err_out_regions; | |
3333 | BUG_ON(!rc); | |
3334 | skd_major = rc; | |
3335 | } | |
3336 | ||
e67f86b3 | 3337 | skdev = skd_construct(pdev); |
1762b57f WY |
3338 | if (skdev == NULL) { |
3339 | rc = -ENOMEM; | |
e67f86b3 | 3340 | goto err_out_regions; |
1762b57f | 3341 | } |
e67f86b3 AB |
3342 | |
3343 | skd_pci_info(skdev, pci_str); | |
f98806d6 | 3344 | dev_info(&pdev->dev, "%s 64bit\n", pci_str); |
e67f86b3 AB |
3345 | |
3346 | pci_set_master(pdev); | |
3347 | rc = pci_enable_pcie_error_reporting(pdev); | |
3348 | if (rc) { | |
f98806d6 BVA |
3349 | dev_err(&pdev->dev, |
3350 | "bad enable of PCIe error reporting rc=%d\n", rc); | |
e67f86b3 AB |
3351 | skdev->pcie_error_reporting_is_enabled = 0; |
3352 | } else | |
3353 | skdev->pcie_error_reporting_is_enabled = 1; | |
3354 | ||
e67f86b3 | 3355 | pci_set_drvdata(pdev, skdev); |
ebedd16d | 3356 | |
e67f86b3 AB |
3357 | for (i = 0; i < SKD_MAX_BARS; i++) { |
3358 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | |
3359 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | |
3360 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | |
3361 | skdev->mem_size[i]); | |
3362 | if (!skdev->mem_map[i]) { | |
f98806d6 BVA |
3363 | dev_err(&pdev->dev, |
3364 | "Unable to map adapter memory!\n"); | |
e67f86b3 AB |
3365 | rc = -ENODEV; |
3366 | goto err_out_iounmap; | |
3367 | } | |
f98806d6 BVA |
3368 | dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", |
3369 | skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], | |
3370 | skdev->mem_size[i]); | |
e67f86b3 AB |
3371 | } |
3372 | ||
3373 | rc = skd_acquire_irq(skdev); | |
3374 | if (rc) { | |
f98806d6 | 3375 | dev_err(&pdev->dev, "interrupt resource error %d\n", rc); |
e67f86b3 AB |
3376 | goto err_out_iounmap; |
3377 | } | |
3378 | ||
3379 | rc = skd_start_timer(skdev); | |
3380 | if (rc) | |
3381 | goto err_out_timer; | |
3382 | ||
3383 | init_waitqueue_head(&skdev->waitq); | |
3384 | ||
3385 | skd_start_device(skdev); | |
3386 | ||
3387 | rc = wait_event_interruptible_timeout(skdev->waitq, | |
3388 | (skdev->gendisk_on), | |
3389 | (SKD_START_WAIT_SECONDS * HZ)); | |
3390 | if (skdev->gendisk_on > 0) { | |
3391 | /* device came on-line after reset */ | |
0d52c756 | 3392 | skd_bdev_attach(&pdev->dev, skdev); |
e67f86b3 AB |
3393 | rc = 0; |
3394 | } else { | |
3395 | /* we timed out, something is wrong with the device; | |
3396 | * don't add the disk structure */ | |
f98806d6 BVA |
3397 | dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n", |
3398 | rc); | |
e67f86b3 AB |
3399 | /* there was no error, so we timed out; report ENXIO */ | |
3400 | if (!rc) | |
3401 | rc = -ENXIO; | |
3402 | goto err_out_timer; | |
3403 | } | |
3404 | ||
e67f86b3 AB |
3405 | return rc; |
3406 | ||
3407 | err_out_timer: | |
3408 | skd_stop_device(skdev); | |
3409 | skd_release_irq(skdev); | |
3410 | ||
3411 | err_out_iounmap: | |
3412 | for (i = 0; i < SKD_MAX_BARS; i++) | |
3413 | if (skdev->mem_map[i]) | |
3414 | iounmap(skdev->mem_map[i]); | |
3415 | ||
3416 | if (skdev->pcie_error_reporting_is_enabled) | |
3417 | pci_disable_pcie_error_reporting(pdev); | |
3418 | ||
3419 | skd_destruct(skdev); | |
3420 | ||
3421 | err_out_regions: | |
3422 | pci_release_regions(pdev); | |
3423 | ||
3424 | err_out: | |
3425 | pci_disable_device(pdev); | |
3426 | pci_set_drvdata(pdev, NULL); | |
3427 | return rc; | |
3428 | } | |
3429 | ||
3430 | static void skd_pci_remove(struct pci_dev *pdev) | |
3431 | { | |
3432 | int i; | |
3433 | struct skd_device *skdev; | |
3434 | ||
3435 | skdev = pci_get_drvdata(pdev); | |
3436 | if (!skdev) { | |
f98806d6 | 3437 | dev_err(&pdev->dev, "no device data for PCI\n"); |
e67f86b3 AB |
3438 | return; |
3439 | } | |
3440 | skd_stop_device(skdev); | |
3441 | skd_release_irq(skdev); | |
3442 | ||
3443 | for (i = 0; i < SKD_MAX_BARS; i++) | |
3444 | if (skdev->mem_map[i]) | |
4854afe3 | 3445 | iounmap(skdev->mem_map[i]); |
e67f86b3 AB |
3446 | |
3447 | if (skdev->pcie_error_reporting_is_enabled) | |
3448 | pci_disable_pcie_error_reporting(pdev); | |
3449 | ||
3450 | skd_destruct(skdev); | |
3451 | ||
3452 | pci_release_regions(pdev); | |
3453 | pci_disable_device(pdev); | |
3454 | pci_set_drvdata(pdev, NULL); | |
3457 | } | |
3458 | ||
3459 | static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state) | |
3460 | { | |
3461 | int i; | |
3462 | struct skd_device *skdev; | |
3463 | ||
3464 | skdev = pci_get_drvdata(pdev); | |
3465 | if (!skdev) { | |
f98806d6 | 3466 | dev_err(&pdev->dev, "no device data for PCI\n"); |
e67f86b3 AB |
3467 | return -EIO; |
3468 | } | |
3469 | ||
3470 | skd_stop_device(skdev); | |
3471 | ||
3472 | skd_release_irq(skdev); | |
3473 | ||
3474 | for (i = 0; i < SKD_MAX_BARS; i++) | |
3475 | if (skdev->mem_map[i]) | |
4854afe3 | 3476 | iounmap(skdev->mem_map[i]); |
e67f86b3 AB |
3477 | |
3478 | if (skdev->pcie_error_reporting_is_enabled) | |
3479 | pci_disable_pcie_error_reporting(pdev); | |
3480 | ||
3481 | pci_release_regions(pdev); | |
3482 | pci_save_state(pdev); | |
3483 | pci_disable_device(pdev); | |
3484 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | |
3485 | return 0; | |
3486 | } | |
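/*
 * Resume below re-runs nearly everything probe did (regions, DMA masks,
 * BAR mapping, IRQ setup, device start) but not disk allocation: the
 * gendisk survives suspend and only the device state machine restarts.
 */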
3487 | ||
3488 | static int skd_pci_resume(struct pci_dev *pdev) | |
3489 | { | |
3490 | int i; | |
3491 | int rc = 0; | |
3492 | struct skd_device *skdev; | |
3493 | ||
3494 | skdev = pci_get_drvdata(pdev); | |
3495 | if (!skdev) { | |
f98806d6 | 3496 | dev_err(&pdev->dev, "no device data for PCI\n"); |
e67f86b3 AB |
3497 | return -EIO; | |
3498 | } | |
3499 | ||
3500 | pci_set_power_state(pdev, PCI_D0); | |
3501 | pci_enable_wake(pdev, PCI_D0, 0); | |
3502 | pci_restore_state(pdev); | |
3503 | ||
3504 | rc = pci_enable_device(pdev); | |
3505 | if (rc) | |
3506 | return rc; | |
3507 | rc = pci_request_regions(pdev, DRV_NAME); | |
3508 | if (rc) | |
3509 | goto err_out; | |
3510 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
3511 | if (!rc) { | |
3512 | if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { | |
3513 | ||
f98806d6 BVA |
3514 | dev_err(&pdev->dev, "consistent DMA mask error\n"); | |
e67f86b3 AB |
3516 | } |
3517 | } else { | |
3518 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
3519 | if (rc) { | |
3520 | ||
f98806d6 | 3521 | dev_err(&pdev->dev, "DMA mask error %d\n", rc); |
e67f86b3 AB |
3522 | goto err_out_regions; |
3523 | } | |
3524 | } | |
3525 | ||
3526 | pci_set_master(pdev); | |
3527 | rc = pci_enable_pcie_error_reporting(pdev); | |
3528 | if (rc) { | |
f98806d6 BVA |
3529 | dev_err(&pdev->dev, |
3530 | "bad enable of PCIe error reporting rc=%d\n", rc); | |
e67f86b3 AB |
3531 | skdev->pcie_error_reporting_is_enabled = 0; |
3532 | } else | |
3533 | skdev->pcie_error_reporting_is_enabled = 1; | |
3534 | ||
3535 | for (i = 0; i < SKD_MAX_BARS; i++) { | |
3536 | ||
3537 | skdev->mem_phys[i] = pci_resource_start(pdev, i); | |
3538 | skdev->mem_size[i] = (u32)pci_resource_len(pdev, i); | |
3539 | skdev->mem_map[i] = ioremap(skdev->mem_phys[i], | |
3540 | skdev->mem_size[i]); | |
3541 | if (!skdev->mem_map[i]) { | |
f98806d6 | 3542 | dev_err(&pdev->dev, "Unable to map adapter memory!\n"); |
e67f86b3 AB |
3543 | rc = -ENODEV; |
3544 | goto err_out_iounmap; | |
3545 | } | |
f98806d6 BVA |
3546 | dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n", |
3547 | skdev->mem_map[i], (uint64_t)skdev->mem_phys[i], | |
3548 | skdev->mem_size[i]); | |
e67f86b3 AB |
3549 | } |
3550 | rc = skd_acquire_irq(skdev); | |
3551 | if (rc) { | |
f98806d6 | 3552 | dev_err(&pdev->dev, "interrupt resource error %d\n", rc); |
e67f86b3 AB |
3553 | goto err_out_iounmap; |
3554 | } | |
3555 | ||
3556 | rc = skd_start_timer(skdev); | |
3557 | if (rc) | |
3558 | goto err_out_timer; | |
3559 | ||
3560 | init_waitqueue_head(&skdev->waitq); | |
3561 | ||
3562 | skd_start_device(skdev); | |
3563 | ||
3564 | return rc; | |
3565 | ||
3566 | err_out_timer: | |
3567 | skd_stop_device(skdev); | |
3568 | skd_release_irq(skdev); | |
3569 | ||
3570 | err_out_iounmap: | |
3571 | for (i = 0; i < SKD_MAX_BARS; i++) | |
3572 | if (skdev->mem_map[i]) | |
3573 | iounmap(skdev->mem_map[i]); | |
3574 | ||
3575 | if (skdev->pcie_error_reporting_is_enabled) | |
3576 | pci_disable_pcie_error_reporting(pdev); | |
3577 | ||
3578 | err_out_regions: | |
3579 | pci_release_regions(pdev); | |
3580 | ||
3581 | err_out: | |
3582 | pci_disable_device(pdev); | |
3583 | return rc; | |
3584 | } | |
3585 | ||
3586 | static void skd_pci_shutdown(struct pci_dev *pdev) | |
3587 | { | |
3588 | struct skd_device *skdev; | |
3589 | ||
f98806d6 | 3590 | dev_info(&pdev->dev, "%s called\n", __func__);
e67f86b3 AB |
3591 | |
3592 | skdev = pci_get_drvdata(pdev); | |
3593 | if (!skdev) { | |
f98806d6 | 3594 | dev_err(&pdev->dev, "no device data for PCI\n"); |
e67f86b3 AB |
3595 | return; |
3596 | } | |
3597 | ||
f98806d6 | 3598 | dev_info(&pdev->dev, "calling stop\n");
e67f86b3 AB |
3599 | skd_stop_device(skdev); |
3600 | } | |
3601 | ||
3602 | static struct pci_driver skd_driver = { | |
3603 | .name = DRV_NAME, | |
3604 | .id_table = skd_pci_tbl, | |
3605 | .probe = skd_pci_probe, | |
3606 | .remove = skd_pci_remove, | |
3607 | .suspend = skd_pci_suspend, | |
3608 | .resume = skd_pci_resume, | |
3609 | .shutdown = skd_pci_shutdown, | |
3610 | }; | |
3611 | ||
3612 | /* | |
3613 | ***************************************************************************** | |
3614 | * LOGGING SUPPORT | |
3615 | ***************************************************************************** | |
3616 | */ | |
3617 | ||
e67f86b3 AB |
3618 | const char *skd_drive_state_to_str(int state) |
3619 | { | |
3620 | switch (state) { | |
3621 | case FIT_SR_DRIVE_OFFLINE: | |
3622 | return "OFFLINE"; | |
3623 | case FIT_SR_DRIVE_INIT: | |
3624 | return "INIT"; | |
3625 | case FIT_SR_DRIVE_ONLINE: | |
3626 | return "ONLINE"; | |
3627 | case FIT_SR_DRIVE_BUSY: | |
3628 | return "BUSY"; | |
3629 | case FIT_SR_DRIVE_FAULT: | |
3630 | return "FAULT"; | |
3631 | case FIT_SR_DRIVE_DEGRADED: | |
3632 | return "DEGRADED"; | |
3633 | case FIT_SR_PCIE_LINK_DOWN: | |
3634 | return "INK_DOWN"; | |
3635 | case FIT_SR_DRIVE_SOFT_RESET: | |
3636 | return "SOFT_RESET"; | |
3637 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | |
3638 | return "NEED_FW"; | |
3639 | case FIT_SR_DRIVE_INIT_FAULT: | |
3640 | return "INIT_FAULT"; | |
3641 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
3642 | return "BUSY_SANITIZE"; | |
3643 | case FIT_SR_DRIVE_BUSY_ERASE: | |
3644 | return "BUSY_ERASE"; | |
3645 | case FIT_SR_DRIVE_FW_BOOTING: | |
3646 | return "FW_BOOTING"; | |
3647 | default: | |
3648 | return "???"; | |
3649 | } | |
3650 | } | |
3651 | ||
3652 | const char *skd_skdev_state_to_str(enum skd_drvr_state state) | |
3653 | { | |
3654 | switch (state) { | |
3655 | case SKD_DRVR_STATE_LOAD: | |
3656 | return "LOAD"; | |
3657 | case SKD_DRVR_STATE_IDLE: | |
3658 | return "IDLE"; | |
3659 | case SKD_DRVR_STATE_BUSY: | |
3660 | return "BUSY"; | |
3661 | case SKD_DRVR_STATE_STARTING: | |
3662 | return "STARTING"; | |
3663 | case SKD_DRVR_STATE_ONLINE: | |
3664 | return "ONLINE"; | |
3665 | case SKD_DRVR_STATE_PAUSING: | |
3666 | return "PAUSING"; | |
3667 | case SKD_DRVR_STATE_PAUSED: | |
3668 | return "PAUSED"; | |
3669 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
3670 | return "DRAINING_TIMEOUT"; | |
3671 | case SKD_DRVR_STATE_RESTARTING: | |
3672 | return "RESTARTING"; | |
3673 | case SKD_DRVR_STATE_RESUMING: | |
3674 | return "RESUMING"; | |
3675 | case SKD_DRVR_STATE_STOPPING: | |
3676 | return "STOPPING"; | |
3677 | case SKD_DRVR_STATE_SYNCING: | |
3678 | return "SYNCING"; | |
3679 | case SKD_DRVR_STATE_FAULT: | |
3680 | return "FAULT"; | |
3681 | case SKD_DRVR_STATE_DISAPPEARED: | |
3682 | return "DISAPPEARED"; | |
3683 | case SKD_DRVR_STATE_BUSY_ERASE: | |
3684 | return "BUSY_ERASE"; | |
3685 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
3686 | return "BUSY_SANITIZE"; | |
3687 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
3688 | return "BUSY_IMMINENT"; | |
3689 | case SKD_DRVR_STATE_WAIT_BOOT: | |
3690 | return "WAIT_BOOT"; | |
3691 | ||
3692 | default: | |
3693 | return "???"; | |
3694 | } | |
3695 | } | |
3696 | ||
a26ba7fa | 3697 | static const char *skd_skreq_state_to_str(enum skd_req_state state) |
e67f86b3 AB |
3698 | { |
3699 | switch (state) { | |
3700 | case SKD_REQ_STATE_IDLE: | |
3701 | return "IDLE"; | |
3702 | case SKD_REQ_STATE_SETUP: | |
3703 | return "SETUP"; | |
3704 | case SKD_REQ_STATE_BUSY: | |
3705 | return "BUSY"; | |
3706 | case SKD_REQ_STATE_COMPLETED: | |
3707 | return "COMPLETED"; | |
3708 | case SKD_REQ_STATE_TIMEOUT: | |
3709 | return "TIMEOUT"; | |
e67f86b3 AB |
3710 | default: |
3711 | return "???"; | |
3712 | } | |
3713 | } | |
3714 | ||
3715 | static void skd_log_skdev(struct skd_device *skdev, const char *event) | |
3716 | { | |
f98806d6 BVA |
3717 | dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event); |
3718 | dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n", | |
3719 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | |
3720 | skd_skdev_state_to_str(skdev->state), skdev->state); | |
3721 | dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n", | |
6fbb2de5 | 3722 | atomic_read(&skdev->in_flight), skdev->cur_max_queue_depth, |
f98806d6 BVA |
3723 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); |
3724 | dev_dbg(&skdev->pdev->dev, " timestamp=0x%x cycle=%d cycle_ix=%d\n", | |
6fbb2de5 BVA |
3725 | atomic_read(&skdev->timeout_stamp), skdev->skcomp_cycle, |
3726 | skdev->skcomp_ix); | |
e67f86b3 AB |
3727 | } |
3728 | ||
e67f86b3 AB |
3729 | static void skd_log_skreq(struct skd_device *skdev, |
3730 | struct skd_request_context *skreq, const char *event) | |
3731 | { | |
f98806d6 BVA |
3732 | dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event); |
3733 | dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n", | |
3734 | skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id, | |
3735 | skreq->fitmsg_id); | |
3736 | dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n", | |
b1824eef | 3737 | skreq->timeout_stamp, skreq->data_dir, skreq->n_sg); |
e67f86b3 | 3738 | |
fcd37eb3 JA |
3739 | if (skreq->req != NULL) { |
3740 | struct request *req = skreq->req; | |
3741 | u32 lba = (u32)blk_rq_pos(req); | |
3742 | u32 count = blk_rq_sectors(req); | |
e67f86b3 | 3743 | |
f98806d6 BVA |
3744 | dev_dbg(&skdev->pdev->dev, |
3745 | "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, | |
3746 | lba, lba, count, count, (int)rq_data_dir(req)); | |
fcd37eb3 | 3747 | } else |
f98806d6 | 3748 | dev_dbg(&skdev->pdev->dev, "req=NULL\n"); |
e67f86b3 AB |
3749 | } |
3750 | ||
3751 | /* | |
3752 | ***************************************************************************** | |
3753 | * MODULE GLUE | |
3754 | ***************************************************************************** | |
3755 | */ | |
3756 | ||
3757 | static int __init skd_init(void) | |
3758 | { | |
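	/*
	 * Compile-time layout checks: the build fails here if the FIT
	 * wire-format structures ever drift from the sizes and offsets
	 * the device firmware expects.
	 */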
16a70534 BVA |
3759 | BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8); |
3760 | BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32); | |
3761 | BUILD_BUG_ON(sizeof(struct skd_command_header) != 16); | |
3762 | BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32); | |
3763 | BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44); | |
d891fe60 BVA |
3764 | BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0); |
3765 | BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64); | |
3766 | BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES); | |
2da7b403 | 3767 | |
e67f86b3 AB |
3768 | pr_info(PFX "v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
3769 | ||
3770 | switch (skd_isr_type) { | |
3771 | case SKD_IRQ_LEGACY: | |
3772 | case SKD_IRQ_MSI: | |
3773 | case SKD_IRQ_MSIX: | |
3774 | break; | |
3775 | default: | |
fbed149a | 3776 | pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n", |
e67f86b3 AB |
3777 | skd_isr_type, SKD_IRQ_DEFAULT); |
3778 | skd_isr_type = SKD_IRQ_DEFAULT; | |
3779 | } | |
3780 | ||
fbed149a BZ |
3781 | if (skd_max_queue_depth < 1 || |
3782 | skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) { | |
3783 | pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n", | |
e67f86b3 AB |
3784 | skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT); |
3785 | skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT; | |
3786 | } | |
3787 | ||
2da7b403 BVA |
3788 | if (skd_max_req_per_msg < 1 || |
3789 | skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) { | |
fbed149a | 3790 | pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n", |
e67f86b3 AB |
3791 | skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT); |
3792 | skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT; | |
3793 | } | |
3794 | ||
3795 | if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) { | |
fbed149a | 3796 | pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
e67f86b3 AB |
3797 | skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT); |
3798 | skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT; | |
3799 | } | |
3800 | ||
3801 | if (skd_dbg_level < 0 || skd_dbg_level > 2) { | |
fbed149a | 3802 | pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n", |
e67f86b3 AB |
3803 | skd_dbg_level, 0); |
3804 | skd_dbg_level = 0; | |
3805 | } | |
3806 | ||
3807 | if (skd_isr_comp_limit < 0) { | |
fbed149a | 3808 | pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n", |
e67f86b3 AB |
3809 | skd_isr_comp_limit, 0); |
3810 | skd_isr_comp_limit = 0; | |
3811 | } | |
3812 | ||
b8df6647 | 3813 | return pci_register_driver(&skd_driver); |
e67f86b3 AB |
3814 | } |
3815 | ||
3816 | static void __exit skd_exit(void) | |
3817 | { | |
3818 | pr_info(PFX "v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID); | |
3819 | ||
e67f86b3 | 3820 | pci_unregister_driver(&skd_driver); |
b8df6647 BZ |
3821 | |
3822 | if (skd_major) | |
3823 | unregister_blkdev(skd_major, DRV_NAME); | |
e67f86b3 AB |
3824 | } |
3825 | ||
e67f86b3 AB |
3826 | module_init(skd_init); |
3827 | module_exit(skd_exit); |