/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *	Initial Driver Design!
 * Thomas Swann <tswann@stec-inc.com>
 *	Interrupt handling.
 * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *	biomode implementation.
 * Akhil Bhansali <abhansali@stec-inc.com>
 *	Added support for DISCARD / FLUSH and FUA.
 */
18 | ||
19 | #include <linux/kernel.h> | |
20 | #include <linux/module.h> | |
21 | #include <linux/init.h> | |
22 | #include <linux/pci.h> | |
23 | #include <linux/slab.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/blkdev.h> | |
26 | #include <linux/sched.h> | |
27 | #include <linux/interrupt.h> | |
28 | #include <linux/compiler.h> | |
29 | #include <linux/workqueue.h> | |
30 | #include <linux/bitops.h> | |
31 | #include <linux/delay.h> | |
32 | #include <linux/time.h> | |
33 | #include <linux/hdreg.h> | |
34 | #include <linux/dma-mapping.h> | |
35 | #include <linux/completion.h> | |
36 | #include <linux/scatterlist.h> | |
37 | #include <linux/version.h> | |
38 | #include <linux/err.h> | |
39 | #include <linux/scatterlist.h> | |
40 | #include <linux/aer.h> | |
41 | #include <linux/ctype.h> | |
42 | #include <linux/wait.h> | |
43 | #include <linux/uio.h> | |
44 | #include <scsi/scsi.h> | |
45 | #include <scsi/scsi_host.h> | |
46 | #include <scsi/scsi_tcq.h> | |
47 | #include <scsi/scsi_cmnd.h> | |
48 | #include <scsi/sg.h> | |
49 | #include <linux/io.h> | |
50 | #include <linux/uaccess.h> | |
51 | #include <asm-generic/unaligned.h> | |
52 | ||
53 | #include "skd_s1120.h" | |
54 | ||
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

enum {
	STEC_LINK_2_5GTS = 0,
	STEC_LINK_5GTS = 1,
	STEC_LINK_8GTS = 2,
	STEC_LINK_UNKNOWN = 0xFF
};

enum {
	SKD_FLUSH_INITIALIZER,
	SKD_FLUSH_ZERO_SIZE_FIRST,
	SKD_FLUSH_DATA_SECOND,
};

#define DPRINTK(skdev, fmt, args ...) \
	do { \
		if (unlikely((skdev)->dbg_level > 0)) { \
			pr_err("%s:%s:%d " fmt, (skdev)->name, \
			       __func__, __LINE__, ## args); \
		} \
	} while (0)

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define VPRINTK(skdev, fmt, args ...) \
	do { \
		if (unlikely((skdev)->dbg_level > 1)) { \
			pr_err("%s:%s:%d " fmt, (skdev)->name, \
			       __func__, __LINE__, ## args); \
		} \
	} while (0)
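
/*
 * Illustrative usage (not from the original source): with dbg_level == 1,
 * DPRINTK(skdev, "qdepth=%d\n", skdev->in_flight) is emitted while VPRINTK
 * stays quiet; VPRINTK requires dbg_level > 1. Both prefix the device name,
 * function, and line number.
 */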
94 | ||
95 | ||
96 | #define DRV_NAME "skd" | |
97 | #define DRV_VERSION "2.2.1" | |
98 | #define DRV_BUILD_ID "0260" | |
99 | #define PFX DRV_NAME ": " | |
100 | #define DRV_BIN_VERSION 0x100 | |
101 | #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID | |
102 | ||
103 | MODULE_AUTHOR("bug-reports: support@stec-inc.com"); | |
104 | MODULE_LICENSE("Dual BSD/GPL"); | |
105 | ||
106 | MODULE_DESCRIPTION("STEC s1120 PCIe SSD block/BIO driver (b" DRV_BUILD_ID ")"); | |
107 | MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID); | |
108 | ||
109 | #define PCI_VENDOR_ID_STEC 0x1B39 | |
110 | #define PCI_DEVICE_ID_S1120 0x0001 | |
111 | ||
112 | #define SKD_FUA_NV (1 << 1) | |
113 | #define SKD_MINORS_PER_DEVICE 16 | |
114 | ||
115 | #define SKD_MAX_QUEUE_DEPTH 200u | |
116 | ||
117 | #define SKD_PAUSE_TIMEOUT (5 * 1000) | |
118 | ||
119 | #define SKD_N_FITMSG_BYTES (512u) | |
120 | ||
121 | #define SKD_N_SPECIAL_CONTEXT 32u | |
122 | #define SKD_N_SPECIAL_FITMSG_BYTES (128u) | |
123 | ||
124 | /* SG elements are 32 bytes, so we can make this 4096 and still be under the | |
125 | * 128KB limit. That allows 4096*4K = 16M xfer size | |
126 | */ | |
127 | #define SKD_N_SG_PER_REQ_DEFAULT 256u | |
128 | #define SKD_N_SG_PER_SPECIAL 256u | |
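
/* Worked sizing (derived from the comment above): 4096 descriptors * 32 B
 * = 128 KB of descriptor space, and 4096 entries * 4 KB pages = 16 MB per
 * transfer; the default of 256 keeps each per-request table at 8 KB. */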
129 | ||
130 | #define SKD_N_COMPLETION_ENTRY 256u | |
131 | #define SKD_N_READ_CAP_BYTES (8u) | |
132 | ||
133 | #define SKD_N_INTERNAL_BYTES (512u) | |
134 | ||
135 | /* 5 bits of uniqifier, 0xF800 */ | |
136 | #define SKD_ID_INCR (0x400) | |
137 | #define SKD_ID_TABLE_MASK (3u << 8u) | |
138 | #define SKD_ID_RW_REQUEST (0u << 8u) | |
139 | #define SKD_ID_INTERNAL (1u << 8u) | |
140 | #define SKD_ID_SPECIAL_REQUEST (2u << 8u) | |
141 | #define SKD_ID_FIT_MSG (3u << 8u) | |
142 | #define SKD_ID_SLOT_MASK 0x00FFu | |
143 | #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu | |
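
/*
 * ID layout implied by the masks above: bits [7:0] index a slot, bits [9:8]
 * select the table (RW / internal / special / FIT msg), and the upper bits
 * advance by SKD_ID_INCR on each reuse so a stale completion can be told
 * apart from a live one.
 */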
144 | ||
145 | #define SKD_N_TIMEOUT_SLOT 4u | |
146 | #define SKD_TIMEOUT_SLOT_MASK 3u | |
147 | ||
148 | #define SKD_N_MAX_SECTORS 2048u | |
149 | ||
150 | #define SKD_MAX_RETRIES 2u | |
151 | ||
152 | #define SKD_TIMER_SECONDS(seconds) (seconds) | |
153 | #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60)) | |
154 | ||
155 | #define INQ_STD_NBYTES 36 | |
156 | #define SKD_DISCARD_CDB_LENGTH 24 | |
157 | ||
158 | enum skd_drvr_state { | |
159 | SKD_DRVR_STATE_LOAD, | |
160 | SKD_DRVR_STATE_IDLE, | |
161 | SKD_DRVR_STATE_BUSY, | |
162 | SKD_DRVR_STATE_STARTING, | |
163 | SKD_DRVR_STATE_ONLINE, | |
164 | SKD_DRVR_STATE_PAUSING, | |
165 | SKD_DRVR_STATE_PAUSED, | |
166 | SKD_DRVR_STATE_DRAINING_TIMEOUT, | |
167 | SKD_DRVR_STATE_RESTARTING, | |
168 | SKD_DRVR_STATE_RESUMING, | |
169 | SKD_DRVR_STATE_STOPPING, | |
170 | SKD_DRVR_STATE_FAULT, | |
171 | SKD_DRVR_STATE_DISAPPEARED, | |
172 | SKD_DRVR_STATE_PROTOCOL_MISMATCH, | |
173 | SKD_DRVR_STATE_BUSY_ERASE, | |
174 | SKD_DRVR_STATE_BUSY_SANITIZE, | |
175 | SKD_DRVR_STATE_BUSY_IMMINENT, | |
176 | SKD_DRVR_STATE_WAIT_BOOT, | |
177 | SKD_DRVR_STATE_SYNCING, | |
178 | }; | |
179 | ||
180 | #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u) | |
181 | #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u) | |
182 | #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u) | |
183 | #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u) | |
184 | #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u) | |
185 | #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u) | |
186 | #define SKD_START_WAIT_SECONDS 90u | |
187 | ||
188 | enum skd_req_state { | |
189 | SKD_REQ_STATE_IDLE, | |
190 | SKD_REQ_STATE_SETUP, | |
191 | SKD_REQ_STATE_BUSY, | |
192 | SKD_REQ_STATE_COMPLETED, | |
193 | SKD_REQ_STATE_TIMEOUT, | |
194 | SKD_REQ_STATE_ABORTED, | |
195 | }; | |
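
/*
 * Typical lifecycle suggested by the request code below (a reading aid, not
 * authoritative): IDLE -> BUSY once the request is issued in a FIT message,
 * then COMPLETED on a good completion, or TIMEOUT/ABORTED on recovery paths.
 */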
196 | ||
197 | enum skd_fit_msg_state { | |
198 | SKD_MSG_STATE_IDLE, | |
199 | SKD_MSG_STATE_BUSY, | |
200 | }; | |
201 | ||
202 | enum skd_check_status_action { | |
203 | SKD_CHECK_STATUS_REPORT_GOOD, | |
204 | SKD_CHECK_STATUS_REPORT_SMART_ALERT, | |
205 | SKD_CHECK_STATUS_REQUEUE_REQUEST, | |
206 | SKD_CHECK_STATUS_REPORT_ERROR, | |
207 | SKD_CHECK_STATUS_BUSY_IMMINENT, | |
208 | }; | |
209 | ||
210 | struct skd_fitmsg_context { | |
211 | enum skd_fit_msg_state state; | |
212 | ||
213 | struct skd_fitmsg_context *next; | |
214 | ||
215 | u32 id; | |
216 | u16 outstanding; | |
217 | ||
218 | u32 length; | |
219 | u32 offset; | |
220 | ||
221 | u8 *msg_buf; | |
222 | dma_addr_t mb_dma_address; | |
223 | }; | |
224 | ||
225 | struct skd_request_context { | |
226 | enum skd_req_state state; | |
227 | ||
228 | struct skd_request_context *next; | |
229 | ||
230 | u16 id; | |
231 | u32 fitmsg_id; | |
232 | ||
233 | struct request *req; | |
234 | struct bio *bio; | |
235 | unsigned long start_time; | |
236 | u8 flush_cmd; | |
237 | u8 discard_page; | |
238 | ||
239 | u32 timeout_stamp; | |
240 | u8 sg_data_dir; | |
241 | struct scatterlist *sg; | |
242 | u32 n_sg; | |
243 | u32 sg_byte_count; | |
244 | ||
245 | struct fit_sg_descriptor *sksg_list; | |
246 | dma_addr_t sksg_dma_address; | |
247 | ||
248 | struct fit_completion_entry_v1 completion; | |
249 | ||
250 | struct fit_comp_error_info err_info; | |
251 | ||
252 | }; | |
253 | #define SKD_DATA_DIR_HOST_TO_CARD 1 | |
254 | #define SKD_DATA_DIR_CARD_TO_HOST 2 | |
255 | #define SKD_DATA_DIR_NONE 3 /* especially for DISCARD requests. */ | |
256 | ||
257 | struct skd_special_context { | |
258 | struct skd_request_context req; | |
259 | ||
260 | u8 orphaned; | |
261 | ||
262 | void *data_buf; | |
263 | dma_addr_t db_dma_address; | |
264 | ||
265 | u8 *msg_buf; | |
266 | dma_addr_t mb_dma_address; | |
267 | }; | |
268 | ||
269 | struct skd_sg_io { | |
270 | fmode_t mode; | |
271 | void __user *argp; | |
272 | ||
273 | struct sg_io_hdr sg; | |
274 | ||
275 | u8 cdb[16]; | |
276 | ||
277 | u32 dxfer_len; | |
278 | u32 iovcnt; | |
279 | struct sg_iovec *iov; | |
280 | struct sg_iovec no_iov_iov; | |
281 | ||
282 | struct skd_special_context *skspcl; | |
283 | }; | |
284 | ||
285 | typedef enum skd_irq_type { | |
286 | SKD_IRQ_LEGACY, | |
287 | SKD_IRQ_MSI, | |
288 | SKD_IRQ_MSIX | |
289 | } skd_irq_type_t; | |
290 | ||
291 | #define SKD_MAX_BARS 2 | |
292 | ||

struct skd_device {
	volatile void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	skd_irq_type_t irq_type;
	u32 msix_count;
	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct request_queue *queue;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	atomic_t device_count;
	u32 devno;
	u32 major;
	char name[32];
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 in_flight;
	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
	u32 timeout_stamp;
	struct skd_fitmsg_context *skmsg_free_list;
	struct skd_fitmsg_context *skmsg_table;

	struct skd_request_context *skreq_free_list;
	struct skd_request_context *skreq_table;

	struct skd_special_context *skspcl_free_list;
	struct skd_special_context *skspcl_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */
	u8 id_str[80]; /* holds a composite name (pci + sernum) */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int n_special;
	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct completion_worker;

	struct bio_list bio_queue;
	int queue_stopped;

	struct list_head flush_list;
};

#define SKD_FLUSH_JOB "skd-flush-jobs"
struct kmem_cache *skd_flush_slab;

/*
 * These commands hold "nonzero size FLUSH bios",
 * which are enqueued in skdev->flush_list during
 * completion of "zero size FLUSH commands".
 * This list is only used in biomode.
 */
struct skd_flush_cmd {
	void *cmd;
	struct list_head flist;
};
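
/*
 * Two-phase FLUSH handling in biomode (summarizing the enum near the top of
 * this file and skd_request_fn below): a FLUSH bio that carries data is
 * first issued as a zero-size flush (SKD_FLUSH_ZERO_SIZE_FIRST); when that
 * completes, the data part is queued on flush_list and replayed through
 * skd_request_fn as SKD_FLUSH_DATA_SECOND.
 */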
396 | ||
397 | #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF) | |
398 | #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF) | |
399 | #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF) | |
400 | ||
401 | static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset) | |
402 | { | |
403 | u32 val; | |
404 | ||
405 | if (likely(skdev->dbg_level < 2)) | |
406 | return readl(skdev->mem_map[1] + offset); | |
407 | else { | |
408 | barrier(); | |
409 | val = readl(skdev->mem_map[1] + offset); | |
410 | barrier(); | |
411 | VPRINTK(skdev, "offset %x = %x\n", offset, val); | |
412 | return val; | |
413 | } | |
414 | ||
415 | } | |
416 | ||
417 | static inline void skd_reg_write32(struct skd_device *skdev, u32 val, | |
418 | u32 offset) | |
419 | { | |
420 | if (likely(skdev->dbg_level < 2)) { | |
421 | writel(val, skdev->mem_map[1] + offset); | |
422 | barrier(); | |
	} else {
		barrier();
		writel(val, skdev->mem_map[1] + offset);
		barrier();
		VPRINTK(skdev, "offset %x = %x\n", offset, val);
	}
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	if (likely(skdev->dbg_level < 2)) {
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
	} else {
		barrier();
		writeq(val, skdev->mem_map[1] + offset);
		barrier();
		VPRINTK(skdev, "offset %x = %016llx\n", offset, val);
	}
}


#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");

static int skd_bio;
module_param(skd_bio, int, 0444);
MODULE_PARM_DESC(skd_bio,
		 "Register as a bio device instead of block (0, 1) default=0");
491 | ||
492 | /* Major device number dynamically assigned. */ | |
493 | static u32 skd_major; | |
494 | ||
495 | static struct skd_device *skd_construct(struct pci_dev *pdev); | |
496 | static void skd_destruct(struct skd_device *skdev); | |
497 | static const struct block_device_operations skd_blockdev_ops; | |
498 | static void skd_send_fitmsg(struct skd_device *skdev, | |
499 | struct skd_fitmsg_context *skmsg); | |
500 | static void skd_send_special_fitmsg(struct skd_device *skdev, | |
501 | struct skd_special_context *skspcl); | |
502 | static void skd_request_fn(struct request_queue *rq); | |
503 | static void skd_end_request(struct skd_device *skdev, | |
504 | struct skd_request_context *skreq, int error); | |
505 | static int skd_preop_sg_list(struct skd_device *skdev, | |
506 | struct skd_request_context *skreq); | |
507 | static void skd_postop_sg_list(struct skd_device *skdev, | |
508 | struct skd_request_context *skreq); | |
509 | ||
510 | static void skd_restart_device(struct skd_device *skdev); | |
511 | static int skd_quiesce_dev(struct skd_device *skdev); | |
512 | static int skd_unquiesce_dev(struct skd_device *skdev); | |
513 | static void skd_release_special(struct skd_device *skdev, | |
514 | struct skd_special_context *skspcl); | |
515 | static void skd_disable_interrupts(struct skd_device *skdev); | |
516 | static void skd_isr_fwstate(struct skd_device *skdev); | |
517 | static void skd_recover_requests(struct skd_device *skdev, int requeue); | |
518 | static void skd_soft_reset(struct skd_device *skdev); | |
519 | ||
520 | static const char *skd_name(struct skd_device *skdev); | |
521 | const char *skd_drive_state_to_str(int state); | |
522 | const char *skd_skdev_state_to_str(enum skd_drvr_state state); | |
523 | static void skd_log_skdev(struct skd_device *skdev, const char *event); | |
524 | static void skd_log_skmsg(struct skd_device *skdev, | |
525 | struct skd_fitmsg_context *skmsg, const char *event); | |
526 | static void skd_log_skreq(struct skd_device *skdev, | |
527 | struct skd_request_context *skreq, const char *event); | |
528 | ||
529 | /* FLUSH FUA flag handling. */ | |
530 | static int skd_flush_cmd_enqueue(struct skd_device *, void *); | |
531 | static void *skd_flush_cmd_dequeue(struct skd_device *); | |
532 | ||
533 | ||
534 | /* | |
535 | ***************************************************************************** | |
536 | * READ/WRITE REQUESTS | |
537 | ***************************************************************************** | |
538 | */ | |
539 | static void skd_stop_queue(struct skd_device *skdev) | |
540 | { | |
541 | if (!skd_bio) | |
542 | blk_stop_queue(skdev->queue); | |
543 | else | |
544 | skdev->queue_stopped = 1; | |
545 | } | |
546 | ||
547 | static void skd_unstop_queue(struct skd_device *skdev) | |
548 | { | |
549 | if (!skd_bio) | |
550 | queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue); | |
551 | else | |
552 | skdev->queue_stopped = 0; | |
553 | } | |
554 | ||
555 | static void skd_start_queue(struct skd_device *skdev) | |
556 | { | |
557 | if (!skd_bio) { | |
558 | blk_start_queue(skdev->queue); | |
559 | } else { | |
560 | pr_err("(%s): Starting queue\n", skd_name(skdev)); | |
561 | skdev->queue_stopped = 0; | |
562 | skd_request_fn(skdev->queue); | |
563 | } | |
564 | } | |
565 | ||
566 | static int skd_queue_stopped(struct skd_device *skdev) | |
567 | { | |
568 | if (!skd_bio) | |
569 | return blk_queue_stopped(skdev->queue); | |
570 | else | |
571 | return skdev->queue_stopped; | |
572 | } | |
573 | ||
574 | static void skd_fail_all_pending_blk(struct skd_device *skdev) | |
575 | { | |
576 | struct request_queue *q = skdev->queue; | |
577 | struct request *req; | |
578 | ||
579 | for (;; ) { | |
580 | req = blk_peek_request(q); | |
581 | if (req == NULL) | |
582 | break; | |
583 | blk_start_request(req); | |
584 | __blk_end_request_all(req, -EIO); | |
585 | } | |
586 | } | |
587 | ||
588 | static void skd_fail_all_pending_bio(struct skd_device *skdev) | |
589 | { | |
590 | struct bio *bio; | |
591 | int error = -EIO; | |
592 | ||
593 | for (;; ) { | |
594 | bio = bio_list_pop(&skdev->bio_queue); | |
595 | ||
596 | if (bio == NULL) | |
597 | break; | |
598 | ||
599 | bio_endio(bio, error); | |
600 | } | |
601 | } | |
602 | ||
603 | static void skd_fail_all_pending(struct skd_device *skdev) | |
604 | { | |
605 | if (!skd_bio) | |
606 | skd_fail_all_pending_blk(skdev); | |
607 | else | |
608 | skd_fail_all_pending_bio(skdev); | |
609 | } | |
610 | ||
611 | static void skd_make_request(struct request_queue *q, struct bio *bio) | |
612 | { | |
613 | struct skd_device *skdev = q->queuedata; | |
614 | unsigned long flags; | |
615 | ||
616 | spin_lock_irqsave(&skdev->lock, flags); | |
617 | ||
618 | bio_list_add(&skdev->bio_queue, bio); | |
619 | skd_request_fn(skdev->queue); | |
620 | ||
621 | spin_unlock_irqrestore(&skdev->lock, flags); | |
622 | } | |
623 | ||
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = 0x28;
	else
		scsi_req->cdb[0] = 0x2a;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = 0x35;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

static void
skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
		     struct skd_request_context *skreq,
		     struct page *page,
		     u32 lba, u32 count)
{
	char *buf;
	unsigned long len;
	struct request *req;

	buf = page_address(page);
	len = SKD_DISCARD_CDB_LENGTH;

	scsi_req->cdb[0] = UNMAP;
	scsi_req->cdb[8] = len;

	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(count, &buf[16]);

	if (!skd_bio) {
		req = skreq->req;
		blk_add_request_payload(req, page, len);
		req->buffer = buf;
	} else {
		skreq->bio->bi_io_vec->bv_page = page;
		skreq->bio->bi_io_vec->bv_offset = 0;
		skreq->bio->bi_io_vec->bv_len = len;

		skreq->bio->bi_vcnt = 1;
		skreq->bio->bi_phys_segments = 1;
	}
}
697 | ||
698 | static void skd_request_fn_not_online(struct request_queue *q); | |
699 | ||
700 | static void skd_request_fn(struct request_queue *q) | |
701 | { | |
702 | struct skd_device *skdev = q->queuedata; | |
703 | struct skd_fitmsg_context *skmsg = NULL; | |
704 | struct fit_msg_hdr *fmh = NULL; | |
705 | struct skd_request_context *skreq; | |
706 | struct request *req = NULL; | |
707 | struct bio *bio = NULL; | |
708 | struct skd_scsi_request *scsi_req; | |
709 | struct page *page; | |
710 | unsigned long io_flags; | |
711 | int error; | |
712 | u32 lba; | |
713 | u32 count; | |
714 | int data_dir; | |
715 | u32 be_lba; | |
716 | u32 be_count; | |
717 | u64 be_dmaa; | |
718 | u64 cmdctxt; | |
719 | u32 timo_slot; | |
720 | void *cmd_ptr; | |
721 | int flush, fua; | |
722 | ||
723 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
724 | skd_request_fn_not_online(q); | |
725 | return; | |
726 | } | |
727 | ||
728 | if (skd_queue_stopped(skdev)) { | |
729 | if (skdev->skmsg_free_list == NULL || | |
730 | skdev->skreq_free_list == NULL || | |
731 | skdev->in_flight >= skdev->queue_low_water_mark) | |
732 | /* There is still some kind of shortage */ | |
733 | return; | |
734 | ||
735 | skd_unstop_queue(skdev); | |
736 | } | |
737 | ||
738 | /* | |
739 | * Stop conditions: | |
740 | * - There are no more native requests | |
741 | * - There are already the maximum number of requests in progress | |
742 | * - There are no more skd_request_context entries | |
743 | * - There are no more FIT msg buffers | |
744 | */ | |
745 | for (;; ) { | |
746 | ||
747 | flush = fua = 0; | |
748 | ||
749 | if (!skd_bio) { | |
750 | req = blk_peek_request(q); | |
751 | ||
752 | /* Are there any native requests to start? */ | |
753 | if (req == NULL) | |
754 | break; | |
755 | ||
756 | lba = (u32)blk_rq_pos(req); | |
757 | count = blk_rq_sectors(req); | |
758 | data_dir = rq_data_dir(req); | |
759 | io_flags = req->cmd_flags; | |
760 | ||
761 | if (io_flags & REQ_FLUSH) | |
762 | flush++; | |
763 | ||
764 | if (io_flags & REQ_FUA) | |
765 | fua++; | |
766 | ||
767 | VPRINTK(skdev, | |
768 | "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", | |
769 | req, lba, lba, count, count, data_dir); | |
770 | } else { | |
771 | if (!list_empty(&skdev->flush_list)) { | |
772 | /* Process data part of FLUSH request. */ | |
773 | bio = (struct bio *)skd_flush_cmd_dequeue(skdev); | |
774 | flush++; | |
775 | VPRINTK(skdev, "processing FLUSH request with data.\n"); | |
776 | } else { | |
777 | /* peek at our bio queue */ | |
778 | bio = bio_list_peek(&skdev->bio_queue); | |
779 | } | |
780 | ||
781 | /* Are there any native requests to start? */ | |
782 | if (bio == NULL) | |
783 | break; | |
784 | ||
785 | lba = (u32)bio->bi_sector; | |
786 | count = bio_sectors(bio); | |
787 | data_dir = bio_data_dir(bio); | |
788 | io_flags = bio->bi_rw; | |
789 | ||
790 | VPRINTK(skdev, | |
791 | "new bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", | |
792 | bio, lba, lba, count, count, data_dir); | |
793 | ||
794 | if (io_flags & REQ_FLUSH) | |
795 | flush++; | |
796 | ||
797 | if (io_flags & REQ_FUA) | |
798 | fua++; | |
799 | } | |
800 | ||
801 | /* At this point we know there is a request | |
802 | * (from our bio q or req q depending on the way | |
803 | * the driver is built do checks for resources. | |
804 | */ | |
805 | ||
806 | /* Are too many requets already in progress? */ | |
807 | if (skdev->in_flight >= skdev->cur_max_queue_depth) { | |
808 | VPRINTK(skdev, "qdepth %d, limit %d\n", | |
809 | skdev->in_flight, skdev->cur_max_queue_depth); | |
810 | break; | |
811 | } | |
812 | ||
813 | /* Is a skd_request_context available? */ | |
814 | skreq = skdev->skreq_free_list; | |
815 | if (skreq == NULL) { | |
816 | VPRINTK(skdev, "Out of req=%p\n", q); | |
817 | break; | |
818 | } | |
		SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
		SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

		/* Now we check to see if we can get a fit msg */
		if (skmsg == NULL) {
			if (skdev->skmsg_free_list == NULL) {
				VPRINTK(skdev, "Out of msg\n");
				break;
			}
		}

		skreq->flush_cmd = 0;
		skreq->n_sg = 0;
		skreq->sg_byte_count = 0;
		skreq->discard_page = 0;

		/*
		 * OK to now dequeue request from either bio or q.
		 *
		 * At this point we are committed to either start or reject
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
		if (!skd_bio) {
			blk_start_request(req);
			skreq->req = req;
			skreq->fitmsg_id = 0;
		} else {
			if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
				skreq->bio = bio;
			} else {
				skreq->bio = bio_list_pop(&skdev->bio_queue);
				SKD_ASSERT(skreq->bio == bio);
				skreq->start_time = jiffies;
				part_inc_in_flight(&skdev->disk->part0,
						   bio_data_dir(bio));
			}

			skreq->fitmsg_id = 0;
		}
859 | ||
860 | /* Either a FIT msg is in progress or we have to start one. */ | |
861 | if (skmsg == NULL) { | |
862 | /* Are there any FIT msg buffers available? */ | |
863 | skmsg = skdev->skmsg_free_list; | |
864 | if (skmsg == NULL) { | |
865 | VPRINTK(skdev, "Out of msg skdev=%p\n", skdev); | |
866 | break; | |
867 | } | |
868 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE); | |
869 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0); | |
870 | ||
871 | skdev->skmsg_free_list = skmsg->next; | |
872 | ||
873 | skmsg->state = SKD_MSG_STATE_BUSY; | |
874 | skmsg->id += SKD_ID_INCR; | |
875 | ||
876 | /* Initialize the FIT msg header */ | |
877 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | |
878 | memset(fmh, 0, sizeof(*fmh)); | |
879 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
880 | skmsg->length = sizeof(*fmh); | |
881 | } | |
882 | ||
883 | skreq->fitmsg_id = skmsg->id; | |
884 | ||
885 | /* | |
886 | * Note that a FIT msg may have just been started | |
887 | * but contains no SoFIT requests yet. | |
888 | */ | |
889 | ||
890 | /* | |
891 | * Transcode the request, checking as we go. The outcome of | |
892 | * the transcoding is represented by the error variable. | |
893 | */ | |
894 | cmd_ptr = &skmsg->msg_buf[skmsg->length]; | |
895 | memset(cmd_ptr, 0, 32); | |
896 | ||
897 | be_lba = cpu_to_be32(lba); | |
898 | be_count = cpu_to_be32(count); | |
899 | be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address); | |
900 | cmdctxt = skreq->id + SKD_ID_INCR; | |
901 | ||
902 | scsi_req = cmd_ptr; | |
903 | scsi_req->hdr.tag = cmdctxt; | |
904 | scsi_req->hdr.sg_list_dma_address = be_dmaa; | |
905 | ||
906 | if (data_dir == READ) | |
907 | skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST; | |
908 | else | |
909 | skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD; | |
910 | ||
911 | if (io_flags & REQ_DISCARD) { | |
912 | page = alloc_page(GFP_ATOMIC | __GFP_ZERO); | |
913 | if (!page) { | |
914 | pr_err("request_fn:Page allocation failed.\n"); | |
915 | skd_end_request(skdev, skreq, -ENOMEM); | |
916 | break; | |
917 | } | |
918 | skreq->discard_page = 1; | |
919 | skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); | |
920 | ||
921 | } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { | |
922 | skd_prep_zerosize_flush_cdb(scsi_req, skreq); | |
923 | SKD_ASSERT(skreq->flush_cmd == 1); | |
924 | ||
925 | } else { | |
926 | skd_prep_rw_cdb(scsi_req, data_dir, lba, count); | |
927 | } | |
928 | ||
929 | if (fua) | |
930 | scsi_req->cdb[1] |= SKD_FUA_NV; | |
931 | ||
932 | if ((!skd_bio && !req->bio) || | |
933 | (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST)) | |
934 | goto skip_sg; | |
935 | ||
936 | error = skd_preop_sg_list(skdev, skreq); | |
937 | ||
938 | if (error != 0) { | |
939 | /* | |
940 | * Complete the native request with error. | |
941 | * Note that the request context is still at the | |
942 | * head of the free list, and that the SoFIT request | |
943 | * was encoded into the FIT msg buffer but the FIT | |
944 | * msg length has not been updated. In short, the | |
945 | * only resource that has been allocated but might | |
946 | * not be used is that the FIT msg could be empty. | |
947 | */ | |
948 | DPRINTK(skdev, "error Out\n"); | |
949 | skd_end_request(skdev, skreq, error); | |
950 | continue; | |
951 | } | |
952 | ||
953 | skip_sg: | |
954 | scsi_req->hdr.sg_list_len_bytes = | |
955 | cpu_to_be32(skreq->sg_byte_count); | |
956 | ||
957 | /* Complete resource allocations. */ | |
958 | skdev->skreq_free_list = skreq->next; | |
959 | skreq->state = SKD_REQ_STATE_BUSY; | |
960 | skreq->id += SKD_ID_INCR; | |
961 | ||
962 | skmsg->length += sizeof(struct skd_scsi_request); | |
963 | fmh->num_protocol_cmds_coalesced++; | |
964 | ||
965 | /* | |
966 | * Update the active request counts. | |
967 | * Capture the timeout timestamp. | |
968 | */ | |
969 | skreq->timeout_stamp = skdev->timeout_stamp; | |
970 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
971 | skdev->timeout_slot[timo_slot]++; | |
972 | skdev->in_flight++; | |
973 | VPRINTK(skdev, "req=0x%x busy=%d\n", | |
974 | skreq->id, skdev->in_flight); | |
975 | ||
976 | /* | |
977 | * If the FIT msg buffer is full send it. | |
978 | */ | |
979 | if (skmsg->length >= SKD_N_FITMSG_BYTES || | |
980 | fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) { | |
981 | skd_send_fitmsg(skdev, skmsg); | |
982 | skmsg = NULL; | |
983 | fmh = NULL; | |
984 | } | |
985 | } | |
986 | ||
987 | /* | |
988 | * Is a FIT msg in progress? If it is empty put the buffer back | |
989 | * on the free list. If it is non-empty send what we got. | |
990 | * This minimizes latency when there are fewer requests than | |
991 | * what fits in a FIT msg. | |
992 | */ | |
993 | if (skmsg != NULL) { | |
994 | /* Bigger than just a FIT msg header? */ | |
995 | if (skmsg->length > sizeof(struct fit_msg_hdr)) { | |
996 | VPRINTK(skdev, "sending msg=%p, len %d\n", | |
997 | skmsg, skmsg->length); | |
998 | skd_send_fitmsg(skdev, skmsg); | |
999 | } else { | |
1000 | /* | |
1001 | * The FIT msg is empty. It means we got started | |
1002 | * on the msg, but the requests were rejected. | |
1003 | */ | |
1004 | skmsg->state = SKD_MSG_STATE_IDLE; | |
1005 | skmsg->id += SKD_ID_INCR; | |
1006 | skmsg->next = skdev->skmsg_free_list; | |
1007 | skdev->skmsg_free_list = skmsg; | |
1008 | } | |
1009 | skmsg = NULL; | |
1010 | fmh = NULL; | |
1011 | } | |
1012 | ||
1013 | /* | |
1014 | * If req is non-NULL it means there is something to do but | |
1015 | * we are out of a resource. | |
1016 | */ | |
1017 | if (((!skd_bio) && req) || | |
1018 | ((skd_bio) && bio_list_peek(&skdev->bio_queue))) | |
1019 | skd_stop_queue(skdev); | |
1020 | } | |
1021 | ||
1022 | static void skd_end_request_blk(struct skd_device *skdev, | |
1023 | struct skd_request_context *skreq, int error) | |
1024 | { | |
1025 | struct request *req = skreq->req; | |
1026 | unsigned int io_flags = req->cmd_flags; | |
1027 | ||
1028 | if ((io_flags & REQ_DISCARD) && | |
1029 | (skreq->discard_page == 1)) { | |
1030 | VPRINTK(skdev, "skd_end_request_blk, free the page!"); | |
1031 | free_page((unsigned long)req->buffer); | |
1032 | req->buffer = NULL; | |
1033 | } | |
1034 | ||
1035 | if (unlikely(error)) { | |
1036 | struct request *req = skreq->req; | |
1037 | char *cmd = (rq_data_dir(req) == READ) ? "read" : "write"; | |
1038 | u32 lba = (u32)blk_rq_pos(req); | |
1039 | u32 count = blk_rq_sectors(req); | |
1040 | ||
1041 | pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", | |
1042 | skd_name(skdev), cmd, lba, count, skreq->id); | |
1043 | } else | |
1044 | VPRINTK(skdev, "id=0x%x error=%d\n", skreq->id, error); | |
1045 | ||
1046 | __blk_end_request_all(skreq->req, error); | |
1047 | } | |
1048 | ||
1049 | static int skd_preop_sg_list_blk(struct skd_device *skdev, | |
1050 | struct skd_request_context *skreq) | |
1051 | { | |
1052 | struct request *req = skreq->req; | |
1053 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
1054 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
1055 | struct scatterlist *sg = &skreq->sg[0]; | |
1056 | int n_sg; | |
1057 | int i; | |
1058 | ||
1059 | skreq->sg_byte_count = 0; | |
1060 | ||
1061 | /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD || | |
1062 | skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */ | |
1063 | ||
1064 | n_sg = blk_rq_map_sg(skdev->queue, req, sg); | |
1065 | if (n_sg <= 0) | |
1066 | return -EINVAL; | |
1067 | ||
1068 | /* | |
1069 | * Map scatterlist to PCI bus addresses. | |
1070 | * Note PCI might change the number of entries. | |
1071 | */ | |
1072 | n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir); | |
1073 | if (n_sg <= 0) | |
1074 | return -EINVAL; | |
1075 | ||
1076 | SKD_ASSERT(n_sg <= skdev->sgs_per_request); | |
1077 | ||
1078 | skreq->n_sg = n_sg; | |
1079 | ||
1080 | for (i = 0; i < n_sg; i++) { | |
1081 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
1082 | u32 cnt = sg_dma_len(&sg[i]); | |
1083 | uint64_t dma_addr = sg_dma_address(&sg[i]); | |
1084 | ||
1085 | sgd->control = FIT_SGD_CONTROL_NOT_LAST; | |
1086 | sgd->byte_count = cnt; | |
1087 | skreq->sg_byte_count += cnt; | |
1088 | sgd->host_side_addr = dma_addr; | |
1089 | sgd->dev_side_addr = 0; | |
1090 | } | |
1091 | ||
1092 | skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; | |
1093 | skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; | |
1094 | ||
1095 | if (unlikely(skdev->dbg_level > 1)) { | |
1096 | VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n", | |
1097 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | |
1098 | for (i = 0; i < n_sg; i++) { | |
1099 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
1100 | VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x " | |
1101 | "addr=0x%llx next=0x%llx\n", | |
1102 | i, sgd->byte_count, sgd->control, | |
1103 | sgd->host_side_addr, sgd->next_desc_ptr); | |
1104 | } | |
1105 | } | |
1106 | ||
1107 | return 0; | |
1108 | } | |
1109 | ||
1110 | static void skd_postop_sg_list_blk(struct skd_device *skdev, | |
1111 | struct skd_request_context *skreq) | |
1112 | { | |
1113 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
1114 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
1115 | ||
1116 | /* | |
1117 | * restore the next ptr for next IO request so we | |
1118 | * don't have to set it every time. | |
1119 | */ | |
1120 | skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = | |
1121 | skreq->sksg_dma_address + | |
1122 | ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); | |
1123 | pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir); | |
1124 | } | |
1125 | ||
1126 | static void skd_end_request_bio(struct skd_device *skdev, | |
1127 | struct skd_request_context *skreq, int error) | |
1128 | { | |
1129 | struct bio *bio = skreq->bio; | |
1130 | int rw = bio_data_dir(bio); | |
1131 | unsigned long io_flags = bio->bi_rw; | |
1132 | ||
1133 | if ((io_flags & REQ_DISCARD) && | |
1134 | (skreq->discard_page == 1)) { | |
1135 | VPRINTK(skdev, "biomode: skd_end_request: freeing DISCARD page.\n"); | |
1136 | free_page((unsigned long)page_address(bio->bi_io_vec->bv_page)); | |
1137 | } | |
1138 | ||
1139 | if (unlikely(error)) { | |
1140 | u32 lba = (u32)skreq->bio->bi_sector; | |
1141 | u32 count = bio_sectors(skreq->bio); | |
1142 | char *cmd = (rw == WRITE) ? "write" : "read"; | |
1143 | pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n", | |
1144 | skd_name(skdev), cmd, lba, count, skreq->id); | |
1145 | } | |
1146 | { | |
1147 | int cpu = part_stat_lock(); | |
1148 | ||
1149 | if (likely(!error)) { | |
1150 | part_stat_inc(cpu, &skdev->disk->part0, ios[rw]); | |
1151 | part_stat_add(cpu, &skdev->disk->part0, sectors[rw], | |
1152 | bio_sectors(bio)); | |
1153 | } | |
1154 | part_stat_add(cpu, &skdev->disk->part0, ticks[rw], | |
1155 | jiffies - skreq->start_time); | |
1156 | part_dec_in_flight(&skdev->disk->part0, rw); | |
1157 | part_stat_unlock(); | |
1158 | } | |
1159 | ||
1160 | VPRINTK(skdev, "id=0x%x error=%d\n", skreq->id, error); | |
1161 | ||
1162 | bio_endio(skreq->bio, error); | |
1163 | } | |
1164 | ||
1165 | static int skd_preop_sg_list_bio(struct skd_device *skdev, | |
1166 | struct skd_request_context *skreq) | |
1167 | { | |
1168 | struct bio *bio = skreq->bio; | |
1169 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
1170 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
1171 | int n_sg; | |
1172 | int i; | |
1173 | struct bio_vec *vec; | |
1174 | struct fit_sg_descriptor *sgd; | |
1175 | u64 dma_addr; | |
1176 | u32 count; | |
1177 | int errs = 0; | |
1178 | unsigned int io_flags = 0; | |
1179 | io_flags |= bio->bi_rw; | |
1180 | ||
1181 | skreq->sg_byte_count = 0; | |
1182 | n_sg = skreq->n_sg = skreq->bio->bi_vcnt; | |
1183 | ||
1184 | if (n_sg <= 0) | |
1185 | return -EINVAL; | |
1186 | ||
1187 | if (n_sg > skdev->sgs_per_request) { | |
1188 | pr_err("(%s): sg overflow n=%d\n", | |
1189 | skd_name(skdev), n_sg); | |
1190 | skreq->n_sg = 0; | |
1191 | return -EIO; | |
1192 | } | |
1193 | ||
1194 | for (i = 0; i < skreq->n_sg; i++) { | |
1195 | vec = bio_iovec_idx(bio, i); | |
1196 | dma_addr = pci_map_page(skdev->pdev, | |
1197 | vec->bv_page, | |
1198 | vec->bv_offset, vec->bv_len, pci_dir); | |
1199 | count = vec->bv_len; | |
1200 | ||
1201 | if (count == 0 || count > 64u * 1024u || (count & 3) != 0 | |
1202 | || (dma_addr & 3) != 0) { | |
1203 | pr_err( | |
1204 | "(%s): Bad sg ix=%d count=%d addr=0x%llx\n", | |
1205 | skd_name(skdev), i, count, dma_addr); | |
1206 | errs++; | |
1207 | } | |
1208 | ||
1209 | sgd = &skreq->sksg_list[i]; | |
1210 | ||
1211 | sgd->control = FIT_SGD_CONTROL_NOT_LAST; | |
1212 | sgd->byte_count = vec->bv_len; | |
1213 | skreq->sg_byte_count += vec->bv_len; | |
1214 | sgd->host_side_addr = dma_addr; | |
1215 | sgd->dev_side_addr = 0; /* not used */ | |
1216 | } | |
1217 | ||
1218 | skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL; | |
1219 | skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST; | |
1220 | ||
1221 | ||
1222 | if (!(io_flags & REQ_DISCARD)) { | |
1223 | count = bio_sectors(bio) << 9u; | |
1224 | if (count != skreq->sg_byte_count) { | |
1225 | pr_err("(%s): mismatch count sg=%d req=%d\n", | |
1226 | skd_name(skdev), skreq->sg_byte_count, count); | |
1227 | errs++; | |
1228 | } | |
1229 | } | |
1230 | ||
1231 | if (unlikely(skdev->dbg_level > 1)) { | |
1232 | VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n", | |
1233 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | |
1234 | for (i = 0; i < n_sg; i++) { | |
1235 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
1236 | VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x " | |
1237 | "addr=0x%llx next=0x%llx\n", | |
1238 | i, sgd->byte_count, sgd->control, | |
1239 | sgd->host_side_addr, sgd->next_desc_ptr); | |
1240 | } | |
1241 | } | |
1242 | ||
1243 | if (errs != 0) { | |
1244 | skd_postop_sg_list(skdev, skreq); | |
1245 | skreq->n_sg = 0; | |
1246 | return -EIO; | |
1247 | } | |
1248 | ||
1249 | return 0; | |
1250 | } | |
1251 | ||
1252 | static int skd_preop_sg_list(struct skd_device *skdev, | |
1253 | struct skd_request_context *skreq) | |
1254 | { | |
1255 | if (!skd_bio) | |
1256 | return skd_preop_sg_list_blk(skdev, skreq); | |
1257 | else | |
1258 | return skd_preop_sg_list_bio(skdev, skreq); | |
1259 | } | |
1260 | ||
1261 | static void skd_postop_sg_list_bio(struct skd_device *skdev, | |
1262 | struct skd_request_context *skreq) | |
1263 | { | |
1264 | int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD; | |
1265 | int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE; | |
1266 | int i; | |
1267 | struct fit_sg_descriptor *sgd; | |
1268 | ||
1269 | /* | |
1270 | * restore the next ptr for next IO request so we | |
1271 | * don't have to set it every time. | |
1272 | */ | |
1273 | skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr = | |
1274 | skreq->sksg_dma_address + | |
1275 | ((skreq->n_sg) * sizeof(struct fit_sg_descriptor)); | |
1276 | ||
1277 | for (i = 0; i < skreq->n_sg; i++) { | |
1278 | sgd = &skreq->sksg_list[i]; | |
1279 | pci_unmap_page(skdev->pdev, sgd->host_side_addr, | |
1280 | sgd->byte_count, pci_dir); | |
1281 | } | |
1282 | } | |
1283 | ||
1284 | static void skd_postop_sg_list(struct skd_device *skdev, | |
1285 | struct skd_request_context *skreq) | |
1286 | { | |
1287 | if (!skd_bio) | |
1288 | skd_postop_sg_list_blk(skdev, skreq); | |
1289 | else | |
1290 | skd_postop_sg_list_bio(skdev, skreq); | |
1291 | } | |
1292 | ||
1293 | static void skd_end_request(struct skd_device *skdev, | |
1294 | struct skd_request_context *skreq, int error) | |
1295 | { | |
1296 | if (likely(!skd_bio)) | |
1297 | skd_end_request_blk(skdev, skreq, error); | |
1298 | else | |
1299 | skd_end_request_bio(skdev, skreq, error); | |
1300 | } | |
1301 | ||
1302 | static void skd_request_fn_not_online(struct request_queue *q) | |
1303 | { | |
1304 | struct skd_device *skdev = q->queuedata; | |
1305 | int error; | |
1306 | ||
1307 | SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE); | |
1308 | ||
1309 | skd_log_skdev(skdev, "req_not_online"); | |
1310 | switch (skdev->state) { | |
1311 | case SKD_DRVR_STATE_PAUSING: | |
1312 | case SKD_DRVR_STATE_PAUSED: | |
1313 | case SKD_DRVR_STATE_STARTING: | |
1314 | case SKD_DRVR_STATE_RESTARTING: | |
1315 | case SKD_DRVR_STATE_WAIT_BOOT: | |
1316 | /* In case of starting, we haven't started the queue, | |
1317 | * so we can't get here... but requests are | |
1318 | * possibly hanging out waiting for us because we | |
1319 | * reported the dev/skd0 already. They'll wait | |
1320 | * forever if connect doesn't complete. | |
1321 | * What to do??? delay dev/skd0 ?? | |
1322 | */ | |
1323 | case SKD_DRVR_STATE_BUSY: | |
1324 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
1325 | case SKD_DRVR_STATE_BUSY_ERASE: | |
1326 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
1327 | return; | |
1328 | ||
1329 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
1330 | case SKD_DRVR_STATE_STOPPING: | |
1331 | case SKD_DRVR_STATE_SYNCING: | |
1332 | case SKD_DRVR_STATE_FAULT: | |
1333 | case SKD_DRVR_STATE_DISAPPEARED: | |
1334 | default: | |
1335 | error = -EIO; | |
1336 | break; | |
1337 | } | |
1338 | ||
1339 | /* If we get here, terminate all pending block requeusts | |
1340 | * with EIO and any scsi pass thru with appropriate sense | |
1341 | */ | |
1342 | ||
1343 | skd_fail_all_pending(skdev); | |
1344 | } | |
1345 | ||
1346 | /* | |
1347 | ***************************************************************************** | |
1348 | * TIMER | |
1349 | ***************************************************************************** | |
1350 | */ | |
1351 | ||
1352 | static void skd_timer_tick_not_online(struct skd_device *skdev); | |
1353 | ||
1354 | static void skd_timer_tick(ulong arg) | |
1355 | { | |
1356 | struct skd_device *skdev = (struct skd_device *)arg; | |
1357 | ||
1358 | u32 timo_slot; | |
1359 | u32 overdue_timestamp; | |
1360 | unsigned long reqflags; | |
1361 | u32 state; | |
1362 | ||
1363 | if (skdev->state == SKD_DRVR_STATE_FAULT) | |
1364 | /* The driver has declared fault, and we want it to | |
1365 | * stay that way until driver is reloaded. | |
1366 | */ | |
1367 | return; | |
1368 | ||
1369 | spin_lock_irqsave(&skdev->lock, reqflags); | |
1370 | ||
1371 | state = SKD_READL(skdev, FIT_STATUS); | |
1372 | state &= FIT_SR_DRIVE_STATE_MASK; | |
1373 | if (state != skdev->drive_state) | |
1374 | skd_isr_fwstate(skdev); | |
1375 | ||
1376 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
1377 | skd_timer_tick_not_online(skdev); | |
1378 | goto timer_func_out; | |
1379 | } | |
1380 | skdev->timeout_stamp++; | |
1381 | timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
1382 | ||
1383 | /* | |
1384 | * All requests that happened during the previous use of | |
1385 | * this slot should be done by now. The previous use was | |
1386 | * over 7 seconds ago. | |
1387 | */ | |
1388 | if (skdev->timeout_slot[timo_slot] == 0) | |
1389 | goto timer_func_out; | |
1390 | ||
1391 | /* Something is overdue */ | |
1392 | overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT; | |
1393 | ||
1394 | DPRINTK(skdev, "found %d timeouts, draining busy=%d\n", | |
1395 | skdev->timeout_slot[timo_slot], skdev->in_flight); | |
1396 | pr_err("(%s): Overdue IOs (%d), busy %d\n", | |
1397 | skd_name(skdev), skdev->timeout_slot[timo_slot], | |
1398 | skdev->in_flight); | |
1399 | ||
1400 | skdev->timer_countdown = SKD_DRAINING_TIMO; | |
1401 | skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT; | |
1402 | skdev->timo_slot = timo_slot; | |
1403 | skd_stop_queue(skdev); | |
1404 | ||
1405 | timer_func_out: | |
1406 | mod_timer(&skdev->timer, (jiffies + HZ)); | |
1407 | ||
1408 | spin_unlock_irqrestore(&skdev->lock, reqflags); | |
1409 | } | |
1410 | ||
1411 | static void skd_timer_tick_not_online(struct skd_device *skdev) | |
1412 | { | |
1413 | switch (skdev->state) { | |
1414 | case SKD_DRVR_STATE_IDLE: | |
1415 | case SKD_DRVR_STATE_LOAD: | |
1416 | break; | |
1417 | case SKD_DRVR_STATE_BUSY_SANITIZE: | |
1418 | VPRINTK(skdev, "drive busy sanitize[%x], driver[%x]\n", | |
1419 | skdev->drive_state, skdev->state); | |
1420 | /* If we've been in sanitize for 3 seconds, we figure we're not | |
1421 | * going to get anymore completions, so recover requests now | |
1422 | */ | |
1423 | if (skdev->timer_countdown > 0) { | |
1424 | skdev->timer_countdown--; | |
1425 | return; | |
1426 | } | |
1427 | skd_recover_requests(skdev, 0); | |
1428 | break; | |
1429 | ||
1430 | case SKD_DRVR_STATE_BUSY: | |
1431 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
1432 | case SKD_DRVR_STATE_BUSY_ERASE: | |
1433 | VPRINTK(skdev, "busy[%x], countdown=%d\n", | |
1434 | skdev->state, skdev->timer_countdown); | |
1435 | if (skdev->timer_countdown > 0) { | |
1436 | skdev->timer_countdown--; | |
1437 | return; | |
1438 | } | |
1439 | DPRINTK(skdev, "busy[%x], timedout=%d, restarting device.", | |
1440 | skdev->state, skdev->timer_countdown); | |
1441 | skd_restart_device(skdev); | |
1442 | break; | |
1443 | ||
1444 | case SKD_DRVR_STATE_WAIT_BOOT: | |
1445 | case SKD_DRVR_STATE_STARTING: | |
1446 | if (skdev->timer_countdown > 0) { | |
1447 | skdev->timer_countdown--; | |
1448 | return; | |
1449 | } | |
1450 | /* For now, we fault the drive. Could attempt resets to | |
1451 | * revcover at some point. */ | |
1452 | skdev->state = SKD_DRVR_STATE_FAULT; | |
1453 | ||
1454 | pr_err("(%s): DriveFault Connect Timeout (%x)\n", | |
1455 | skd_name(skdev), skdev->drive_state); | |
1456 | ||
1457 | /*start the queue so we can respond with error to requests */ | |
1458 | /* wakeup anyone waiting for startup complete */ | |
1459 | skd_start_queue(skdev); | |
1460 | skdev->gendisk_on = -1; | |
1461 | wake_up_interruptible(&skdev->waitq); | |
1462 | break; | |
1463 | ||
1464 | case SKD_DRVR_STATE_ONLINE: | |
1465 | /* shouldn't get here. */ | |
1466 | break; | |
1467 | ||
1468 | case SKD_DRVR_STATE_PAUSING: | |
1469 | case SKD_DRVR_STATE_PAUSED: | |
1470 | break; | |
1471 | ||
1472 | case SKD_DRVR_STATE_DRAINING_TIMEOUT: | |
1473 | DPRINTK(skdev, | |
1474 | "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n", | |
1475 | skdev->timo_slot, | |
1476 | skdev->timer_countdown, | |
1477 | skdev->in_flight, | |
1478 | skdev->timeout_slot[skdev->timo_slot]); | |
1479 | /* if the slot has cleared we can let the I/O continue */ | |
1480 | if (skdev->timeout_slot[skdev->timo_slot] == 0) { | |
1481 | DPRINTK(skdev, "Slot drained, starting queue.\n"); | |
1482 | skdev->state = SKD_DRVR_STATE_ONLINE; | |
1483 | skd_start_queue(skdev); | |
1484 | return; | |
1485 | } | |
1486 | if (skdev->timer_countdown > 0) { | |
1487 | skdev->timer_countdown--; | |
1488 | return; | |
1489 | } | |
1490 | skd_restart_device(skdev); | |
1491 | break; | |
1492 | ||
1493 | case SKD_DRVR_STATE_RESTARTING: | |
1494 | if (skdev->timer_countdown > 0) { | |
1495 | skdev->timer_countdown--; | |
1496 | return; | |
1497 | } | |
1498 | /* For now, we fault the drive. Could attempt resets to | |
1499 | * revcover at some point. */ | |
1500 | skdev->state = SKD_DRVR_STATE_FAULT; | |
1501 | pr_err("(%s): DriveFault Reconnect Timeout (%x)\n", | |
1502 | skd_name(skdev), skdev->drive_state); | |
1503 | ||
1504 | /* | |
1505 | * Recovering does two things: | |
1506 | * 1. completes IO with error | |
1507 | * 2. reclaims dma resources | |
1508 | * When is it safe to recover requests? | |
1509 | * - if the drive state is faulted | |
1510 | * - if the state is still soft reset after out timeout | |
1511 | * - if the drive registers are dead (state = FF) | |
1512 | * If it is "unsafe", we still need to recover, so we will | |
1513 | * disable pci bus mastering and disable our interrupts. | |
1514 | */ | |
1515 | ||
1516 | if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) || | |
1517 | (skdev->drive_state == FIT_SR_DRIVE_FAULT) || | |
1518 | (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK)) | |
1519 | /* It never came out of soft reset. Try to | |
1520 | * recover the requests and then let them | |
1521 | * fail. This is to mitigate hung processes. */ | |
1522 | skd_recover_requests(skdev, 0); | |
1523 | else { | |
1524 | pr_err("(%s): Disable BusMaster (%x)\n", | |
1525 | skd_name(skdev), skdev->drive_state); | |
1526 | pci_disable_device(skdev->pdev); | |
1527 | skd_disable_interrupts(skdev); | |
1528 | skd_recover_requests(skdev, 0); | |
1529 | } | |
1530 | ||
1531 | /*start the queue so we can respond with error to requests */ | |
1532 | /* wakeup anyone waiting for startup complete */ | |
1533 | skd_start_queue(skdev); | |
1534 | skdev->gendisk_on = -1; | |
1535 | wake_up_interruptible(&skdev->waitq); | |
1536 | break; | |
1537 | ||
1538 | case SKD_DRVR_STATE_RESUMING: | |
1539 | case SKD_DRVR_STATE_STOPPING: | |
1540 | case SKD_DRVR_STATE_SYNCING: | |
1541 | case SKD_DRVR_STATE_FAULT: | |
1542 | case SKD_DRVR_STATE_DISAPPEARED: | |
1543 | default: | |
1544 | break; | |
1545 | } | |
1546 | } | |
1547 | ||
1548 | static int skd_start_timer(struct skd_device *skdev) | |
1549 | { | |
1550 | int rc; | |
1551 | ||
1552 | init_timer(&skdev->timer); | |
1553 | setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev); | |
1554 | ||
1555 | rc = mod_timer(&skdev->timer, (jiffies + HZ)); | |
1556 | if (rc) | |
1557 | pr_err("%s: failed to start timer %d\n", | |
1558 | __func__, rc); | |
1559 | return rc; | |
1560 | } | |
1561 | ||
1562 | static void skd_kill_timer(struct skd_device *skdev) | |
1563 | { | |
1564 | del_timer_sync(&skdev->timer); | |
1565 | } | |
1566 | ||
1567 | /* | |
1568 | ***************************************************************************** | |
1569 | * IOCTL | |
1570 | ***************************************************************************** | |
1571 | */ | |
1572 | static int skd_ioctl_sg_io(struct skd_device *skdev, | |
1573 | fmode_t mode, void __user *argp); | |
1574 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | |
1575 | struct skd_sg_io *sksgio); | |
1576 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | |
1577 | struct skd_sg_io *sksgio); | |
1578 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | |
1579 | struct skd_sg_io *sksgio); | |
1580 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | |
1581 | struct skd_sg_io *sksgio, int dxfer_dir); | |
1582 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | |
1583 | struct skd_sg_io *sksgio); | |
1584 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio); | |
1585 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | |
1586 | struct skd_sg_io *sksgio); | |
1587 | static int skd_sg_io_put_status(struct skd_device *skdev, | |
1588 | struct skd_sg_io *sksgio); | |
1589 | ||
1590 | static void skd_complete_special(struct skd_device *skdev, | |
1591 | volatile struct fit_completion_entry_v1 | |
1592 | *skcomp, | |
1593 | volatile struct fit_comp_error_info *skerr, | |
1594 | struct skd_special_context *skspcl); | |
1595 | ||
1596 | static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode, | |
1597 | uint cmd_in, ulong arg) | |
1598 | { | |
1599 | int rc = 0; | |
1600 | struct gendisk *disk = bdev->bd_disk; | |
1601 | struct skd_device *skdev = disk->private_data; | |
1602 | void __user *p = (void *)arg; | |
1603 | ||
1604 | DPRINTK(skdev, "%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n", | |
1605 | disk->disk_name, current->comm, mode, cmd_in, arg); | |
1606 | ||
1607 | if (!capable(CAP_SYS_ADMIN)) | |
1608 | return -EPERM; | |
1609 | ||
1610 | switch (cmd_in) { | |
1611 | case SG_SET_TIMEOUT: | |
1612 | case SG_GET_TIMEOUT: | |
1613 | case SG_GET_VERSION_NUM: | |
1614 | rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p); | |
1615 | break; | |
1616 | case SG_IO: | |
1617 | rc = skd_ioctl_sg_io(skdev, mode, p); | |
1618 | break; | |
1619 | ||
1620 | default: | |
1621 | rc = -ENOTTY; | |
1622 | break; | |
1623 | } | |
1624 | ||
1625 | DPRINTK(skdev, "%s: completion rc %d\n", disk->disk_name, rc); | |
1626 | return rc; | |
1627 | } | |
1628 | ||
1629 | static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode, | |
1630 | void __user *argp) | |
1631 | { | |
1632 | int rc; | |
1633 | struct skd_sg_io sksgio; | |
1634 | ||
1635 | memset(&sksgio, 0, sizeof(sksgio)); | |
1636 | sksgio.mode = mode; | |
1637 | sksgio.argp = argp; | |
1638 | sksgio.iov = &sksgio.no_iov_iov; | |
1639 | ||
1640 | switch (skdev->state) { | |
1641 | case SKD_DRVR_STATE_ONLINE: | |
1642 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
1643 | break; | |
1644 | ||
1645 | default: | |
1646 | DPRINTK(skdev, "drive not online\n"); | |
1647 | rc = -ENXIO; | |
1648 | goto out; | |
1649 | } | |
1650 | ||
1651 | rc = skd_sg_io_get_and_check_args(skdev, &sksgio); |
1652 | if (rc) | |
1653 | goto out; | |
1654 | ||
1655 | rc = skd_sg_io_obtain_skspcl(skdev, &sksgio); | |
1656 | if (rc) | |
1657 | goto out; | |
1658 | ||
1659 | rc = skd_sg_io_prep_buffering(skdev, &sksgio); | |
1660 | if (rc) | |
1661 | goto out; | |
1662 | ||
1663 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV); | |
1664 | if (rc) | |
1665 | goto out; |
1666 | ||
1667 | rc = skd_sg_io_send_fitmsg(skdev, &sksgio); |
1668 | if (rc) | |
1669 | goto out; |
1670 | ||
1671 | rc = skd_sg_io_await(skdev, &sksgio); |
1672 | if (rc) | |
1673 | goto out; | |
1674 | ||
1675 | rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV); | |
1676 | if (rc) | |
1677 | goto out; | |
1678 | ||
1679 | rc = skd_sg_io_put_status(skdev, &sksgio); | |
1680 | if (rc) | |
1681 | goto out; |
1682 | ||
1683 | rc = 0; | |
1684 | ||
1685 | out: | |
1686 | skd_sg_io_release_skspcl(skdev, &sksgio); | |
1687 | ||
1688 | if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov) | |
1689 | kfree(sksgio.iov); | |
1690 | return rc; | |
1691 | } | |
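/*
 * For illustration only: a minimal userspace sketch of the SG_IO
 * path serviced above. The device node name is an assumption; any
 * block node backed by this driver will do, and the caller needs
 * CAP_SYS_ADMIN.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY
 *	unsigned char buf[96], sense[32];
 *	struct sg_io_hdr hdr;
 *	int fd = open("/dev/skd0", O_RDWR);   // hypothetical node name
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.interface_id = 'S';               // SG_INTERFACE_ID_ORIG
 *	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	hdr.cmd_len = sizeof(cdb);
 *	hdr.cmdp = cdb;
 *	hdr.dxfer_len = sizeof(buf);
 *	hdr.dxferp = buf;
 *	hdr.mx_sb_len = sizeof(sense);
 *	hdr.sbp = sense;
 *	hdr.timeout = 5000;                   // milliseconds
 *	ioctl(fd, SG_IO, &hdr);               // lands in skd_bdev_ioctl()
 *	// hdr.status and hdr.sb_len_wr now hold the outcome
 */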
1692 | ||
1693 | static int skd_sg_io_get_and_check_args(struct skd_device *skdev, | |
1694 | struct skd_sg_io *sksgio) | |
1695 | { | |
1696 | struct sg_io_hdr *sgp = &sksgio->sg; | |
1697 | int i, acc; | |
1698 | ||
1699 | if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) { | |
1700 | DPRINTK(skdev, "access sg failed %p\n", sksgio->argp); | |
1701 | return -EFAULT; | |
1702 | } | |
1703 | ||
1704 | if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) { | |
1705 | DPRINTK(skdev, "copy_from_user sg failed %p\n", sksgio->argp); | |
1706 | return -EFAULT; | |
1707 | } | |
1708 | ||
1709 | if (sgp->interface_id != SG_INTERFACE_ID_ORIG) { | |
1710 | DPRINTK(skdev, "interface_id invalid 0x%x\n", | |
1711 | sgp->interface_id); | |
1712 | return -EINVAL; | |
1713 | } | |
1714 | ||
1715 | if (sgp->cmd_len > sizeof(sksgio->cdb)) { | |
1716 | DPRINTK(skdev, "cmd_len invalid %d\n", sgp->cmd_len); | |
1717 | return -EINVAL; | |
1718 | } | |
1719 | ||
1720 | if (sgp->iovec_count > 256) { | |
1721 | DPRINTK(skdev, "iovec_count invalid %d\n", sgp->iovec_count); | |
1722 | return -EINVAL; | |
1723 | } | |
1724 | ||
1725 | if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) { | |
1726 | DPRINTK(skdev, "dxfer_len invalid %d\n", sgp->dxfer_len); | |
1727 | return -EINVAL; | |
1728 | } | |
1729 | ||
1730 | switch (sgp->dxfer_direction) { | |
1731 | case SG_DXFER_NONE: | |
1732 | acc = -1; | |
1733 | break; | |
1734 | ||
1735 | case SG_DXFER_TO_DEV: | |
1736 | acc = VERIFY_READ; | |
1737 | break; | |
1738 | ||
1739 | case SG_DXFER_FROM_DEV: | |
1740 | case SG_DXFER_TO_FROM_DEV: | |
1741 | acc = VERIFY_WRITE; | |
1742 | break; | |
1743 | ||
1744 | default: | |
1745 | DPRINTK(skdev, "dxfer_dir invalid %d\n", sgp->dxfer_direction); | |
1746 | return -EINVAL; | |
1747 | } | |
1748 | ||
1749 | if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) { | |
1750 | DPRINTK(skdev, "copy_from_user cmdp failed %p\n", sgp->cmdp); | |
1751 | return -EFAULT; | |
1752 | } | |
1753 | ||
1754 | if (sgp->mx_sb_len != 0) { | |
1755 | if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) { | |
1756 | DPRINTK(skdev, "access sbp failed %p\n", sgp->sbp); | |
1757 | return -EFAULT; | |
1758 | } | |
1759 | } | |
1760 | ||
1761 | if (sgp->iovec_count == 0) { | |
1762 | sksgio->iov[0].iov_base = sgp->dxferp; | |
1763 | sksgio->iov[0].iov_len = sgp->dxfer_len; | |
1764 | sksgio->iovcnt = 1; | |
1765 | sksgio->dxfer_len = sgp->dxfer_len; | |
1766 | } else { | |
1767 | struct sg_iovec *iov; | |
1768 | uint nbytes = sizeof(*iov) * sgp->iovec_count; | |
1769 | size_t iov_data_len; | |
1770 | ||
1771 | iov = kmalloc(nbytes, GFP_KERNEL); | |
1772 | if (iov == NULL) { | |
1773 | DPRINTK(skdev, "alloc iovec failed %d\n", | |
1774 | sgp->iovec_count); | |
1775 | return -ENOMEM; | |
1776 | } | |
1777 | sksgio->iov = iov; | |
1778 | sksgio->iovcnt = sgp->iovec_count; | |
1779 | ||
1780 | if (copy_from_user(iov, sgp->dxferp, nbytes)) { | |
1781 | DPRINTK(skdev, "copy_from_user iovec failed %p\n", | |
1782 | sgp->dxferp); | |
1783 | return -EFAULT; | |
1784 | } | |
1785 | ||
1786 | /* | |
1787 | * Sum up the vecs, making sure they don't overflow | |
1788 | */ | |
1789 | iov_data_len = 0; | |
1790 | for (i = 0; i < sgp->iovec_count; i++) { | |
1791 | if (iov_data_len + iov[i].iov_len < iov_data_len) | |
1792 | return -EINVAL; | |
1793 | iov_data_len += iov[i].iov_len; | |
1794 | } | |
1795 | ||
1796 | /* SG_IO howto says that the shorter of the two wins */ | |
1797 | if (sgp->dxfer_len < iov_data_len) { | |
1798 | sksgio->iovcnt = iov_shorten((struct iovec *)iov, | |
1799 | sgp->iovec_count, | |
1800 | sgp->dxfer_len); | |
1801 | sksgio->dxfer_len = sgp->dxfer_len; | |
1802 | } else | |
1803 | sksgio->dxfer_len = iov_data_len; | |
1804 | } | |
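/*
 * Example of the rule above: dxfer_len of 4096 with two 4096-byte
 * iovecs keeps only the first vector (iov_shorten() trims the rest)
 * and the effective transfer stays 4096 bytes.
 */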
1805 | ||
1806 | if (sgp->dxfer_direction != SG_DXFER_NONE) { | |
1807 | struct sg_iovec *iov = sksgio->iov; | |
1808 | for (i = 0; i < sksgio->iovcnt; i++, iov++) { | |
1809 | if (!access_ok(acc, iov->iov_base, iov->iov_len)) { | |
1810 | DPRINTK(skdev, "access data failed %p/%d\n", | |
1811 | iov->iov_base, (int)iov->iov_len); | |
1812 | return -EFAULT; | |
1813 | } | |
1814 | } | |
1815 | } | |
1816 | ||
1817 | return 0; | |
1818 | } | |
1819 | ||
1820 | static int skd_sg_io_obtain_skspcl(struct skd_device *skdev, | |
1821 | struct skd_sg_io *sksgio) | |
1822 | { | |
1823 | struct skd_special_context *skspcl = NULL; | |
1824 | int rc; | |
1825 | ||
1826 | for (;;) { | |
1827 | ulong flags; | |
1828 | ||
1829 | spin_lock_irqsave(&skdev->lock, flags); | |
1830 | skspcl = skdev->skspcl_free_list; | |
1831 | if (skspcl != NULL) { | |
1832 | skdev->skspcl_free_list = | |
1833 | (struct skd_special_context *)skspcl->req.next; | |
1834 | skspcl->req.id += SKD_ID_INCR; | |
1835 | skspcl->req.state = SKD_REQ_STATE_SETUP; | |
1836 | skspcl->orphaned = 0; | |
1837 | skspcl->req.n_sg = 0; | |
1838 | } | |
1839 | spin_unlock_irqrestore(&skdev->lock, flags); | |
1840 | ||
1841 | if (skspcl != NULL) { | |
1842 | rc = 0; | |
1843 | break; | |
1844 | } | |
1845 | ||
1846 | DPRINTK(skdev, "blocking\n"); | |
1847 | ||
1848 | rc = wait_event_interruptible_timeout( | |
1849 | skdev->waitq, | |
1850 | (skdev->skspcl_free_list != NULL), | |
1851 | msecs_to_jiffies(sksgio->sg.timeout)); | |
1852 | ||
1853 | DPRINTK(skdev, "unblocking, rc=%d\n", rc); | |
1854 | ||
1855 | if (rc <= 0) { | |
1856 | if (rc == 0) | |
1857 | rc = -ETIMEDOUT; | |
1858 | else | |
1859 | rc = -EINTR; | |
1860 | break; | |
1861 | } | |
1862 | /* | |
1863 | * If we get here, rc > 0, meaning | |
1864 | * wait_event_interruptible_timeout() returned with time left; the | |
1865 | * awaited event -- a non-empty free list -- occurred. | |
1866 | * Retry the allocation. | |
1867 | */ | |
1868 | } | |
1869 | sksgio->skspcl = skspcl; | |
1870 | ||
1871 | return rc; | |
1872 | } | |
1873 | ||
1874 | static int skd_skreq_prep_buffering(struct skd_device *skdev, | |
1875 | struct skd_request_context *skreq, | |
1876 | u32 dxfer_len) | |
1877 | { | |
1878 | u32 resid = dxfer_len; | |
1879 | ||
1880 | /* | |
1881 | * The DMA engine must have aligned addresses and byte counts. | |
1882 | */ | |
1883 | resid += (-resid) & 3; | |
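/* (-resid) & 3 adds 0..3 bytes, rounding the count up to the next
 * multiple of 4; e.g. a 4097-byte transfer becomes 4100 bytes. */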
1884 | skreq->sg_byte_count = resid; | |
1885 | ||
1886 | skreq->n_sg = 0; | |
1887 | ||
1888 | while (resid > 0) { | |
1889 | u32 nbytes = PAGE_SIZE; | |
1890 | u32 ix = skreq->n_sg; | |
1891 | struct scatterlist *sg = &skreq->sg[ix]; | |
1892 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | |
1893 | struct page *page; | |
1894 | ||
1895 | if (nbytes > resid) | |
1896 | nbytes = resid; | |
1897 | ||
1898 | page = alloc_page(GFP_KERNEL); | |
1899 | if (page == NULL) | |
1900 | return -ENOMEM; | |
1901 | ||
1902 | sg_set_page(sg, page, nbytes, 0); | |
1903 | ||
1904 | /* TODO: This should be going through a pci_???() | |
1905 | * routine to do proper mapping. */ | |
1906 | sksg->control = FIT_SGD_CONTROL_NOT_LAST; | |
1907 | sksg->byte_count = nbytes; | |
1908 | ||
1909 | sksg->host_side_addr = sg_phys(sg); | |
1910 | ||
1911 | sksg->dev_side_addr = 0; | |
1912 | sksg->next_desc_ptr = skreq->sksg_dma_address + | |
1913 | (ix + 1) * sizeof(*sksg); | |
1914 | ||
1915 | skreq->n_sg++; | |
1916 | resid -= nbytes; | |
1917 | } | |
1918 | ||
1919 | if (skreq->n_sg > 0) { | |
1920 | u32 ix = skreq->n_sg - 1; | |
1921 | struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix]; | |
1922 | ||
1923 | sksg->control = FIT_SGD_CONTROL_LAST; | |
1924 | sksg->next_desc_ptr = 0; | |
1925 | } | |
1926 | ||
1927 | if (unlikely(skdev->dbg_level > 1)) { | |
1928 | u32 i; | |
1929 | ||
1930 | VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n", | |
1931 | skreq->id, skreq->sksg_list, skreq->sksg_dma_address); | |
1932 | for (i = 0; i < skreq->n_sg; i++) { | |
1933 | struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; | |
1934 | ||
1935 | VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x " | |
1936 | "addr=0x%llx next=0x%llx\n", | |
1937 | i, sgd->byte_count, sgd->control, | |
1938 | sgd->host_side_addr, sgd->next_desc_ptr); | |
1939 | } | |
1940 | } | |
1941 | ||
1942 | return 0; | |
1943 | } | |
1944 | ||
1945 | static int skd_sg_io_prep_buffering(struct skd_device *skdev, | |
1946 | struct skd_sg_io *sksgio) | |
1947 | { | |
1948 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1949 | struct skd_request_context *skreq = &skspcl->req; | |
1950 | u32 dxfer_len = sksgio->dxfer_len; | |
1951 | int rc; | |
1952 | ||
1953 | rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len); | |
1954 | /* | |
1955 | * Eventually, errors or not, skd_release_special() is called | |
1956 | * to recover allocations including partial allocations. | |
1957 | */ | |
1958 | return rc; | |
1959 | } | |
1960 | ||
1961 | static int skd_sg_io_copy_buffer(struct skd_device *skdev, | |
1962 | struct skd_sg_io *sksgio, int dxfer_dir) | |
1963 | { | |
1964 | struct skd_special_context *skspcl = sksgio->skspcl; | |
1965 | u32 iov_ix = 0; | |
1966 | struct sg_iovec curiov; | |
1967 | u32 sksg_ix = 0; | |
1968 | u8 *bufp = NULL; | |
1969 | u32 buf_len = 0; | |
1970 | u32 resid = sksgio->dxfer_len; | |
1971 | int rc; | |
1972 | ||
1973 | curiov.iov_len = 0; | |
1974 | curiov.iov_base = NULL; | |
1975 | ||
1976 | if (dxfer_dir != sksgio->sg.dxfer_direction) { | |
1977 | if (dxfer_dir != SG_DXFER_TO_DEV || | |
1978 | sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV) | |
1979 | return 0; | |
1980 | } | |
1981 | ||
1982 | while (resid > 0) { | |
1983 | u32 nbytes = PAGE_SIZE; | |
1984 | ||
1985 | if (curiov.iov_len == 0) { | |
1986 | curiov = sksgio->iov[iov_ix++]; | |
1987 | continue; | |
1988 | } | |
1989 | ||
1990 | if (buf_len == 0) { | |
1991 | struct page *page; | |
1992 | page = sg_page(&skspcl->req.sg[sksg_ix++]); | |
1993 | bufp = page_address(page); | |
1994 | buf_len = PAGE_SIZE; | |
1995 | } | |
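/* Each pass copies the largest run that fits the remaining
 * transfer, the current iovec, and the current page; e.g. 6000
 * bytes through one iovec copies 4096 bytes, then 1904. */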
1996 | ||
1997 | nbytes = min_t(u32, nbytes, resid); | |
1998 | nbytes = min_t(u32, nbytes, curiov.iov_len); | |
1999 | nbytes = min_t(u32, nbytes, buf_len); | |
2000 | ||
2001 | if (dxfer_dir == SG_DXFER_TO_DEV) | |
2002 | rc = __copy_from_user(bufp, curiov.iov_base, nbytes); | |
2003 | else | |
2004 | rc = __copy_to_user(curiov.iov_base, bufp, nbytes); | |
2005 | ||
2006 | if (rc) | |
2007 | return -EFAULT; | |
2008 | ||
2009 | resid -= nbytes; | |
2010 | curiov.iov_len -= nbytes; | |
2011 | curiov.iov_base += nbytes; | |
2012 | buf_len -= nbytes; | |
2013 | } | |
2014 | ||
2015 | return 0; | |
2016 | } | |
2017 | ||
2018 | static int skd_sg_io_send_fitmsg(struct skd_device *skdev, | |
2019 | struct skd_sg_io *sksgio) | |
2020 | { | |
2021 | struct skd_special_context *skspcl = sksgio->skspcl; | |
2022 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | |
2023 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | |
2024 | ||
2025 | memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES); | |
2026 | ||
2027 | /* Initialize the FIT msg header */ | |
2028 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
2029 | fmh->num_protocol_cmds_coalesced = 1; | |
2030 | ||
2031 | /* Initialize the SCSI request */ | |
2032 | if (sksgio->sg.dxfer_direction != SG_DXFER_NONE) | |
2033 | scsi_req->hdr.sg_list_dma_address = | |
2034 | cpu_to_be64(skspcl->req.sksg_dma_address); | |
2035 | scsi_req->hdr.tag = skspcl->req.id; | |
2036 | scsi_req->hdr.sg_list_len_bytes = | |
2037 | cpu_to_be32(skspcl->req.sg_byte_count); | |
2038 | memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb)); | |
2039 | ||
2040 | skspcl->req.state = SKD_REQ_STATE_BUSY; | |
2041 | skd_send_special_fitmsg(skdev, skspcl); | |
2042 | ||
2043 | return 0; | |
2044 | } | |
2045 | ||
2046 | static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio) | |
2047 | { | |
2048 | unsigned long flags; | |
2049 | int rc; | |
2050 | ||
2051 | rc = wait_event_interruptible_timeout(skdev->waitq, | |
2052 | (sksgio->skspcl->req.state != | |
2053 | SKD_REQ_STATE_BUSY), | |
2054 | msecs_to_jiffies( | |
2055 | sksgio->sg.timeout)); | |
2056 | ||
2057 | spin_lock_irqsave(&skdev->lock, flags); | |
2058 | ||
2059 | if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) { | |
2060 | DPRINTK(skdev, "skspcl %p aborted\n", sksgio->skspcl); | |
2061 | ||
2062 | /* Build check cond, sense and let command finish. */ | |
2063 | /* For a timeout, we must fabricate completion and sense | |
2064 | * data to complete the command */ | |
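/* Sense values used below: 0x70 is fixed-format sense data;
 * ASC 0x44 with ASCQ 0x00 is "internal target failure" in SPC. */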
2065 | sksgio->skspcl->req.completion.status = | |
2066 | SAM_STAT_CHECK_CONDITION; | |
2067 | ||
2068 | memset(&sksgio->skspcl->req.err_info, 0, | |
2069 | sizeof(sksgio->skspcl->req.err_info)); | |
2070 | sksgio->skspcl->req.err_info.type = 0x70; | |
2071 | sksgio->skspcl->req.err_info.key = ABORTED_COMMAND; | |
2072 | sksgio->skspcl->req.err_info.code = 0x44; | |
2073 | sksgio->skspcl->req.err_info.qual = 0; | |
2074 | rc = 0; | |
2075 | } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY) | |
2076 | /* No longer on the adapter. We finish. */ | |
2077 | rc = 0; | |
2078 | else { | |
2079 | /* Something's gone wrong. Still busy. Timeout or | |
2080 | * user interrupted (control-C). Mark as an orphan | |
2081 | * so it will be disposed when completed. */ | |
2082 | sksgio->skspcl->orphaned = 1; | |
2083 | sksgio->skspcl = NULL; | |
2084 | if (rc == 0) { | |
2085 | DPRINTK(skdev, "timed out %p (%u ms)\n", sksgio, | |
2086 | sksgio->sg.timeout); | |
2087 | rc = -ETIMEDOUT; | |
2088 | } else { | |
2089 | DPRINTK(skdev, "cntlc %p\n", sksgio); | |
2090 | rc = -EINTR; | |
2091 | } | |
2092 | } | |
2093 | ||
2094 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2095 | ||
2096 | return rc; | |
2097 | } | |
2098 | ||
2099 | static int skd_sg_io_put_status(struct skd_device *skdev, | |
2100 | struct skd_sg_io *sksgio) | |
2101 | { | |
2102 | struct sg_io_hdr *sgp = &sksgio->sg; | |
2103 | struct skd_special_context *skspcl = sksgio->skspcl; | |
2104 | int resid = 0; | |
2105 | ||
2106 | u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes); | |
2107 | ||
2108 | sgp->status = skspcl->req.completion.status; | |
2109 | resid = sksgio->dxfer_len - nb; | |
2110 | ||
2111 | sgp->masked_status = sgp->status & STATUS_MASK; | |
2112 | sgp->msg_status = 0; | |
2113 | sgp->host_status = 0; | |
2114 | sgp->driver_status = 0; | |
2115 | sgp->resid = resid; | |
2116 | if (sgp->masked_status || sgp->host_status || sgp->driver_status) | |
2117 | sgp->info |= SG_INFO_CHECK; | |
2118 | ||
2119 | DPRINTK(skdev, "status %x masked %x resid 0x%x\n", sgp->status, | |
2120 | sgp->masked_status, sgp->resid); | |
2121 | ||
2122 | if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) { | |
2123 | if (sgp->mx_sb_len > 0) { | |
2124 | struct fit_comp_error_info *ei = &skspcl->req.err_info; | |
2125 | u32 nbytes = sizeof(*ei); | |
2126 | ||
2127 | nbytes = min_t(u32, nbytes, sgp->mx_sb_len); | |
2128 | ||
2129 | sgp->sb_len_wr = nbytes; | |
2130 | ||
2131 | if (__copy_to_user(sgp->sbp, ei, nbytes)) { | |
2132 | DPRINTK(skdev, "copy_to_user sense failed %p\n", | |
2133 | sgp->sbp); | |
2134 | return -EFAULT; | |
2135 | } | |
2136 | } | |
2137 | } | |
2138 | ||
2139 | if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) { | |
2140 | DPRINTK(skdev, "copy_to_user sg failed %p\n", sksgio->argp); | |
2141 | return -EFAULT; | |
2142 | } | |
2143 | ||
2144 | return 0; | |
2145 | } | |
2146 | ||
2147 | static int skd_sg_io_release_skspcl(struct skd_device *skdev, | |
2148 | struct skd_sg_io *sksgio) | |
2149 | { | |
2150 | struct skd_special_context *skspcl = sksgio->skspcl; | |
2151 | ||
2152 | if (skspcl != NULL) { | |
2153 | ulong flags; | |
2154 | ||
2155 | sksgio->skspcl = NULL; | |
2156 | ||
2157 | spin_lock_irqsave(&skdev->lock, flags); | |
2158 | skd_release_special(skdev, skspcl); | |
2159 | spin_unlock_irqrestore(&skdev->lock, flags); | |
2160 | } | |
2161 | ||
2162 | return 0; | |
2163 | } | |
2164 | ||
2165 | /* | |
2166 | ***************************************************************************** | |
2167 | * INTERNAL REQUESTS -- generated by driver itself | |
2168 | ***************************************************************************** | |
2169 | */ | |
2170 | ||
2171 | static int skd_format_internal_skspcl(struct skd_device *skdev) | |
2172 | { | |
2173 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
2174 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | |
2175 | struct fit_msg_hdr *fmh; | |
2176 | uint64_t dma_address; | |
2177 | struct skd_scsi_request *scsi; | |
2178 | ||
2179 | fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0]; | |
2180 | fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT; | |
2181 | fmh->num_protocol_cmds_coalesced = 1; | |
2182 | ||
2183 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
2184 | memset(scsi, 0, sizeof(*scsi)); | |
2185 | dma_address = skspcl->req.sksg_dma_address; | |
2186 | scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address); | |
2187 | sgd->control = FIT_SGD_CONTROL_LAST; | |
2188 | sgd->byte_count = 0; | |
2189 | sgd->host_side_addr = skspcl->db_dma_address; | |
2190 | sgd->dev_side_addr = 0; | |
2191 | sgd->next_desc_ptr = 0LL; | |
2192 | ||
2193 | return 1; | |
2194 | } | |
2195 | ||
2196 | #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES | |
2197 | ||
2198 | static void skd_send_internal_skspcl(struct skd_device *skdev, | |
2199 | struct skd_special_context *skspcl, | |
2200 | u8 opcode) | |
2201 | { | |
2202 | struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0]; | |
2203 | struct skd_scsi_request *scsi; | |
2204 | unsigned char *buf = skspcl->data_buf; | |
2205 | int i; | |
2206 | ||
2207 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) | |
2208 | /* | |
2209 | * A refresh is already in progress. | |
2210 | * Just wait for it to finish. | |
2211 | */ | |
2212 | return; | |
2213 | ||
2214 | SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0); | |
2215 | skspcl->req.state = SKD_REQ_STATE_BUSY; | |
2216 | skspcl->req.id += SKD_ID_INCR; | |
2217 | ||
2218 | scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
2219 | scsi->hdr.tag = skspcl->req.id; | |
2220 | ||
2221 | memset(scsi->cdb, 0, sizeof(scsi->cdb)); | |
2222 | ||
2223 | switch (opcode) { | |
2224 | case TEST_UNIT_READY: | |
2225 | scsi->cdb[0] = TEST_UNIT_READY; | |
2226 | sgd->byte_count = 0; | |
2227 | scsi->hdr.sg_list_len_bytes = 0; | |
2228 | break; | |
2229 | ||
2230 | case READ_CAPACITY: | |
2231 | scsi->cdb[0] = READ_CAPACITY; | |
2232 | sgd->byte_count = SKD_N_READ_CAP_BYTES; | |
2233 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
2234 | break; | |
2235 | ||
2236 | case INQUIRY: | |
2237 | scsi->cdb[0] = INQUIRY; | |
2238 | scsi->cdb[1] = 0x01; /* evpd */ | |
2239 | scsi->cdb[2] = 0x80; /* serial number page */ | |
2240 | scsi->cdb[4] = 0x10; | |
2241 | sgd->byte_count = 16; | |
2242 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
2243 | break; | |
2244 | ||
2245 | case SYNCHRONIZE_CACHE: | |
2246 | scsi->cdb[0] = SYNCHRONIZE_CACHE; | |
2247 | sgd->byte_count = 0; | |
2248 | scsi->hdr.sg_list_len_bytes = 0; | |
2249 | break; | |
2250 | ||
2251 | case WRITE_BUFFER: | |
2252 | scsi->cdb[0] = WRITE_BUFFER; | |
2253 | scsi->cdb[1] = 0x02; | |
2254 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | |
2255 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | |
2256 | sgd->byte_count = WR_BUF_SIZE; | |
2257 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
2258 | /* fill incrementing byte pattern */ | |
2259 | for (i = 0; i < sgd->byte_count; i++) | |
2260 | buf[i] = i & 0xFF; | |
2261 | break; | |
2262 | ||
2263 | case READ_BUFFER: | |
2264 | scsi->cdb[0] = READ_BUFFER; | |
2265 | scsi->cdb[1] = 0x02; | |
2266 | scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8; | |
2267 | scsi->cdb[8] = WR_BUF_SIZE & 0xFF; | |
2268 | sgd->byte_count = WR_BUF_SIZE; | |
2269 | scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count); | |
2270 | memset(skspcl->data_buf, 0, sgd->byte_count); | |
2271 | break; | |
2272 | ||
2273 | default: | |
2274 | SKD_ASSERT(!"Don't know what to send"); /* always logs */ | |
2275 | return; | |
2276 | ||
2277 | } | |
2278 | skd_send_special_fitmsg(skdev, skspcl); | |
2279 | } | |
2280 | ||
2281 | static void skd_refresh_device_data(struct skd_device *skdev) | |
2282 | { | |
2283 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
2284 | ||
2285 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | |
2286 | } | |
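/*
 * The refresh drives a chain of internal commands, advanced by
 * skd_complete_internal() as each one finishes:
 *
 *   TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern check)
 *                   -> READ_CAPACITY -> INQUIRY (serial number page)
 *
 * A buffer mismatch triggers a soft reset (up to
 * SKD_MAX_CONNECT_RETRIES); other failures retry from
 * TEST_UNIT_READY unless the driver is stopping.
 */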
2287 | ||
2288 | static int skd_chk_read_buf(struct skd_device *skdev, | |
2289 | struct skd_special_context *skspcl) | |
2290 | { | |
2291 | unsigned char *buf = skspcl->data_buf; | |
2292 | int i; | |
2293 | ||
2294 | /* check for incrementing byte pattern */ | |
2295 | for (i = 0; i < WR_BUF_SIZE; i++) | |
2296 | if (buf[i] != (i & 0xFF)) | |
2297 | return 1; | |
2298 | ||
2299 | return 0; | |
2300 | } | |
2301 | ||
2302 | static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key, | |
2303 | u8 code, u8 qual, u8 fruc) | |
2304 | { | |
2305 | /* If the check condition is of special interest, log a message */ | |
2306 | if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) | |
2307 | && (code == 0x04) && (qual == 0x06)) { | |
2308 | pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/" | |
2309 | "ascq/fruc %02x/%02x/%02x/%02x\n", | |
2310 | skd_name(skdev), key, code, qual, fruc); | |
2311 | } | |
2312 | } | |
2313 | ||
2314 | static void skd_complete_internal(struct skd_device *skdev, | |
2315 | volatile struct fit_completion_entry_v1 | |
2316 | *skcomp, | |
2317 | volatile struct fit_comp_error_info *skerr, | |
2318 | struct skd_special_context *skspcl) | |
2319 | { | |
2320 | u8 *buf = skspcl->data_buf; | |
2321 | u8 status; | |
2322 | int i; | |
2323 | struct skd_scsi_request *scsi = | |
2324 | (struct skd_scsi_request *)&skspcl->msg_buf[64]; | |
2325 | ||
2326 | SKD_ASSERT(skspcl == &skdev->internal_skspcl); | |
2327 | ||
2328 | DPRINTK(skdev, "complete internal %x\n", scsi->cdb[0]); | |
2329 | ||
2330 | skspcl->req.completion = *skcomp; | |
2331 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
2332 | skspcl->req.id += SKD_ID_INCR; | |
2333 | ||
2334 | status = skspcl->req.completion.status; | |
2335 | ||
2336 | skd_log_check_status(skdev, status, skerr->key, skerr->code, | |
2337 | skerr->qual, skerr->fruc); | |
2338 | ||
2339 | switch (scsi->cdb[0]) { | |
2340 | case TEST_UNIT_READY: | |
2341 | if (status == SAM_STAT_GOOD) | |
2342 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | |
2343 | else if ((status == SAM_STAT_CHECK_CONDITION) && | |
2344 | (skerr->key == MEDIUM_ERROR)) | |
2345 | skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER); | |
2346 | else { | |
2347 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2348 | VPRINTK(skdev, "TUR failed, don't send anymore, " | |
2349 | "state 0x%x\n", skdev->state); | |
2350 | return; | |
2351 | } | |
2352 | DPRINTK(skdev, "**** TUR failed, retry skerr\n"); | |
2353 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | |
2354 | } | |
2355 | break; | |
2356 | ||
2357 | case WRITE_BUFFER: | |
2358 | if (status == SAM_STAT_GOOD) | |
2359 | skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER); | |
2360 | else { | |
2361 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2362 | VPRINTK(skdev, "write buffer failed, don't send" | |
2363 | " anymore state 0x%x\n", skdev->state); | |
2364 | return; | |
2365 | } | |
2366 | DPRINTK(skdev, | |
2367 | "**** write buffer failed, retry skerr\n"); | |
2368 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | |
2369 | } | |
2370 | break; | |
2371 | ||
2372 | case READ_BUFFER: | |
2373 | if (status == SAM_STAT_GOOD) { | |
2374 | if (skd_chk_read_buf(skdev, skspcl) == 0) | |
2375 | skd_send_internal_skspcl(skdev, skspcl, | |
2376 | READ_CAPACITY); | |
2377 | else { | |
2378 | pr_err( | |
2379 | "(%s):*** W/R Buffer mismatch %d ***\n", | |
2380 | skd_name(skdev), skdev->connect_retries); | |
2381 | if (skdev->connect_retries < | |
2382 | SKD_MAX_CONNECT_RETRIES) { | |
2383 | skdev->connect_retries++; | |
2384 | skd_soft_reset(skdev); | |
2385 | } else { | |
2386 | pr_err( | |
2387 | "(%s): W/R Buffer Connect Error\n", | |
2388 | skd_name(skdev)); | |
2389 | return; | |
2390 | } | |
2391 | } | |
2392 | ||
2393 | } else { | |
2394 | if (skdev->state == SKD_DRVR_STATE_STOPPING) { | |
2395 | VPRINTK(skdev, | |
2396 | "read buffer failed, don't send anymore" | |
2397 | "state 0x%x\n", skdev->state); | |
2398 | return; | |
2399 | } | |
2400 | DPRINTK(skdev, | |
2401 | "**** read buffer failed, retry skerr\n"); | |
2402 | skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY); | |
2403 | } | |
2404 | break; | |
2405 | ||
2406 | case READ_CAPACITY: | |
2407 | skdev->read_cap_is_valid = 0; | |
2408 | if (status == SAM_STAT_GOOD) { | |
2409 | skdev->read_cap_last_lba = | |
2410 | (buf[0] << 24) | (buf[1] << 16) | | |
2411 | (buf[2] << 8) | buf[3]; | |
2412 | skdev->read_cap_blocksize = | |
2413 | (buf[4] << 24) | (buf[5] << 16) | | |
2414 | (buf[6] << 8) | buf[7]; | |
2415 | ||
2416 | DPRINTK(skdev, "last lba %d, bs %d\n", | |
2417 | skdev->read_cap_last_lba, | |
2418 | skdev->read_cap_blocksize); | |
2419 | ||
2420 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | |
2421 | ||
2422 | skdev->read_cap_is_valid = 1; | |
2423 | ||
2424 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | |
2425 | } else if ((status == SAM_STAT_CHECK_CONDITION) && | |
2426 | (skerr->key == MEDIUM_ERROR)) { | |
2427 | skdev->read_cap_last_lba = ~0; | |
2428 | set_capacity(skdev->disk, skdev->read_cap_last_lba + 1); | |
2429 | DPRINTK(skdev, | |
2430 | "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n"); | |
2431 | skd_send_internal_skspcl(skdev, skspcl, INQUIRY); | |
2432 | } else { | |
2433 | DPRINTK(skdev, "**** READCAP failed, retry TUR\n"); | |
2434 | skd_send_internal_skspcl(skdev, skspcl, | |
2435 | TEST_UNIT_READY); | |
2436 | } | |
2437 | break; | |
2438 | ||
2439 | case INQUIRY: | |
2440 | skdev->inquiry_is_valid = 0; | |
2441 | if (status == SAM_STAT_GOOD) { | |
2442 | skdev->inquiry_is_valid = 1; | |
2443 | ||
2444 | for (i = 0; i < 12; i++) | |
2445 | skdev->inq_serial_num[i] = buf[i + 4]; | |
2446 | skdev->inq_serial_num[12] = 0; | |
2447 | } | |
2448 | ||
2449 | if (skd_unquiesce_dev(skdev) < 0) | |
2450 | DPRINTK(skdev, "**** failed to ONLINE device\n"); | |
2451 | /* connection is complete */ | |
2452 | skdev->connect_retries = 0; | |
2453 | break; | |
2454 | ||
2455 | case SYNCHRONIZE_CACHE: | |
2456 | if (status == SAM_STAT_GOOD) | |
2457 | skdev->sync_done = 1; | |
2458 | else | |
2459 | skdev->sync_done = -1; | |
2460 | wake_up_interruptible(&skdev->waitq); | |
2461 | break; | |
2462 | ||
2463 | default: | |
2464 | SKD_ASSERT(!"we didn't send this"); /* always logs */ | |
2465 | } | |
2466 | } | |
2467 | ||
2468 | /* | |
2469 | ***************************************************************************** | |
2470 | * FIT MESSAGES | |
2471 | ***************************************************************************** | |
2472 | */ | |
2473 | ||
2474 | static void skd_send_fitmsg(struct skd_device *skdev, | |
2475 | struct skd_fitmsg_context *skmsg) | |
2476 | { | |
2477 | u64 qcmd; | |
2478 | struct fit_msg_hdr *fmh; | |
2479 | ||
2480 | VPRINTK(skdev, "dma address 0x%llx, busy=%d\n", | |
2481 | skmsg->mb_dma_address, skdev->in_flight); | |
2482 | VPRINTK(skdev, "msg_buf 0x%p, offset %x\n", | |
2483 | skmsg->msg_buf, skmsg->offset); | |
2484 | ||
2485 | qcmd = skmsg->mb_dma_address; | |
2486 | qcmd |= FIT_QCMD_QID_NORMAL; | |
2487 | ||
2488 | fmh = (struct fit_msg_hdr *)skmsg->msg_buf; | |
2489 | skmsg->outstanding = fmh->num_protocol_cmds_coalesced; | |
2490 | ||
2491 | if (unlikely(skdev->dbg_level > 1)) { | |
2492 | u8 *bp = (u8 *)skmsg->msg_buf; | |
2493 | int i; | |
2494 | for (i = 0; i < skmsg->length; i += 8) { | |
2495 | VPRINTK(skdev, " msg[%2d] %02x %02x %02x %02x " | |
2496 | "%02x %02x %02x %02x\n", | |
2497 | i, bp[i + 0], bp[i + 1], bp[i + 2], | |
2498 | bp[i + 3], bp[i + 4], bp[i + 5], | |
2499 | bp[i + 6], bp[i + 7]); | |
2500 | if (i == 0) | |
2501 | i = 64 - 8; | |
2502 | } | |
2503 | } | |
2504 | ||
2505 | if (skmsg->length > 256) | |
2506 | qcmd |= FIT_QCMD_MSGSIZE_512; | |
2507 | else if (skmsg->length > 128) | |
2508 | qcmd |= FIT_QCMD_MSGSIZE_256; | |
2509 | else if (skmsg->length > 64) | |
2510 | qcmd |= FIT_QCMD_MSGSIZE_128; | |
2511 | else | |
2512 | /* | |
2513 | * This makes no sense because the FIT msg header is | |
2514 | * 64 bytes. If the msg is only 64 bytes long it has | |
2515 | * no payload. | |
2516 | */ | |
2517 | qcmd |= FIT_QCMD_MSGSIZE_64; | |
2518 | ||
2519 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | |
2520 | ||
2521 | } | |
2522 | ||
2523 | static void skd_send_special_fitmsg(struct skd_device *skdev, | |
2524 | struct skd_special_context *skspcl) | |
2525 | { | |
2526 | u64 qcmd; | |
2527 | ||
2528 | if (unlikely(skdev->dbg_level > 1)) { | |
2529 | u8 *bp = (u8 *)skspcl->msg_buf; | |
2530 | int i; | |
2531 | ||
2532 | for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) { | |
2533 | VPRINTK(skdev, | |
2534 | " spcl[%2d] %02x %02x %02x %02x " | |
2535 | "%02x %02x %02x %02x\n", i, | |
2536 | bp[i + 0], bp[i + 1], bp[i + 2], bp[i + 3], | |
2537 | bp[i + 4], bp[i + 5], bp[i + 6], bp[i + 7]); | |
2538 | if (i == 0) | |
2539 | i = 64 - 8; | |
2540 | } | |
2541 | ||
2542 | VPRINTK(skdev, "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", | |
2543 | skspcl, skspcl->req.id, skspcl->req.sksg_list, | |
2544 | skspcl->req.sksg_dma_address); | |
2545 | for (i = 0; i < skspcl->req.n_sg; i++) { | |
2546 | struct fit_sg_descriptor *sgd = | |
2547 | &skspcl->req.sksg_list[i]; | |
2548 | ||
2549 | VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x " | |
2550 | "addr=0x%llx next=0x%llx\n", | |
2551 | i, sgd->byte_count, sgd->control, | |
2552 | sgd->host_side_addr, sgd->next_desc_ptr); | |
2553 | } | |
2554 | } | |
2555 | ||
2556 | /* | |
2557 | * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr | |
2558 | * and one 64-byte SSDI command. | |
2559 | */ | |
2560 | qcmd = skspcl->mb_dma_address; | |
2561 | qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128; | |
2562 | ||
2563 | SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND); | |
2564 | } | |
2565 | ||
2566 | /* | |
2567 | ***************************************************************************** | |
2568 | * COMPLETION QUEUE | |
2569 | ***************************************************************************** | |
2570 | */ | |
2571 | ||
2572 | static void skd_complete_other(struct skd_device *skdev, | |
2573 | volatile struct fit_completion_entry_v1 *skcomp, | |
2574 | volatile struct fit_comp_error_info *skerr); | |
2575 | ||
2576 | ||
2577 | static void skd_requeue_request(struct skd_device *skdev, | |
2578 | struct skd_request_context *skreq); | |
2579 | ||
2580 | struct sns_info { | |
2581 | u8 type; | |
2582 | u8 stat; | |
2583 | u8 key; | |
2584 | u8 asc; | |
2585 | u8 ascq; | |
2586 | u8 mask; | |
2587 | enum skd_check_status_action action; | |
2588 | }; | |
2589 | ||
2590 | static struct sns_info skd_chkstat_table[] = { | |
2591 | /* Good */ | |
2592 | { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c, | |
2593 | SKD_CHECK_STATUS_REPORT_GOOD }, | |
2594 | ||
2595 | /* Smart alerts */ | |
2596 | { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */ | |
2597 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2598 | { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */ | |
2599 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2600 | { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */ | |
2601 | SKD_CHECK_STATUS_REPORT_SMART_ALERT }, | |
2602 | ||
2603 | /* Retry (with limits) */ | |
2604 | { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */ | |
2605 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2606 | { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */ | |
2607 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2608 | { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */ | |
2609 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2610 | { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */ | |
2611 | SKD_CHECK_STATUS_REQUEUE_REQUEST }, | |
2612 | ||
2613 | /* Busy (or about to be) */ | |
2614 | { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */ | |
2615 | SKD_CHECK_STATUS_BUSY_IMMINENT }, | |
2616 | }; | |
2617 | ||
2618 | /* | |
2619 | * Look up status and sense data to decide how to handle the error | |
2620 | * from the device. | |
2621 | * mask says which fields must match e.g., mask=0x18 means check | |
2622 | * type and stat, ignore key, asc, ascq. | |
2623 | */ | |
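/*
 * Mask bits: 0x10 = type, 0x08 = stat, 0x04 = key, 0x02 = asc,
 * 0x01 = ascq. So mask 0x1E in the table above matches everything
 * except ascq, and 0x1F requires all five fields to match.
 */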
2624 | ||
2625 | static enum skd_check_status_action skd_check_status(struct skd_device *skdev, | |
2626 | u8 cmp_status, | |
2627 | volatile struct fit_comp_error_info *skerr) | |
2628 | { | |
2629 | int i, n; | |
2630 | ||
2631 | pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n", | |
2632 | skd_name(skdev), skerr->key, skerr->code, skerr->qual, | |
2633 | skerr->fruc); | |
2634 | ||
2635 | VPRINTK(skdev, "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x " | |
2636 | "fruc=%02x\n", skerr->type, cmp_status, skerr->key, | |
2637 | skerr->code, skerr->qual, skerr->fruc); | |
2638 | ||
2639 | /* Does the info match an entry in the good category? */ | |
2640 | n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]); | |
2641 | for (i = 0; i < n; i++) { | |
2642 | struct sns_info *sns = &skd_chkstat_table[i]; | |
2643 | ||
2644 | if (sns->mask & 0x10) | |
2645 | if (skerr->type != sns->type) | |
2646 | continue; | |
2647 | ||
2648 | if (sns->mask & 0x08) | |
2649 | if (cmp_status != sns->stat) | |
2650 | continue; | |
2651 | ||
2652 | if (sns->mask & 0x04) | |
2653 | if (skerr->key != sns->key) | |
2654 | continue; | |
2655 | ||
2656 | if (sns->mask & 0x02) | |
2657 | if (skerr->code != sns->asc) | |
2658 | continue; | |
2659 | ||
2660 | if (sns->mask & 0x01) | |
2661 | if (skerr->qual != sns->ascq) | |
2662 | continue; | |
2663 | ||
2664 | if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) { | |
2665 | pr_err("(%s): SMART Alert: sense key/asc/ascq " | |
2666 | "%02x/%02x/%02x\n", | |
2667 | skd_name(skdev), skerr->key, | |
2668 | skerr->code, skerr->qual); | |
2669 | } | |
2670 | return sns->action; | |
2671 | } | |
2672 | ||
2673 | /* No other match, so nonzero status means error, | |
2674 | * zero status means good | |
2675 | */ | |
2676 | if (cmp_status) { | |
2677 | DPRINTK(skdev, "status check: error\n"); | |
2678 | return SKD_CHECK_STATUS_REPORT_ERROR; | |
2679 | } | |
2680 | ||
2681 | DPRINTK(skdev, "status check good default\n"); | |
2682 | return SKD_CHECK_STATUS_REPORT_GOOD; | |
2683 | } | |
2684 | ||
2685 | static void skd_resolve_req_exception(struct skd_device *skdev, | |
2686 | struct skd_request_context *skreq) | |
2687 | { | |
2688 | u8 cmp_status = skreq->completion.status; | |
2689 | ||
2690 | switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) { | |
2691 | case SKD_CHECK_STATUS_REPORT_GOOD: | |
2692 | case SKD_CHECK_STATUS_REPORT_SMART_ALERT: | |
2693 | skd_end_request(skdev, skreq, 0); | |
2694 | break; | |
2695 | ||
2696 | case SKD_CHECK_STATUS_BUSY_IMMINENT: | |
2697 | skd_log_skreq(skdev, skreq, "retry(busy)"); | |
2698 | skd_requeue_request(skdev, skreq); | |
2699 | pr_info("(%s) drive BUSY imminent\n", skd_name(skdev)); | |
2700 | skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT; | |
2701 | skdev->timer_countdown = SKD_TIMER_MINUTES(20); | |
2702 | skd_quiesce_dev(skdev); | |
2703 | break; | |
2704 | ||
2705 | case SKD_CHECK_STATUS_REQUEUE_REQUEST: | |
2706 | if (!skd_bio) { | |
2707 | if ((unsigned long) ++skreq->req->special < | |
2708 | SKD_MAX_RETRIES) { | |
2709 | skd_log_skreq(skdev, skreq, "retry"); | |
2710 | skd_requeue_request(skdev, skreq); | |
2711 | break; | |
2712 | } | |
2713 | } | |
2714 | /* fall through to report error */ | |
2715 | ||
2716 | case SKD_CHECK_STATUS_REPORT_ERROR: | |
2717 | default: | |
2718 | skd_end_request(skdev, skreq, -EIO); | |
2719 | break; | |
2720 | } | |
2721 | } | |
2722 | ||
2723 | static void skd_requeue_request(struct skd_device *skdev, | |
2724 | struct skd_request_context *skreq) | |
2725 | { | |
2726 | if (!skd_bio) { | |
2727 | blk_requeue_request(skdev->queue, skreq->req); | |
2728 | } else { | |
2729 | bio_list_add_head(&skdev->bio_queue, skreq->bio); | |
2730 | skreq->bio = NULL; | |
2731 | } | |
2732 | } | |
2733 | ||
2734 | ||
2735 | ||
2736 | /* assume spinlock is already held */ | |
2737 | static void skd_release_skreq(struct skd_device *skdev, | |
2738 | struct skd_request_context *skreq) | |
2739 | { | |
2740 | u32 msg_slot; | |
2741 | struct skd_fitmsg_context *skmsg; | |
2742 | ||
2743 | u32 timo_slot; | |
2744 | ||
2745 | /* | |
2746 | * Reclaim the FIT msg buffer if this is | |
2747 | * the first of the requests it carried to | |
2748 | * be completed. The FIT msg buffer used to | |
2749 | * send this request cannot be reused until | |
2750 | * we are sure the s1120 card has copied | |
2751 | * it to its memory. The FIT msg might have | |
2752 | * contained several requests. As soon as | |
2753 | * any of them are completed we know that | |
2754 | * the entire FIT msg was transferred. | |
2755 | * Only the first completed request will | |
2756 | * match the FIT msg buffer id. The FIT | |
2757 | * msg buffer id is immediately updated. | |
2758 | * When subsequent requests complete the FIT | |
2759 | * msg buffer id won't match, so we know | |
2760 | * quite cheaply that it is already done. | |
2761 | */ | |
2762 | msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK; | |
2763 | SKD_ASSERT(msg_slot < skdev->num_fitmsg_context); | |
2764 | ||
2765 | skmsg = &skdev->skmsg_table[msg_slot]; | |
2766 | if (skmsg->id == skreq->fitmsg_id) { | |
2767 | SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY); | |
2768 | SKD_ASSERT(skmsg->outstanding > 0); | |
2769 | skmsg->outstanding--; | |
2770 | if (skmsg->outstanding == 0) { | |
2771 | skmsg->state = SKD_MSG_STATE_IDLE; | |
2772 | skmsg->id += SKD_ID_INCR; | |
2773 | skmsg->next = skdev->skmsg_free_list; | |
2774 | skdev->skmsg_free_list = skmsg; | |
2775 | } | |
2776 | } | |
2777 | ||
2778 | /* | |
2779 | * Decrease the number of active requests. | |
2780 | * Also decrements the count in the timeout slot. | |
2781 | */ | |
2782 | SKD_ASSERT(skdev->in_flight > 0); | |
2783 | skdev->in_flight -= 1; | |
2784 | ||
2785 | timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK; | |
2786 | SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0); | |
2787 | skdev->timeout_slot[timo_slot] -= 1; | |
2788 | ||
2789 | /* | |
2790 | * Reset backpointer | |
2791 | */ | |
2792 | if (likely(!skd_bio)) | |
2793 | skreq->req = NULL; | |
2794 | else | |
2795 | skreq->bio = NULL; | |
2796 | ||
2797 | ||
2798 | /* | |
2799 | * Reclaim the skd_request_context | |
2800 | */ | |
2801 | skreq->state = SKD_REQ_STATE_IDLE; | |
2802 | skreq->id += SKD_ID_INCR; | |
2803 | skreq->next = skdev->skreq_free_list; | |
2804 | skdev->skreq_free_list = skreq; | |
2805 | } | |
2806 | ||
2807 | #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA | |
2808 | ||
2809 | static void skd_do_inq_page_00(struct skd_device *skdev, | |
2810 | volatile struct fit_completion_entry_v1 *skcomp, | |
2811 | volatile struct fit_comp_error_info *skerr, | |
2812 | uint8_t *cdb, uint8_t *buf) | |
2813 | { | |
2814 | uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size; | |
2815 | ||
2816 | /* Caller requested "supported pages". The driver needs to insert | |
2817 | * its page. | |
2818 | */ | |
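/*
 * Example: if the device reports supported pages 0x00, 0x80, 0x83,
 * the list below becomes 0x00, 0x80, 0x83, 0xDA (pages must stay in
 * ascending order) and the page length grows by one.
 */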
2819 | VPRINTK(skdev, "skd_do_driver_inquiry: modify supported pages.\n"); | |
2820 | ||
2821 | /* If the device rejected the request because the CDB was | |
2822 | * improperly formed, then just leave. | |
2823 | */ | |
2824 | if (skcomp->status == SAM_STAT_CHECK_CONDITION && | |
2825 | skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24) | |
2826 | return; | |
2827 | ||
2828 | /* Get the amount of space the caller allocated */ | |
2829 | max_bytes = (cdb[3] << 8) | cdb[4]; | |
2830 | ||
2831 | /* Get the number of pages actually returned by the device */ | |
2832 | drive_pages = (buf[2] << 8) | buf[3]; | |
2833 | drive_bytes = drive_pages + 4; | |
2834 | new_size = drive_pages + 1; | |
2835 | ||
2836 | /* Supported pages must be in numerical order, so find where | |
2837 | * the driver page needs to be inserted into the list of | |
2838 | * pages returned by the device. | |
2839 | */ | |
2840 | for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) { | |
2841 | if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE) | |
2842 | return; /* Device already uses this page code; abort. */ | |
2843 | else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE) | |
2844 | break; | |
2845 | } | |
2846 | ||
2847 | if (insert_pt < max_bytes) { | |
2848 | uint16_t u; | |
2849 | ||
2850 | /* Shift everything up one byte to make room. */ | |
2851 | for (u = new_size + 3; u > insert_pt; u--) | |
2852 | buf[u] = buf[u - 1]; | |
2853 | buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE; | |
2854 | ||
2855 | /* SCSI byte order increment of num_returned_bytes by 1 */ | |
2856 | skcomp->num_returned_bytes = | |
2857 | be32_to_cpu(skcomp->num_returned_bytes) + 1; | |
2858 | skcomp->num_returned_bytes = | |
2859 | cpu_to_be32(skcomp->num_returned_bytes); | |
2860 | } | |
2861 | ||
2862 | /* update page length field to reflect the driver's page too */ | |
2863 | buf[2] = (uint8_t)((new_size >> 8) & 0xFF); | |
2864 | buf[3] = (uint8_t)((new_size >> 0) & 0xFF); | |
2865 | } | |
2866 | ||
2867 | static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width) | |
2868 | { | |
2869 | int pcie_reg; | |
2870 | u16 pci_bus_speed; | |
2871 | u8 pci_lanes; | |
2872 | ||
2873 | pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); | |
2874 | if (pcie_reg) { | |
2875 | u16 linksta; | |
2876 | pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta); | |
2877 | ||
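/* Per the PCIe spec, LNKSTA bits 3:0 hold the current link speed
 * and bits 9:4 the negotiated link width. */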
2878 | pci_bus_speed = linksta & 0xF; | |
2879 | pci_lanes = (linksta & 0x3F0) >> 4; | |
2880 | } else { | |
2881 | *speed = STEC_LINK_UNKNOWN; | |
2882 | *width = 0xFF; | |
2883 | return; | |
2884 | } | |
2885 | ||
2886 | switch (pci_bus_speed) { | |
2887 | case 1: | |
2888 | *speed = STEC_LINK_2_5GTS; | |
2889 | break; | |
2890 | case 2: | |
2891 | *speed = STEC_LINK_5GTS; | |
2892 | break; | |
2893 | case 3: | |
2894 | *speed = STEC_LINK_8GTS; | |
2895 | break; | |
2896 | default: | |
2897 | *speed = STEC_LINK_UNKNOWN; | |
2898 | break; | |
2899 | } | |
2900 | ||
2901 | if (pci_lanes <= 0x20) | |
2902 | *width = pci_lanes; | |
2903 | else | |
2904 | *width = 0xFF; | |
2905 | } | |
2906 | ||
2907 | static void skd_do_inq_page_da(struct skd_device *skdev, | |
2908 | volatile struct fit_completion_entry_v1 *skcomp, | |
2909 | volatile struct fit_comp_error_info *skerr, | |
2910 | uint8_t *cdb, uint8_t *buf) | |
2911 | { | |
2912 | unsigned max_bytes; | |
2913 | struct driver_inquiry_data inq; | |
2914 | u16 val; | |
2915 | ||
2916 | VPRINTK(skdev, "skd_do_driver_inquiry: return driver page\n"); | |
2917 | ||
2918 | memset(&inq, 0, sizeof(inq)); | |
2919 | ||
2920 | inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE; | |
2921 | ||
2922 | if (skdev->pdev && skdev->pdev->bus) { | |
2923 | skd_get_link_info(skdev->pdev, | |
2924 | &inq.pcie_link_speed, &inq.pcie_link_lanes); | |
2925 | inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number); | |
2926 | inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn); | |
2927 | inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn); | |
2928 | ||
2929 | pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val); | |
2930 | inq.pcie_vendor_id = cpu_to_be16(val); | |
2931 | ||
2932 | pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val); | |
2933 | inq.pcie_device_id = cpu_to_be16(val); | |
2934 | ||
2935 | pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID, | |
2936 | &val); | |
2937 | inq.pcie_subsystem_vendor_id = cpu_to_be16(val); | |
2938 | ||
2939 | pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val); | |
2940 | inq.pcie_subsystem_device_id = cpu_to_be16(val); | |
2941 | } else { | |
2942 | inq.pcie_bus_number = 0xFFFF; | |
2943 | inq.pcie_device_number = 0xFF; | |
2944 | inq.pcie_function_number = 0xFF; | |
2945 | inq.pcie_link_speed = 0xFF; | |
2946 | inq.pcie_link_lanes = 0xFF; | |
2947 | inq.pcie_vendor_id = 0xFFFF; | |
2948 | inq.pcie_device_id = 0xFFFF; | |
2949 | inq.pcie_subsystem_vendor_id = 0xFFFF; | |
2950 | inq.pcie_subsystem_device_id = 0xFFFF; | |
2951 | } | |
2952 | ||
2953 | /* Driver version, fixed length, padded with spaces on the right */ | |
2954 | inq.driver_version_length = sizeof(inq.driver_version); | |
2955 | memset(&inq.driver_version, ' ', sizeof(inq.driver_version)); | |
2956 | memcpy(inq.driver_version, DRV_VER_COMPL, | |
2957 | min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL))); | |
2958 | ||
2959 | inq.page_length = cpu_to_be16((sizeof(inq) - 4)); | |
2960 | ||
2961 | /* Clear the error set by the device */ | |
2962 | skcomp->status = SAM_STAT_GOOD; | |
2963 | memset((void *)skerr, 0, sizeof(*skerr)); | |
2964 | ||
2965 | /* copy response into output buffer */ | |
2966 | max_bytes = (cdb[3] << 8) | cdb[4]; | |
2967 | memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq))); | |
2968 | ||
2969 | skcomp->num_returned_bytes = | |
2970 | cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq))); | |
2971 | } | |
2972 | ||
2973 | static void skd_do_driver_inq(struct skd_device *skdev, | |
2974 | volatile struct fit_completion_entry_v1 *skcomp, | |
2975 | volatile struct fit_comp_error_info *skerr, | |
2976 | uint8_t *cdb, uint8_t *buf) | |
2977 | { | |
2978 | if (!buf) | |
2979 | return; | |
2980 | else if (cdb[0] != INQUIRY) | |
2981 | return; /* Not an INQUIRY */ | |
2982 | else if ((cdb[1] & 1) == 0) | |
2983 | return; /* EVPD not set */ | |
2984 | else if (cdb[2] == 0) | |
2985 | /* Need to add driver's page to supported pages list */ | |
2986 | skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf); | |
2987 | else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE) | |
2988 | /* Caller requested driver's page */ | |
2989 | skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf); | |
2990 | } | |
2991 | ||
2992 | static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg) | |
2993 | { | |
2994 | if (!sg) | |
2995 | return NULL; | |
2996 | if (!sg_page(sg)) | |
2997 | return NULL; | |
2998 | return sg_virt(sg); | |
2999 | } | |
3000 | ||
3001 | static void skd_process_scsi_inq(struct skd_device *skdev, | |
3002 | volatile struct fit_completion_entry_v1 | |
3003 | *skcomp, | |
3004 | volatile struct fit_comp_error_info *skerr, | |
3005 | struct skd_special_context *skspcl) | |
3006 | { | |
3007 | uint8_t *buf; | |
3008 | struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf; | |
3009 | struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1]; | |
3010 | ||
3011 | dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg, | |
3012 | skspcl->req.sg_data_dir); | |
3013 | buf = skd_sg_1st_page_ptr(skspcl->req.sg); | |
3014 | ||
3015 | if (buf) | |
3016 | skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf); | |
3017 | } | |
3018 | ||
3019 | ||
3020 | static int skd_isr_completion_posted(struct skd_device *skdev, | |
3021 | int limit, int *enqueued) | |
3022 | { | |
3023 | volatile struct fit_completion_entry_v1 *skcmp = NULL; | |
3024 | volatile struct fit_comp_error_info *skerr; | |
3025 | u16 req_id; | |
3026 | u32 req_slot; | |
3027 | struct skd_request_context *skreq; | |
3028 | u16 cmp_cntxt = 0; | |
3029 | u8 cmp_status = 0; | |
3030 | u8 cmp_cycle = 0; | |
3031 | u32 cmp_bytes = 0; | |
3032 | int rc = 0; | |
3033 | int processed = 0; | |
3034 | int ret; | |
3035 | ||
3036 | ||
3037 | for (;;) { | |
3038 | SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY); | |
3039 | ||
3040 | skcmp = &skdev->skcomp_table[skdev->skcomp_ix]; | |
3041 | cmp_cycle = skcmp->cycle; | |
3042 | cmp_cntxt = skcmp->tag; | |
3043 | cmp_status = skcmp->status; | |
3044 | cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes); | |
3045 | ||
3046 | skerr = &skdev->skerr_table[skdev->skcomp_ix]; | |
3047 | ||
3048 | VPRINTK(skdev, | |
3049 | "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d " | |
3050 | "busy=%d rbytes=0x%x proto=%d\n", skdev->skcomp_cycle, | |
3051 | skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status, | |
3052 | skdev->in_flight, cmp_bytes, skdev->proto_ver); | |
3053 | ||
3054 | if (cmp_cycle != skdev->skcomp_cycle) { | |
3055 | VPRINTK(skdev, "end of completions\n"); | |
3056 | break; | |
3057 | } | |
3058 | /* | |
3059 | * Update the completion queue head index and possibly | |
3060 | * the completion cycle count. 8-bit wrap-around. | |
3061 | */ | |
3062 | skdev->skcomp_ix++; | |
3063 | if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) { | |
3064 | skdev->skcomp_ix = 0; | |
3065 | skdev->skcomp_cycle++; | |
3066 | } | |
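/*
 * Example: once skcomp_ix wraps, skcomp_cycle increments, so stale
 * entries written during the previous pass fail the cmp_cycle test
 * above and the scan stops at the true end of the queue.
 */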
3067 | ||
3068 | /* | |
3069 | * The command context is a unique 32-bit ID. The low order | |
3070 | * bits help locate the request. The request is usually a | |
3071 | * r/w request (see skd_start() above) or a special request. | |
3072 | */ | |
3073 | req_id = cmp_cntxt; | |
3074 | req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK; | |
3075 | ||
3076 | /* Is this other than a r/w request? */ | |
3077 | if (req_slot >= skdev->num_req_context) { | |
3078 | /* | |
3079 | * This is not a completion for a r/w request. | |
3080 | */ | |
3081 | skd_complete_other(skdev, skcmp, skerr); | |
3082 | continue; | |
3083 | } | |
3084 | ||
3085 | skreq = &skdev->skreq_table[req_slot]; | |
3086 | ||
3087 | /* | |
3088 | * Make sure the request ID for the slot matches. | |
3089 | */ | |
3090 | if (skreq->id != req_id) { | |
3091 | u16 new_id = cmp_cntxt; | |
3092 | ||
3093 | DPRINTK(skdev, "mismatch comp_id=0x%x req_id=0x%x\n", | |
3094 | req_id, skreq->id); | |
3095 | pr_err("(%s): Completion mismatch " | |
3096 | "comp_id=0x%04x skreq=0x%04x " | |
3097 | "new=0x%04x\n", | |
3098 | skd_name(skdev), req_id, | |
3099 | skreq->id, new_id); | |
3100 | ||
3101 | continue; | |
3102 | } | |
3103 | ||
3104 | SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY); | |
3105 | ||
3106 | if (skreq->state == SKD_REQ_STATE_ABORTED) { | |
3107 | DPRINTK(skdev, "reclaim req %p id=%04x\n", | |
3108 | skreq, skreq->id); | |
3109 | /* a previously timed out command can | |
3110 | * now be cleaned up */ | |
3111 | skd_release_skreq(skdev, skreq); | |
3112 | continue; | |
3113 | } | |
3114 | ||
3115 | skreq->completion = *skcmp; | |
3116 | if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) { | |
3117 | skreq->err_info = *skerr; | |
3118 | skd_log_check_status(skdev, cmp_status, skerr->key, | |
3119 | skerr->code, skerr->qual, | |
3120 | skerr->fruc); | |
3121 | } | |
3122 | /* Release DMA resources for the request. */ | |
3123 | if (skreq->n_sg > 0) | |
3124 | skd_postop_sg_list(skdev, skreq); | |
3125 | ||
3126 | if (((!skd_bio) && !skreq->req) || | |
3127 | ((skd_bio) && !skreq->bio)) { | |
3128 | DPRINTK(skdev, "NULL backptr skdreq %p, " | |
3129 | "req=0x%x req_id=0x%x\n", | |
3130 | skreq, skreq->id, req_id); | |
3131 | } else { | |
3132 | /* | |
3133 | * Capture the outcome and post it back to the | |
3134 | * native request. | |
3135 | */ | |
3136 | if (likely(cmp_status == SAM_STAT_GOOD)) { | |
3137 | if (unlikely(skreq->flush_cmd)) { | |
3138 | if (skd_bio) { | |
3139 | /* if empty size bio, we are all done */ | |
3140 | if (bio_sectors(skreq->bio) == 0) { | |
3141 | skd_end_request(skdev, skreq, 0); | |
3142 | } else { | |
3143 | ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio); | |
3144 | if (ret != 0) { | |
3145 | pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret); | |
3146 | skd_end_request(skdev, skreq, ret); | |
3147 | } else { | |
3148 | (*enqueued)++; | |
3149 | } | |
3150 | } | |
3151 | } else { | |
3152 | skd_end_request(skdev, skreq, 0); | |
3153 | } | |
3154 | } else { | |
3155 | skd_end_request(skdev, skreq, 0); | |
3156 | } | |
3157 | } else { | |
3158 | skd_resolve_req_exception(skdev, skreq); | |
3159 | } | |
3160 | } | |
3161 | ||
3162 | /* | |
3163 | * Release the skreq, its FIT msg (if one), timeout slot, | |
3164 | * and queue depth. | |
3165 | */ | |
3166 | skd_release_skreq(skdev, skreq); | |
3167 | ||
3168 | /* skd_isr_comp_limit equal to zero means no limit */ | |
3169 | if (limit) { | |
3170 | if (++processed >= limit) { | |
3171 | rc = 1; | |
3172 | break; | |
3173 | } | |
3174 | } | |
3175 | } | |
3176 | ||
3177 | if ((skdev->state == SKD_DRVR_STATE_PAUSING) | |
3178 | && (skdev->in_flight) == 0) { | |
3179 | skdev->state = SKD_DRVR_STATE_PAUSED; | |
3180 | wake_up_interruptible(&skdev->waitq); | |
3181 | } | |
3182 | ||
3183 | return rc; | |
3184 | } | |
3185 | ||
3186 | static void skd_complete_other(struct skd_device *skdev, | |
3187 | volatile struct fit_completion_entry_v1 *skcomp, | |
3188 | volatile struct fit_comp_error_info *skerr) | |
3189 | { | |
3190 | u32 req_id = 0; | |
3191 | u32 req_table; | |
3192 | u32 req_slot; | |
3193 | struct skd_special_context *skspcl; | |
3194 | ||
3195 | req_id = skcomp->tag; | |
3196 | req_table = req_id & SKD_ID_TABLE_MASK; | |
3197 | req_slot = req_id & SKD_ID_SLOT_MASK; | |
3198 | ||
3199 | DPRINTK(skdev, "table=0x%x id=0x%x slot=%d\n", req_table, req_id, | |
3200 | req_slot); | |
3201 | ||
3202 | /* | |
3203 | * Based on the request id, determine how to dispatch this completion. | |
3204 | * This switch/case is finding the good cases and forwarding the | |
3205 | * completion entry. Errors are reported below the switch. | |
3206 | */ | |
3207 | switch (req_table) { | |
3208 | case SKD_ID_RW_REQUEST: | |
3209 | /* | |
3210 | * The caller, skd_completion_posted_isr() above, | |
3211 | * handles r/w requests. The only way we get here | |
3212 | * is if the req_slot is out of bounds. | |
3213 | */ | |
3214 | break; | |
3215 | ||
3216 | case SKD_ID_SPECIAL_REQUEST: | |
3217 | /* | |
3218 | * Make sure the req_slot is in bounds and that the id | |
3219 | * matches. | |
3220 | */ | |
3221 | if (req_slot < skdev->n_special) { | |
3222 | skspcl = &skdev->skspcl_table[req_slot]; | |
3223 | if (skspcl->req.id == req_id && | |
3224 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
3225 | skd_complete_special(skdev, | |
3226 | skcomp, skerr, skspcl); | |
3227 | return; | |
3228 | } | |
3229 | } | |
3230 | break; | |
3231 | ||
3232 | case SKD_ID_INTERNAL: | |
3233 | if (req_slot == 0) { | |
3234 | skspcl = &skdev->internal_skspcl; | |
3235 | if (skspcl->req.id == req_id && | |
3236 | skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
3237 | skd_complete_internal(skdev, | |
3238 | skcomp, skerr, skspcl); | |
3239 | return; | |
3240 | } | |
3241 | } | |
3242 | break; | |
3243 | ||
3244 | case SKD_ID_FIT_MSG: | |
3245 | /* | |
3246 | * These ids should never appear in a completion record. | |
3247 | */ | |
3248 | break; | |
3249 | ||
3250 | default: | |
3251 | /* | |
3252 | * These id's should never appear anywhere; | |
3253 | */ | |
3254 | break; | |
3255 | } | |
3256 | ||
3257 | /* | |
3258 | * If we get here it is a bad or stale id. | |
3259 | */ | |
3260 | } | |
3261 | ||
3262 | static void skd_complete_special(struct skd_device *skdev, | |
3263 | volatile struct fit_completion_entry_v1 | |
3264 | *skcomp, | |
3265 | volatile struct fit_comp_error_info *skerr, | |
3266 | struct skd_special_context *skspcl) | |
3267 | { | |
3268 | DPRINTK(skdev, " completing special request %p\n", skspcl); | |
3269 | if (skspcl->orphaned) { | |
3270 | /* Discard orphaned request */ | |
3271 | /* ?: Can this release directly or does it need | |
3272 | * to use a worker? */ | |
3273 | DPRINTK(skdev, "release orphaned %p\n", skspcl); | |
3274 | skd_release_special(skdev, skspcl); | |
3275 | return; | |
3276 | } | |
3277 | ||
3278 | skd_process_scsi_inq(skdev, skcomp, skerr, skspcl); | |
3279 | ||
3280 | skspcl->req.state = SKD_REQ_STATE_COMPLETED; | |
3281 | skspcl->req.completion = *skcomp; | |
3282 | skspcl->req.err_info = *skerr; | |
3283 | ||
3284 | skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key, | |
3285 | skerr->code, skerr->qual, skerr->fruc); | |
3286 | ||
3287 | wake_up_interruptible(&skdev->waitq); | |
3288 | } | |
3289 | ||
3290 | /* assume spinlock is already held */ | |
3291 | static void skd_release_special(struct skd_device *skdev, | |
3292 | struct skd_special_context *skspcl) | |
3293 | { | |
3294 | int i, was_depleted; | |
3295 | ||
3296 | for (i = 0; i < skspcl->req.n_sg; i++) { | |
3297 | ||
3298 | struct page *page = sg_page(&skspcl->req.sg[i]); | |
3299 | __free_page(page); | |
3300 | } | |
3301 | ||
3302 | was_depleted = (skdev->skspcl_free_list == NULL); | |
3303 | ||
3304 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
3305 | skspcl->req.id += SKD_ID_INCR; | |
3306 | skspcl->req.next = | |
3307 | (struct skd_request_context *)skdev->skspcl_free_list; | |
3308 | skdev->skspcl_free_list = (struct skd_special_context *)skspcl; | |
3309 | ||
3310 | if (was_depleted) { | |
3311 | DPRINTK(skdev, "skspcl was depleted\n"); | |
3312 | /* Free list was depleted. There might be waiters. */ | |
3313 | wake_up_interruptible(&skdev->waitq); | |
3314 | } | |
3315 | } | |
3316 | ||
3317 | static void skd_reset_skcomp(struct skd_device *skdev) | |
3318 | { | |
3319 | u32 nbytes; | |
3320 | struct fit_completion_entry_v1 *skcomp; | |
3321 | ||
3322 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | |
3323 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | |
3324 | ||
3325 | memset(skdev->skcomp_table, 0, nbytes); | |
3326 | ||
3327 | skdev->skcomp_ix = 0; | |
3328 | skdev->skcomp_cycle = 1; | |
3329 | } | |
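| ||
| /* | |
| * A sketch of the cycle convention this reset establishes: the | |
| * consumer (skd_isr_completion_posted()) only accepts an entry whose | |
| * cycle value matches skcomp_cycle, and bumps skcomp_cycle each time | |
| * skcomp_ix wraps past SKD_N_COMPLETION_ENTRY, so entries left over | |
| * from the previous pass through the ring are never taken as new. | |
| */ | |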
3330 | ||
3331 | /* | |
3332 | ***************************************************************************** | |
3333 | * INTERRUPTS | |
3334 | ***************************************************************************** | |
3335 | */ | |
3336 | static void skd_completion_worker(struct work_struct *work) | |
3337 | { | |
3338 | struct skd_device *skdev = | |
3339 | container_of(work, struct skd_device, completion_worker); | |
3340 | unsigned long flags; | |
3341 | int flush_enqueued = 0; | |
3342 | ||
3343 | spin_lock_irqsave(&skdev->lock, flags); | |
3344 | ||
3345 | /* | |
3346 | * Pass in limit=0, which means no limit: | |
3347 | * process everything in the completion queue. | |
3348 | */ | |
3349 | skd_isr_completion_posted(skdev, 0, &flush_enqueued); | |
3350 | skd_request_fn(skdev->queue); | |
3351 | ||
3352 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3353 | } | |
3354 | ||
3355 | static void skd_isr_msg_from_dev(struct skd_device *skdev); | |
3356 | ||
3357 | static irqreturn_t | |
3358 | skd_isr(int irq, void *ptr) | |
3359 | { | |
3360 | struct skd_device *skdev; | |
3361 | u32 intstat; | |
3362 | u32 ack; | |
3363 | int rc = 0; | |
3364 | int deferred = 0; | |
3365 | int flush_enqueued = 0; | |
3366 | ||
3367 | skdev = (struct skd_device *)ptr; | |
3368 | spin_lock(&skdev->lock); | |
3369 | ||
3370 | for (;;) { | |
3371 | intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
3372 | ||
3373 | ack = FIT_INT_DEF_MASK; | |
3374 | ack &= intstat; | |
3375 | ||
3376 | VPRINTK(skdev, "intstat=0x%x ack=0x%x\n", intstat, ack); | |
3377 | ||
3378 | /* As long as an interrupt is pending on the device, keep | |
3379 | * looping. When none remain, get out; if we never did any | |
3380 | * processing, defer to the completion handler anyway. | |
3381 | */ | |
3382 | if (ack == 0) { | |
3383 | /* No interrupts on device, but run the completion | |
3384 | * processor anyway? | |
3385 | */ | |
3386 | if (rc == 0) | |
3387 | if (likely(skdev->state | |
3388 | == SKD_DRVR_STATE_ONLINE)) | |
3389 | deferred = 1; | |
3390 | break; | |
3391 | } | |
3392 | ||
3393 | rc = IRQ_HANDLED; | |
3394 | ||
3395 | SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST); | |
3396 | ||
3397 | if (likely((skdev->state != SKD_DRVR_STATE_LOAD) && | |
3398 | (skdev->state != SKD_DRVR_STATE_STOPPING))) { | |
3399 | if (intstat & FIT_ISH_COMPLETION_POSTED) { | |
3400 | /* | |
3401 | * If we have already deferred completion | |
3402 | * processing, don't bother running it again | |
3403 | */ | |
3404 | if (deferred == 0) | |
3405 | deferred = | |
3406 | skd_isr_completion_posted(skdev, | |
3407 | skd_isr_comp_limit, &flush_enqueued); | |
3408 | } | |
3409 | ||
3410 | if (intstat & FIT_ISH_FW_STATE_CHANGE) { | |
3411 | skd_isr_fwstate(skdev); | |
3412 | if (skdev->state == SKD_DRVR_STATE_FAULT || | |
3413 | skdev->state == | |
3414 | SKD_DRVR_STATE_DISAPPEARED) { | |
3415 | spin_unlock(&skdev->lock); | |
3416 | return rc; | |
3417 | } | |
3418 | } | |
3419 | ||
3420 | if (intstat & FIT_ISH_MSG_FROM_DEV) | |
3421 | skd_isr_msg_from_dev(skdev); | |
3422 | } | |
3423 | } | |
3424 | ||
3425 | if (unlikely(flush_enqueued)) | |
3426 | skd_request_fn(skdev->queue); | |
3427 | ||
3428 | if (deferred) | |
3429 | schedule_work(&skdev->completion_worker); | |
3430 | else if (!flush_enqueued) | |
3431 | skd_request_fn(skdev->queue); | |
3432 | ||
3433 | spin_unlock(&skdev->lock); | |
3434 | ||
3435 | return rc; | |
3436 | } | |
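| ||
| /* | |
| * Interrupt strategy, in brief: the hard irq handler above drains at | |
| * most skd_isr_comp_limit completions inline; anything beyond that | |
| * (or an empty ack loop while ONLINE) is deferred to | |
| * skd_completion_worker(), which re-runs the processor with limit=0. | |
| * Either way, flush requests that became runnable are kicked via | |
| * skd_request_fn(). | |
| */ | |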
3437 | ||
3438 | ||
3439 | static void skd_drive_fault(struct skd_device *skdev) | |
3440 | { | |
3441 | skdev->state = SKD_DRVR_STATE_FAULT; | |
3442 | pr_err("(%s): Drive FAULT\n", skd_name(skdev)); | |
3443 | } | |
3444 | ||
3445 | static void skd_drive_disappeared(struct skd_device *skdev) | |
3446 | { | |
3447 | skdev->state = SKD_DRVR_STATE_DISAPPEARED; | |
3448 | pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev)); | |
3449 | } | |
3450 | ||
3451 | static void skd_isr_fwstate(struct skd_device *skdev) | |
3452 | { | |
3453 | u32 sense; | |
3454 | u32 state; | |
3455 | u32 mtd; | |
3456 | int prev_driver_state = skdev->state; | |
3457 | ||
3458 | sense = SKD_READL(skdev, FIT_STATUS); | |
3459 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
3460 | ||
3461 | pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n", | |
3462 | skd_name(skdev), | |
3463 | skd_drive_state_to_str(skdev->drive_state), skdev->drive_state, | |
3464 | skd_drive_state_to_str(state), state); | |
3465 | ||
3466 | skdev->drive_state = state; | |
3467 | ||
3468 | switch (skdev->drive_state) { | |
3469 | case FIT_SR_DRIVE_INIT: | |
3470 | if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) { | |
3471 | skd_disable_interrupts(skdev); | |
3472 | break; | |
3473 | } | |
3474 | if (skdev->state == SKD_DRVR_STATE_RESTARTING) | |
3475 | skd_recover_requests(skdev, 0); | |
3476 | if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) { | |
3477 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
3478 | skdev->state = SKD_DRVR_STATE_STARTING; | |
3479 | skd_soft_reset(skdev); | |
3480 | break; | |
3481 | } | |
3482 | mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0); | |
3483 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3484 | skdev->last_mtd = mtd; | |
3485 | break; | |
3486 | ||
3487 | case FIT_SR_DRIVE_ONLINE: | |
3488 | skdev->cur_max_queue_depth = skd_max_queue_depth; | |
3489 | if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth) | |
3490 | skdev->cur_max_queue_depth = skdev->dev_max_queue_depth; | |
3491 | ||
3492 | skdev->queue_low_water_mark = | |
3493 | skdev->cur_max_queue_depth * 2 / 3 + 1; | |
3494 | if (skdev->queue_low_water_mark < 1) | |
3495 | skdev->queue_low_water_mark = 1; | |
3496 | pr_info( | |
3497 | "(%s): Queue depth limit=%d dev=%d lowat=%d\n", | |
3498 | skd_name(skdev), | |
3499 | skdev->cur_max_queue_depth, | |
3500 | skdev->dev_max_queue_depth, skdev->queue_low_water_mark); | |
3501 | ||
3502 | skd_refresh_device_data(skdev); | |
3503 | break; | |
3504 | ||
3505 | case FIT_SR_DRIVE_BUSY: | |
3506 | skdev->state = SKD_DRVR_STATE_BUSY; | |
3507 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
3508 | skd_quiesce_dev(skdev); | |
3509 | break; | |
3510 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
3511 | /* set timer for 3 seconds; we'll abort any unfinished | |
3512 | * commands after it expires | |
3513 | */ | |
3514 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | |
3515 | skdev->timer_countdown = SKD_TIMER_SECONDS(3); | |
3516 | skd_start_queue(skdev); | |
3517 | break; | |
3518 | case FIT_SR_DRIVE_BUSY_ERASE: | |
3519 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | |
3520 | skdev->timer_countdown = SKD_BUSY_TIMO; | |
3521 | break; | |
3522 | case FIT_SR_DRIVE_OFFLINE: | |
3523 | skdev->state = SKD_DRVR_STATE_IDLE; | |
3524 | break; | |
3525 | case FIT_SR_DRIVE_SOFT_RESET: | |
3526 | switch (skdev->state) { | |
3527 | case SKD_DRVR_STATE_STARTING: | |
3528 | case SKD_DRVR_STATE_RESTARTING: | |
3529 | /* Expected by a caller of skd_soft_reset() */ | |
3530 | break; | |
3531 | default: | |
3532 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
3533 | break; | |
3534 | } | |
3535 | break; | |
3536 | case FIT_SR_DRIVE_FW_BOOTING: | |
3537 | VPRINTK(skdev, "ISR FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); | |
3538 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | |
3539 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
3540 | break; | |
3541 | ||
3542 | case FIT_SR_DRIVE_DEGRADED: | |
3543 | case FIT_SR_PCIE_LINK_DOWN: | |
3544 | case FIT_SR_DRIVE_NEED_FW_DOWNLOAD: | |
3545 | break; | |
3546 | ||
3547 | case FIT_SR_DRIVE_FAULT: | |
3548 | skd_drive_fault(skdev); | |
3549 | skd_recover_requests(skdev, 0); | |
3550 | skd_start_queue(skdev); | |
3551 | break; | |
3552 | ||
3553 | /* PCIe bus returned all Fs? */ | |
3554 | case 0xFF: | |
3555 | pr_info("(%s): state=0x%x sense=0x%x\n", | |
3556 | skd_name(skdev), state, sense); | |
3557 | skd_drive_disappeared(skdev); | |
3558 | skd_recover_requests(skdev, 0); | |
3559 | skd_start_queue(skdev); | |
3560 | break; | |
3561 | default: | |
3562 | /* | |
3563 | * Unknown FW state. Wait for a state we recognize. | |
3564 | */ | |
3565 | break; | |
3566 | } | |
3567 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | |
3568 | skd_name(skdev), | |
3569 | skd_skdev_state_to_str(prev_driver_state), prev_driver_state, | |
3570 | skd_skdev_state_to_str(skdev->state), skdev->state); | |
3571 | } | |
3572 | ||
3573 | static void skd_recover_requests(struct skd_device *skdev, int requeue) | |
3574 | { | |
3575 | int i; | |
3576 | ||
3577 | for (i = 0; i < skdev->num_req_context; i++) { | |
3578 | struct skd_request_context *skreq = &skdev->skreq_table[i]; | |
3579 | ||
3580 | if (skreq->state == SKD_REQ_STATE_BUSY) { | |
3581 | skd_log_skreq(skdev, skreq, "recover"); | |
3582 | ||
3583 | SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0); | |
3584 | if (!skd_bio) | |
3585 | SKD_ASSERT(skreq->req != NULL); | |
3586 | else | |
3587 | SKD_ASSERT(skreq->bio != NULL); | |
3588 | ||
3589 | /* Release DMA resources for the request. */ | |
3590 | if (skreq->n_sg > 0) | |
3591 | skd_postop_sg_list(skdev, skreq); | |
3592 | ||
3593 | if (!skd_bio) { | |
3594 | if (requeue && | |
3595 | (unsigned long) ++skreq->req->special < | |
3596 | SKD_MAX_RETRIES) | |
3597 | skd_requeue_request(skdev, skreq); | |
3598 | else | |
3599 | skd_end_request(skdev, skreq, -EIO); | |
3600 | } else | |
3601 | skd_end_request(skdev, skreq, -EIO); | |
3602 | ||
3603 | if (!skd_bio) | |
3604 | skreq->req = NULL; | |
3605 | else | |
3606 | skreq->bio = NULL; | |
3607 | ||
3608 | skreq->state = SKD_REQ_STATE_IDLE; | |
3609 | skreq->id += SKD_ID_INCR; | |
3610 | ||
3611 | ||
3612 | } | |
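| /* Rebuild the free list in place: link entry i-1 to entry i as | |
| * we sweep, leaving the final entry's next pointer NULL. | |
| */ | |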
3613 | if (i > 0) | |
3614 | skreq[-1].next = skreq; | |
3615 | skreq->next = NULL; | |
3616 | } | |
3617 | skdev->skreq_free_list = skdev->skreq_table; | |
3618 | ||
3619 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
3620 | struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i]; | |
3621 | ||
3622 | if (skmsg->state == SKD_MSG_STATE_BUSY) { | |
3623 | skd_log_skmsg(skdev, skmsg, "salvaged"); | |
3624 | SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0); | |
3625 | skmsg->state = SKD_MSG_STATE_IDLE; | |
3626 | skmsg->id += SKD_ID_INCR; | |
3627 | } | |
3628 | if (i > 0) | |
3629 | skmsg[-1].next = skmsg; | |
3630 | skmsg->next = NULL; | |
3631 | } | |
3632 | skdev->skmsg_free_list = skdev->skmsg_table; | |
3633 | ||
3634 | for (i = 0; i < skdev->n_special; i++) { | |
3635 | struct skd_special_context *skspcl = &skdev->skspcl_table[i]; | |
3636 | ||
3637 | /* If orphaned, reclaim it because it has already been reported | |
3638 | * to the process as an error (it was just waiting for | |
3639 | * a completion that didn't come, and now it never will). | |
3640 | * If busy, change to a state that will cause it to error | |
3641 | * out in the wait routine, and let it do the normal | |
3642 | * reporting and reclaiming. | |
3643 | */ | |
3644 | if (skspcl->req.state == SKD_REQ_STATE_BUSY) { | |
3645 | if (skspcl->orphaned) { | |
3646 | DPRINTK(skdev, "orphaned %p\n", skspcl); | |
3647 | skd_release_special(skdev, skspcl); | |
3648 | } else { | |
3649 | DPRINTK(skdev, "not orphaned %p\n", skspcl); | |
3650 | skspcl->req.state = SKD_REQ_STATE_ABORTED; | |
3651 | } | |
3652 | } | |
3653 | } | |
3654 | skdev->skspcl_free_list = skdev->skspcl_table; | |
3655 | ||
3656 | for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++) | |
3657 | skdev->timeout_slot[i] = 0; | |
3658 | ||
3659 | skdev->in_flight = 0; | |
3660 | } | |
3661 | ||
3662 | static void skd_isr_msg_from_dev(struct skd_device *skdev) | |
3663 | { | |
3664 | u32 mfd; | |
3665 | u32 mtd; | |
3666 | u32 data; | |
3667 | ||
3668 | mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
3669 | ||
3670 | DPRINTK(skdev, "mfd=0x%x last_mtd=0x%x\n", mfd, skdev->last_mtd); | |
3671 | ||
3672 | /* ignore any mtd that is an ack for something we didn't send */ | |
3673 | if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd)) | |
3674 | return; | |
3675 | ||
3676 | switch (FIT_MXD_TYPE(mfd)) { | |
3677 | case FIT_MTD_FITFW_INIT: | |
3678 | skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd); | |
3679 | ||
3680 | if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) { | |
3681 | pr_err("(%s): protocol mismatch\n", | |
3682 | skdev->name); | |
3683 | pr_err("(%s): got=%d support=%d\n", | |
3684 | skdev->name, skdev->proto_ver, | |
3685 | FIT_PROTOCOL_VERSION_1); | |
3686 | pr_err("(%s): please upgrade driver\n", | |
3687 | skdev->name); | |
3688 | skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH; | |
3689 | skd_soft_reset(skdev); | |
3690 | break; | |
3691 | } | |
3692 | mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0); | |
3693 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3694 | skdev->last_mtd = mtd; | |
3695 | break; | |
3696 | ||
3697 | case FIT_MTD_GET_CMDQ_DEPTH: | |
3698 | skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd); | |
3699 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0, | |
3700 | SKD_N_COMPLETION_ENTRY); | |
3701 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3702 | skdev->last_mtd = mtd; | |
3703 | break; | |
3704 | ||
3705 | case FIT_MTD_SET_COMPQ_DEPTH: | |
3706 | SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG); | |
3707 | mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0); | |
3708 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3709 | skdev->last_mtd = mtd; | |
3710 | break; | |
3711 | ||
3712 | case FIT_MTD_SET_COMPQ_ADDR: | |
3713 | skd_reset_skcomp(skdev); | |
3714 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno); | |
3715 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3716 | skdev->last_mtd = mtd; | |
3717 | break; | |
3718 | ||
3719 | case FIT_MTD_CMD_LOG_HOST_ID: | |
3720 | skdev->connect_time_stamp = get_seconds(); | |
3721 | data = skdev->connect_time_stamp & 0xFFFF; | |
3722 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data); | |
3723 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3724 | skdev->last_mtd = mtd; | |
3725 | break; | |
3726 | ||
3727 | case FIT_MTD_CMD_LOG_TIME_STAMP_LO: | |
3728 | skdev->drive_jiffies = FIT_MXD_DATA(mfd); | |
3729 | data = (skdev->connect_time_stamp >> 16) & 0xFFFF; | |
3730 | mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data); | |
3731 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3732 | skdev->last_mtd = mtd; | |
3733 | break; | |
3734 | ||
3735 | case FIT_MTD_CMD_LOG_TIME_STAMP_HI: | |
3736 | skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16); | |
3737 | mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0); | |
3738 | SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE); | |
3739 | skdev->last_mtd = mtd; | |
3740 | ||
3741 | pr_err("(%s): Time sync driver=0x%x device=0x%x\n", | |
3742 | skd_name(skdev), | |
3743 | skdev->connect_time_stamp, skdev->drive_jiffies); | |
3744 | break; | |
3745 | ||
3746 | case FIT_MTD_ARM_QUEUE: | |
3747 | skdev->last_mtd = 0; | |
3748 | /* | |
3749 | * State should be, or soon will be, FIT_SR_DRIVE_ONLINE. | |
3750 | */ | |
3751 | break; | |
3752 | ||
3753 | default: | |
3754 | break; | |
3755 | } | |
3756 | } | |
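| ||
| /* | |
| * The handshake above is a strict request/response chain; each case | |
| * posts the next FIT_MTD_* message and records it in last_mtd so that | |
| * stray acks are ignored: | |
| * | |
| *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR | |
| *     -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO -> _HI -> ARM_QUEUE | |
| */ | |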
3757 | ||
3758 | static void skd_disable_interrupts(struct skd_device *skdev) | |
3759 | { | |
3760 | u32 sense; | |
3761 | ||
3762 | sense = SKD_READL(skdev, FIT_CONTROL); | |
3763 | sense &= ~FIT_CR_ENABLE_INTERRUPTS; | |
3764 | SKD_WRITEL(skdev, sense, FIT_CONTROL); | |
3765 | DPRINTK(skdev, "sense 0x%x\n", sense); | |
3766 | ||
3767 | /* Note that all 1s are written. A 1 bit means | |
3768 | * disable, a 0 bit means enable. | |
3769 | */ | |
3770 | SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST); | |
3771 | } | |
3772 | ||
3773 | static void skd_enable_interrupts(struct skd_device *skdev) | |
3774 | { | |
3775 | u32 val; | |
3776 | ||
3777 | /* unmask interrupts first */ | |
3778 | val = FIT_ISH_FW_STATE_CHANGE + | |
3779 | FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV; | |
3780 | ||
3781 | /* Note that the complement of the mask is written. A 1 bit | |
3782 | * means disable, a 0 bit means enable. */ | |
3783 | SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST); | |
3784 | DPRINTK(skdev, "interrupt mask=0x%x\n", ~val); | |
3785 | ||
3786 | val = SKD_READL(skdev, FIT_CONTROL); | |
3787 | val |= FIT_CR_ENABLE_INTERRUPTS; | |
3788 | DPRINTK(skdev, "control=0x%x\n", val); | |
3789 | SKD_WRITEL(skdev, val, FIT_CONTROL); | |
3790 | } | |
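| ||
| /* | |
| * E.g. with only the three sources above unmasked, the value written | |
| * to FIT_INT_MASK_HOST is ~(FIT_ISH_FW_STATE_CHANGE + | |
| * FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV): every other | |
| * bit stays 1, i.e. masked off. | |
| */ | |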
3791 | ||
3792 | /* | |
3793 | ***************************************************************************** | |
3794 | * START, STOP, RESTART, QUIESCE, UNQUIESCE | |
3795 | ***************************************************************************** | |
3796 | */ | |
3797 | ||
3798 | static void skd_soft_reset(struct skd_device *skdev) | |
3799 | { | |
3800 | u32 val; | |
3801 | ||
3802 | val = SKD_READL(skdev, FIT_CONTROL); | |
3803 | val |= (FIT_CR_SOFT_RESET); | |
3804 | DPRINTK(skdev, "control=0x%x\n", val); | |
3805 | SKD_WRITEL(skdev, val, FIT_CONTROL); | |
3806 | } | |
3807 | ||
3808 | static void skd_start_device(struct skd_device *skdev) | |
3809 | { | |
3810 | unsigned long flags; | |
3811 | u32 sense; | |
3812 | u32 state; | |
3813 | ||
3814 | spin_lock_irqsave(&skdev->lock, flags); | |
3815 | ||
3816 | /* ack all ghost interrupts */ | |
3817 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
3818 | ||
3819 | sense = SKD_READL(skdev, FIT_STATUS); | |
3820 | ||
3821 | DPRINTK(skdev, "initial status=0x%x\n", sense); | |
3822 | ||
3823 | state = sense & FIT_SR_DRIVE_STATE_MASK; | |
3824 | skdev->drive_state = state; | |
3825 | skdev->last_mtd = 0; | |
3826 | ||
3827 | skdev->state = SKD_DRVR_STATE_STARTING; | |
3828 | skdev->timer_countdown = SKD_STARTING_TIMO; | |
3829 | ||
3830 | skd_enable_interrupts(skdev); | |
3831 | ||
3832 | switch (skdev->drive_state) { | |
3833 | case FIT_SR_DRIVE_OFFLINE: | |
3834 | pr_err("(%s): Drive offline...\n", skd_name(skdev)); | |
3835 | break; | |
3836 | ||
3837 | case FIT_SR_DRIVE_FW_BOOTING: | |
3838 | VPRINTK(skdev, "FIT_SR_DRIVE_FW_BOOTING %s\n", skdev->name); | |
3839 | skdev->state = SKD_DRVR_STATE_WAIT_BOOT; | |
3840 | skdev->timer_countdown = SKD_WAIT_BOOT_TIMO; | |
3841 | break; | |
3842 | ||
3843 | case FIT_SR_DRIVE_BUSY_SANITIZE: | |
3844 | pr_info("(%s): Start: BUSY_SANITIZE\n", | |
3845 | skd_name(skdev)); | |
3846 | skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE; | |
3847 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3848 | break; | |
3849 | ||
3850 | case FIT_SR_DRIVE_BUSY_ERASE: | |
3851 | pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev)); | |
3852 | skdev->state = SKD_DRVR_STATE_BUSY_ERASE; | |
3853 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3854 | break; | |
3855 | ||
3856 | case FIT_SR_DRIVE_INIT: | |
3857 | case FIT_SR_DRIVE_ONLINE: | |
3858 | skd_soft_reset(skdev); | |
3859 | break; | |
3860 | ||
3861 | case FIT_SR_DRIVE_BUSY: | |
3862 | pr_err("(%s): Drive Busy...\n", skd_name(skdev)); | |
3863 | skdev->state = SKD_DRVR_STATE_BUSY; | |
3864 | skdev->timer_countdown = SKD_STARTED_BUSY_TIMO; | |
3865 | break; | |
3866 | ||
3867 | case FIT_SR_DRIVE_SOFT_RESET: | |
3868 | pr_err("(%s) drive soft reset in prog\n", | |
3869 | skd_name(skdev)); | |
3870 | break; | |
3871 | ||
3872 | case FIT_SR_DRIVE_FAULT: | |
3873 | /* Fault state is bad: a soft reset won't clear it. | |
3874 | * A hard reset might, but does the device support one? | |
3875 | * For now, just fault so the system doesn't hang. | |
3876 | */ | |
3877 | skd_drive_fault(skdev); | |
3878 | /* start the queue so we can respond with errors to requests */ | |
3879 | VPRINTK(skdev, "starting %s queue\n", skdev->name); | |
3880 | skd_start_queue(skdev); | |
3881 | skdev->gendisk_on = -1; | |
3882 | wake_up_interruptible(&skdev->waitq); | |
3883 | break; | |
3884 | ||
3885 | case 0xFF: | |
3886 | /* Most likely the device isn't there or isn't responding | |
3887 | * to the BAR1 addresses. */ | |
3888 | skd_drive_disappeared(skdev); | |
3889 | /* start the queue so we can respond with errors to requests */ | |
3890 | VPRINTK(skdev, "starting %s queue to error-out reqs\n", | |
3891 | skdev->name); | |
3892 | skd_start_queue(skdev); | |
3893 | skdev->gendisk_on = -1; | |
3894 | wake_up_interruptible(&skdev->waitq); | |
3895 | break; | |
3896 | ||
3897 | default: | |
3898 | pr_err("(%s) Start: unknown state %x\n", | |
3899 | skd_name(skdev), skdev->drive_state); | |
3900 | break; | |
3901 | } | |
3902 | ||
3903 | state = SKD_READL(skdev, FIT_CONTROL); | |
3904 | DPRINTK(skdev, "FIT Control Status=0x%x\n", state); | |
3905 | ||
3906 | state = SKD_READL(skdev, FIT_INT_STATUS_HOST); | |
3907 | DPRINTK(skdev, "Intr Status=0x%x\n", state); | |
3908 | ||
3909 | state = SKD_READL(skdev, FIT_INT_MASK_HOST); | |
3910 | DPRINTK(skdev, "Intr Mask=0x%x\n", state); | |
3911 | ||
3912 | state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE); | |
3913 | DPRINTK(skdev, "Msg from Dev=0x%x\n", state); | |
3914 | ||
3915 | state = SKD_READL(skdev, FIT_HW_VERSION); | |
3916 | DPRINTK(skdev, "HW version=0x%x\n", state); | |
3917 | ||
3918 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3919 | } | |
3920 | ||
3921 | static void skd_stop_device(struct skd_device *skdev) | |
3922 | { | |
3923 | unsigned long flags; | |
3924 | struct skd_special_context *skspcl = &skdev->internal_skspcl; | |
3925 | u32 dev_state; | |
3926 | int i; | |
3927 | ||
3928 | spin_lock_irqsave(&skdev->lock, flags); | |
3929 | ||
3930 | if (skdev->state != SKD_DRVR_STATE_ONLINE) { | |
3931 | pr_err("(%s): skd_stop_device not online no sync\n", | |
3932 | skd_name(skdev)); | |
3933 | goto stop_out; | |
3934 | } | |
3935 | ||
3936 | if (skspcl->req.state != SKD_REQ_STATE_IDLE) { | |
3937 | pr_err("(%s): skd_stop_device no special\n", | |
3938 | skd_name(skdev)); | |
3939 | goto stop_out; | |
3940 | } | |
3941 | ||
3942 | skdev->state = SKD_DRVR_STATE_SYNCING; | |
3943 | skdev->sync_done = 0; | |
3944 | ||
3945 | skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE); | |
3946 | ||
3947 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3948 | ||
3949 | wait_event_interruptible_timeout(skdev->waitq, | |
3950 | (skdev->sync_done), (10 * HZ)); | |
3951 | ||
3952 | spin_lock_irqsave(&skdev->lock, flags); | |
3953 | ||
3954 | switch (skdev->sync_done) { | |
3955 | case 0: | |
3956 | pr_err("(%s): skd_stop_device no sync\n", | |
3957 | skd_name(skdev)); | |
3958 | break; | |
3959 | case 1: | |
3960 | pr_err("(%s): skd_stop_device sync done\n", | |
3961 | skd_name(skdev)); | |
3962 | break; | |
3963 | default: | |
3964 | pr_err("(%s): skd_stop_device sync error\n", | |
3965 | skd_name(skdev)); | |
3966 | } | |
3967 | ||
3968 | stop_out: | |
3969 | skdev->state = SKD_DRVR_STATE_STOPPING; | |
3970 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3971 | ||
3972 | skd_kill_timer(skdev); | |
3973 | ||
3974 | spin_lock_irqsave(&skdev->lock, flags); | |
3975 | skd_disable_interrupts(skdev); | |
3976 | ||
3977 | /* ensure all ints on device are cleared */ | |
3978 | /* soft reset the device to unload with a clean slate */ | |
3979 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
3980 | SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL); | |
3981 | ||
3982 | spin_unlock_irqrestore(&skdev->lock, flags); | |
3983 | ||
3984 | /* poll every 100ms, 1 second timeout */ | |
3985 | for (i = 0; i < 10; i++) { | |
3986 | dev_state = | |
3987 | SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK; | |
3988 | if (dev_state == FIT_SR_DRIVE_INIT) | |
3989 | break; | |
3990 | set_current_state(TASK_INTERRUPTIBLE); | |
3991 | schedule_timeout(msecs_to_jiffies(100)); | |
3992 | } | |
3993 | ||
3994 | if (dev_state != FIT_SR_DRIVE_INIT) | |
3995 | pr_err("(%s): skd_stop_device state error 0x%02x\n", | |
3996 | skd_name(skdev), dev_state); | |
3997 | } | |
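| ||
| /* | |
| * Shutdown sequence, in brief: issue SYNCHRONIZE_CACHE through the | |
| * internal special request, wait up to 10s for sync_done to flip, | |
| * then mask interrupts and soft-reset, polling up to 1s for the | |
| * firmware to report FIT_SR_DRIVE_INIT. | |
| */ | |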
3998 | ||
3999 | /* assume spinlock is held */ | |
4000 | static void skd_restart_device(struct skd_device *skdev) | |
4001 | { | |
4002 | u32 state; | |
4003 | ||
4004 | /* ack all ghost interrupts */ | |
4005 | SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST); | |
4006 | ||
4007 | state = SKD_READL(skdev, FIT_STATUS); | |
4008 | ||
4009 | DPRINTK(skdev, "drive status=0x%x\n", state); | |
4010 | ||
4011 | state &= FIT_SR_DRIVE_STATE_MASK; | |
4012 | skdev->drive_state = state; | |
4013 | skdev->last_mtd = 0; | |
4014 | ||
4015 | skdev->state = SKD_DRVR_STATE_RESTARTING; | |
4016 | skdev->timer_countdown = SKD_RESTARTING_TIMO; | |
4017 | ||
4018 | skd_soft_reset(skdev); | |
4019 | } | |
4020 | ||
4021 | /* assume spinlock is held */ | |
4022 | static int skd_quiesce_dev(struct skd_device *skdev) | |
4023 | { | |
4024 | int rc = 0; | |
4025 | ||
4026 | switch (skdev->state) { | |
4027 | case SKD_DRVR_STATE_BUSY: | |
4028 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
4029 | VPRINTK(skdev, "stopping %s queue\n", skdev->name); | |
4030 | skd_stop_queue(skdev); | |
4031 | break; | |
4032 | case SKD_DRVR_STATE_ONLINE: | |
4033 | case SKD_DRVR_STATE_STOPPING: | |
4034 | case SKD_DRVR_STATE_SYNCING: | |
4035 | case SKD_DRVR_STATE_PAUSING: | |
4036 | case SKD_DRVR_STATE_PAUSED: | |
4037 | case SKD_DRVR_STATE_STARTING: | |
4038 | case SKD_DRVR_STATE_RESTARTING: | |
4039 | case SKD_DRVR_STATE_RESUMING: | |
4040 | default: | |
4041 | rc = -EINVAL; | |
4042 | VPRINTK(skdev, "state [%d] not implemented\n", skdev->state); | |
4043 | } | |
4044 | return rc; | |
4045 | } | |
4046 | ||
4047 | /* assume spinlock is held */ | |
4048 | static int skd_unquiesce_dev(struct skd_device *skdev) | |
4049 | { | |
4050 | int prev_driver_state = skdev->state; | |
4051 | ||
4052 | skd_log_skdev(skdev, "unquiesce"); | |
4053 | if (skdev->state == SKD_DRVR_STATE_ONLINE) { | |
4054 | DPRINTK(skdev, "**** device already ONLINE\n"); | |
4055 | return 0; | |
4056 | } | |
4057 | if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) { | |
4058 | /* | |
4059 | * If there has been a state change to other than | |
4060 | * ONLINE, we will rely on a later controller state change | |
4061 | * to come back online and restart the queue. | |
4062 | * The BUSY state means that the driver is ready to | |
4063 | * continue normal processing but is waiting for the | |
4064 | * controller to become available. | |
4065 | */ | |
4066 | skdev->state = SKD_DRVR_STATE_BUSY; | |
4067 | DPRINTK(skdev, "drive BUSY state\n"); | |
4068 | return 0; | |
4069 | } | |
4070 | ||
4071 | /* | |
4072 | * Drive has just come online; the driver is either in startup, | |
4073 | * paused performing a task, or busy waiting for hardware. | |
4074 | */ | |
4075 | switch (skdev->state) { | |
4076 | case SKD_DRVR_STATE_PAUSED: | |
4077 | case SKD_DRVR_STATE_BUSY: | |
4078 | case SKD_DRVR_STATE_BUSY_IMMINENT: | |
4079 | case SKD_DRVR_STATE_BUSY_ERASE: | |
4080 | case SKD_DRVR_STATE_STARTING: | |
4081 | case SKD_DRVR_STATE_RESTARTING: | |
4082 | case SKD_DRVR_STATE_FAULT: | |
4083 | case SKD_DRVR_STATE_IDLE: | |
4084 | case SKD_DRVR_STATE_LOAD: | |
4085 | skdev->state = SKD_DRVR_STATE_ONLINE; | |
4086 | pr_err("(%s): Driver state %s(%d)=>%s(%d)\n", | |
4087 | skd_name(skdev), | |
4088 | skd_skdev_state_to_str(prev_driver_state), | |
4089 | prev_driver_state, skd_skdev_state_to_str(skdev->state), | |
4090 | skdev->state); | |
4091 | DPRINTK(skdev, "**** device ONLINE...starting block queue\n"); | |
4092 | VPRINTK(skdev, "starting %s queue\n", skdev->name); | |
4093 | pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev)); | |
4094 | skd_start_queue(skdev); | |
4095 | skdev->gendisk_on = 1; | |
4096 | wake_up_interruptible(&skdev->waitq); | |
4097 | break; | |
4098 | ||
4099 | case SKD_DRVR_STATE_DISAPPEARED: | |
4100 | default: | |
4101 | DPRINTK(skdev, "**** driver state %d, not implemented\n", | |
4102 | skdev->state); | |
4103 | return -EBUSY; | |
4104 | } | |
4105 | return 0; | |
4106 | } | |
4107 | ||
4108 | /* | |
4109 | ***************************************************************************** | |
4110 | * PCIe MSI/MSI-X INTERRUPT HANDLERS | |
4111 | ***************************************************************************** | |
4112 | */ | |
4113 | ||
4114 | static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data) | |
4115 | { | |
4116 | struct skd_device *skdev = skd_host_data; | |
4117 | unsigned long flags; | |
4118 | ||
4119 | spin_lock_irqsave(&skdev->lock, flags); | |
4120 | VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4121 | pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev), | |
4122 | irq, SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4123 | SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST); | |
4124 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4125 | return IRQ_HANDLED; | |
4126 | } | |
4127 | ||
4128 | static irqreturn_t skd_statec_isr(int irq, void *skd_host_data) | |
4129 | { | |
4130 | struct skd_device *skdev = skd_host_data; | |
4131 | unsigned long flags; | |
4132 | ||
4133 | spin_lock_irqsave(&skdev->lock, flags); | |
4134 | VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4135 | SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST); | |
4136 | skd_isr_fwstate(skdev); | |
4137 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4138 | return IRQ_HANDLED; | |
4139 | } | |
4140 | ||
4141 | static irqreturn_t skd_comp_q(int irq, void *skd_host_data) | |
4142 | { | |
4143 | struct skd_device *skdev = skd_host_data; | |
4144 | unsigned long flags; | |
4145 | int flush_enqueued = 0; | |
4146 | int deferred; | |
4147 | ||
4148 | spin_lock_irqsave(&skdev->lock, flags); | |
4149 | VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4150 | SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST); | |
4151 | deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit, | |
4152 | &flush_enqueued); | |
4153 | ||
4154 | if (flush_enqueued) | |
4155 | skd_request_fn(skdev->queue); | |
4156 | ||
4157 | if (deferred) | |
4158 | schedule_work(&skdev->completion_worker); | |
4159 | else if (!flush_enqueued) | |
4160 | skd_request_fn(skdev->queue); | |
4161 | ||
4162 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4163 | ||
4164 | return IRQ_HANDLED; | |
4165 | } | |
4166 | ||
4167 | static irqreturn_t skd_msg_isr(int irq, void *skd_host_data) | |
4168 | { | |
4169 | struct skd_device *skdev = skd_host_data; | |
4170 | unsigned long flags; | |
4171 | ||
4172 | spin_lock_irqsave(&skdev->lock, flags); | |
4173 | VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4174 | SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST); | |
4175 | skd_isr_msg_from_dev(skdev); | |
4176 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4177 | return IRQ_HANDLED; | |
4178 | } | |
4179 | ||
4180 | static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data) | |
4181 | { | |
4182 | struct skd_device *skdev = skd_host_data; | |
4183 | unsigned long flags; | |
4184 | ||
4185 | spin_lock_irqsave(&skdev->lock, flags); | |
4186 | VPRINTK(skdev, "MSIX = 0x%x\n", SKD_READL(skdev, FIT_INT_STATUS_HOST)); | |
4187 | SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST); | |
4188 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4189 | return IRQ_HANDLED; | |
4190 | } | |
4191 | ||
4192 | /* | |
4193 | ***************************************************************************** | |
4194 | * PCIe MSI/MSI-X SETUP | |
4195 | ***************************************************************************** | |
4196 | */ | |
4197 | ||
4198 | struct skd_msix_entry { | |
4199 | int have_irq; | |
4200 | u32 vector; | |
4201 | u32 entry; | |
4202 | struct skd_device *rsp; | |
4203 | char isr_name[30]; | |
4204 | }; | |
4205 | ||
4206 | struct skd_init_msix_entry { | |
4207 | const char *name; | |
4208 | irq_handler_t handler; | |
4209 | }; | |
4210 | ||
4211 | #define SKD_MAX_MSIX_COUNT 13 | |
4212 | #define SKD_MIN_MSIX_COUNT 7 | |
4213 | #define SKD_BASE_MSIX_IRQ 4 | |
4214 | ||
4215 | static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = { | |
4216 | { "(DMA 0)", skd_reserved_isr }, | |
4217 | { "(DMA 1)", skd_reserved_isr }, | |
4218 | { "(DMA 2)", skd_reserved_isr }, | |
4219 | { "(DMA 3)", skd_reserved_isr }, | |
4220 | { "(State Change)", skd_statec_isr }, | |
4221 | { "(COMPL_Q)", skd_comp_q }, | |
4222 | { "(MSG)", skd_msg_isr }, | |
4223 | { "(Reserved)", skd_reserved_isr }, | |
4224 | { "(Reserved)", skd_reserved_isr }, | |
4225 | { "(Queue Full 0)", skd_qfull_isr }, | |
4226 | { "(Queue Full 1)", skd_qfull_isr }, | |
4227 | { "(Queue Full 2)", skd_qfull_isr }, | |
4228 | { "(Queue Full 3)", skd_qfull_isr }, | |
4229 | }; | |
4230 | ||
4231 | static void skd_release_msix(struct skd_device *skdev) | |
4232 | { | |
4233 | struct skd_msix_entry *qentry; | |
4234 | int i; | |
4235 | ||
4236 | if (skdev->msix_entries == NULL) | |
4237 | return; | |
4238 | for (i = 0; i < skdev->msix_count; i++) { | |
4239 | qentry = &skdev->msix_entries[i]; | |
4240 | ||
4242 | if (qentry->have_irq) | |
4243 | devm_free_irq(&skdev->pdev->dev, | |
4244 | qentry->vector, qentry->rsp); | |
4245 | } | |
4246 | pci_disable_msix(skdev->pdev); | |
4247 | kfree(skdev->msix_entries); | |
4248 | skdev->msix_count = 0; | |
4249 | skdev->msix_entries = NULL; | |
4250 | } | |
4251 | ||
4252 | static int skd_acquire_msix(struct skd_device *skdev) | |
4253 | { | |
4254 | int i, rc; | |
4255 | struct pci_dev *pdev; | |
4256 | struct msix_entry *entries = NULL; | |
4257 | struct skd_msix_entry *qentry; | |
4258 | ||
4259 | pdev = skdev->pdev; | |
4260 | skdev->msix_count = SKD_MAX_MSIX_COUNT; | |
4261 | entries = kzalloc(sizeof(struct msix_entry) * SKD_MAX_MSIX_COUNT, | |
4262 | GFP_KERNEL); | |
4263 | if (!entries) | |
4264 | return -ENOMEM; | |
4265 | ||
4266 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) | |
4267 | entries[i].entry = i; | |
4268 | ||
4269 | rc = pci_enable_msix(pdev, entries, SKD_MAX_MSIX_COUNT); | |
4270 | if (rc < 0) | |
4271 | goto msix_out; | |
4272 | if (rc) { | |
4273 | if (rc < SKD_MIN_MSIX_COUNT) { | |
4274 | pr_err("(%s): failed to enable MSI-X %d\n", | |
4275 | skd_name(skdev), rc); | |
4276 | goto msix_out; | |
4277 | } | |
4278 | DPRINTK(skdev, "%s: <%s> allocated %d MSI-X vectors\n", | |
4279 | pci_name(pdev), skdev->name, rc); | |
4280 | ||
4281 | skdev->msix_count = rc; | |
4282 | rc = pci_enable_msix(pdev, entries, skdev->msix_count); | |
4283 | if (rc) { | |
4284 | pr_err("(%s): failed to enable MSI-X " | |
4285 | "support (%d) %d\n", | |
4286 | skd_name(skdev), skdev->msix_count, rc); | |
4287 | goto msix_out; | |
4288 | } | |
4289 | } | |
4290 | skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * | |
4291 | skdev->msix_count, GFP_KERNEL); | |
4292 | if (!skdev->msix_entries) { | |
4293 | rc = -ENOMEM; | |
4294 | skdev->msix_count = 0; | |
4295 | pr_err("(%s): msix table allocation error\n", | |
4296 | skd_name(skdev)); | |
4297 | goto msix_out; | |
4298 | } | |
4299 | ||
4300 | qentry = skdev->msix_entries; | |
4301 | for (i = 0; i < skdev->msix_count; i++) { | |
4302 | qentry->vector = entries[i].vector; | |
4303 | qentry->entry = entries[i].entry; | |
4304 | qentry->rsp = NULL; | |
4305 | qentry->have_irq = 0; | |
4306 | DPRINTK(skdev, "%s: <%s> msix (%d) vec %d, entry %x\n", | |
4307 | pci_name(pdev), skdev->name, | |
4308 | i, qentry->vector, qentry->entry); | |
4309 | qentry++; | |
4310 | } | |
4311 | ||
4312 | /* Enable MSI-X vectors for the base queue */ | |
4313 | for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) { | |
4314 | qentry = &skdev->msix_entries[i]; | |
4315 | snprintf(qentry->isr_name, sizeof(qentry->isr_name), | |
4316 | "%s%d-msix %s", DRV_NAME, skdev->devno, | |
4317 | msix_entries[i].name); | |
4318 | rc = devm_request_irq(&skdev->pdev->dev, qentry->vector, | |
4319 | msix_entries[i].handler, 0, | |
4320 | qentry->isr_name, skdev); | |
4321 | if (rc) { | |
4322 | pr_err("(%s): Unable to register(%d) MSI-X " | |
4323 | "handler %d: %s\n", | |
4324 | skd_name(skdev), rc, i, qentry->isr_name); | |
4325 | goto msix_out; | |
4326 | } else { | |
4327 | qentry->have_irq = 1; | |
4328 | qentry->rsp = skdev; | |
4329 | } | |
4330 | } | |
4331 | DPRINTK(skdev, "%s: <%s> msix %d irq(s) enabled\n", | |
4332 | pci_name(pdev), skdev->name, skdev->msix_count); | |
| kfree(entries); | |
4333 | return 0; | |
4334 | ||
4335 | msix_out: | |
4336 | kfree(entries); | |
4338 | skd_release_msix(skdev); | |
4339 | return rc; | |
4340 | } | |
4341 | ||
4342 | static int skd_acquire_irq(struct skd_device *skdev) | |
4343 | { | |
4344 | int rc; | |
4345 | struct pci_dev *pdev; | |
4346 | ||
4347 | pdev = skdev->pdev; | |
4348 | skdev->msix_count = 0; | |
4349 | ||
4350 | RETRY_IRQ_TYPE: | |
4351 | switch (skdev->irq_type) { | |
4352 | case SKD_IRQ_MSIX: | |
4353 | rc = skd_acquire_msix(skdev); | |
4354 | if (!rc) | |
4355 | pr_info("(%s): MSI-X %d irqs enabled\n", | |
4356 | skd_name(skdev), skdev->msix_count); | |
4357 | else { | |
4358 | pr_err( | |
4359 | "(%s): failed to enable MSI-X, re-trying with MSI %d\n", | |
4360 | skd_name(skdev), rc); | |
4361 | skdev->irq_type = SKD_IRQ_MSI; | |
4362 | goto RETRY_IRQ_TYPE; | |
4363 | } | |
4364 | break; | |
4365 | case SKD_IRQ_MSI: | |
4366 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d-msi", | |
4367 | DRV_NAME, skdev->devno); | |
4368 | rc = pci_enable_msi(pdev); | |
4369 | if (!rc) { | |
4370 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, 0, | |
4371 | skdev->isr_name, skdev); | |
4372 | if (rc) { | |
4373 | pci_disable_msi(pdev); | |
4374 | pr_err( | |
4375 | "(%s): failed to allocate the MSI interrupt %d\n", | |
4376 | skd_name(skdev), rc); | |
4377 | goto RETRY_IRQ_LEGACY; | |
4378 | } | |
4379 | pr_info("(%s): MSI irq %d enabled\n", | |
4380 | skd_name(skdev), pdev->irq); | |
4381 | } else { | |
4382 | RETRY_IRQ_LEGACY: | |
4383 | pr_err( | |
4384 | "(%s): failed to enable MSI, re-trying with LEGACY %d\n", | |
4385 | skd_name(skdev), rc); | |
4386 | skdev->irq_type = SKD_IRQ_LEGACY; | |
4387 | goto RETRY_IRQ_TYPE; | |
4388 | } | |
4389 | break; | |
4390 | case SKD_IRQ_LEGACY: | |
4391 | snprintf(skdev->isr_name, sizeof(skdev->isr_name), | |
4392 | "%s%d-legacy", DRV_NAME, skdev->devno); | |
4393 | rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr, | |
4394 | IRQF_SHARED, skdev->isr_name, skdev); | |
4395 | if (!rc) | |
4396 | pr_info("(%s): LEGACY irq %d enabled\n", | |
4397 | skd_name(skdev), pdev->irq); | |
4398 | else | |
4399 | pr_err("(%s): request LEGACY irq error %d\n", | |
4400 | skd_name(skdev), rc); | |
4401 | break; | |
4402 | default: | |
4403 | pr_info("(%s): irq_type %d invalid, re-set to %d\n", | |
4404 | skd_name(skdev), skdev->irq_type, SKD_IRQ_DEFAULT); | |
4405 | skdev->irq_type = SKD_IRQ_LEGACY; | |
4406 | goto RETRY_IRQ_TYPE; | |
4407 | } | |
4408 | return rc; | |
4409 | } | |
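| ||
| /* | |
| * skd_acquire_irq() degrades gracefully, re-entering the switch via | |
| * RETRY_IRQ_TYPE after each failure: MSI-X -> MSI -> legacy INTx. | |
| * Only the legacy path registers with IRQF_SHARED, since MSI and | |
| * MSI-X vectors are never shared. | |
| */ | |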
4410 | ||
4411 | static void skd_release_irq(struct skd_device *skdev) | |
4412 | { | |
4413 | switch (skdev->irq_type) { | |
4414 | case SKD_IRQ_MSIX: | |
4415 | skd_release_msix(skdev); | |
4416 | break; | |
4417 | case SKD_IRQ_MSI: | |
4418 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | |
4419 | pci_disable_msi(skdev->pdev); | |
4420 | break; | |
4421 | case SKD_IRQ_LEGACY: | |
4422 | devm_free_irq(&skdev->pdev->dev, skdev->pdev->irq, skdev); | |
4423 | break; | |
4424 | default: | |
4425 | pr_err("(%s): wrong irq type %d!", | |
4426 | skd_name(skdev), skdev->irq_type); | |
4427 | break; | |
4428 | } | |
4429 | } | |
4430 | ||
4431 | /* | |
4432 | ***************************************************************************** | |
4433 | * CONSTRUCT | |
4434 | ***************************************************************************** | |
4435 | */ | |
4436 | ||
4437 | static int skd_cons_skcomp(struct skd_device *skdev); | |
4438 | static int skd_cons_skmsg(struct skd_device *skdev); | |
4439 | static int skd_cons_skreq(struct skd_device *skdev); | |
4440 | static int skd_cons_skspcl(struct skd_device *skdev); | |
4441 | static int skd_cons_sksb(struct skd_device *skdev); | |
4442 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, | |
4443 | u32 n_sg, | |
4444 | dma_addr_t *ret_dma_addr); | |
4445 | static int skd_cons_disk(struct skd_device *skdev); | |
4446 | ||
4447 | #define SKD_N_DEV_TABLE 16u | |
4448 | static u32 skd_next_devno; | |
4449 | ||
4450 | static struct skd_device *skd_construct(struct pci_dev *pdev) | |
4451 | { | |
4452 | struct skd_device *skdev; | |
4453 | int blk_major = skd_major; | |
4454 | int rc; | |
4455 | ||
4456 | skdev = kzalloc(sizeof(*skdev), GFP_KERNEL); | |
4457 | ||
4458 | if (!skdev) { | |
4459 | pr_err(PFX "(%s): memory alloc failure\n", | |
4460 | pci_name(pdev)); | |
4461 | return NULL; | |
4462 | } | |
4463 | ||
4464 | skdev->state = SKD_DRVR_STATE_LOAD; | |
4465 | skdev->pdev = pdev; | |
4466 | skdev->devno = skd_next_devno++; | |
4467 | skdev->major = blk_major; | |
4468 | skdev->irq_type = skd_isr_type; | |
4469 | sprintf(skdev->name, DRV_NAME "%d", skdev->devno); | |
4470 | skdev->dev_max_queue_depth = 0; | |
4471 | ||
4472 | skdev->num_req_context = skd_max_queue_depth; | |
4473 | skdev->num_fitmsg_context = skd_max_queue_depth; | |
4474 | skdev->n_special = skd_max_pass_thru; | |
4475 | skdev->cur_max_queue_depth = 1; | |
4476 | skdev->queue_low_water_mark = 1; | |
4477 | skdev->proto_ver = 99; | |
4478 | skdev->sgs_per_request = skd_sgs_per_request; | |
4479 | skdev->dbg_level = skd_dbg_level; | |
4480 | ||
4481 | if (skd_bio) | |
4482 | bio_list_init(&skdev->bio_queue); | |
4483 | ||
4484 | ||
4485 | atomic_set(&skdev->device_count, 0); | |
4486 | ||
4487 | spin_lock_init(&skdev->lock); | |
4488 | ||
4489 | INIT_WORK(&skdev->completion_worker, skd_completion_worker); | |
4490 | INIT_LIST_HEAD(&skdev->flush_list); | |
4491 | ||
4492 | VPRINTK(skdev, "skcomp\n"); | |
4493 | rc = skd_cons_skcomp(skdev); | |
4494 | if (rc < 0) | |
4495 | goto err_out; | |
4496 | ||
4497 | VPRINTK(skdev, "skmsg\n"); | |
4498 | rc = skd_cons_skmsg(skdev); | |
4499 | if (rc < 0) | |
4500 | goto err_out; | |
4501 | ||
4502 | VPRINTK(skdev, "skreq\n"); | |
4503 | rc = skd_cons_skreq(skdev); | |
4504 | if (rc < 0) | |
4505 | goto err_out; | |
4506 | ||
4507 | VPRINTK(skdev, "skspcl\n"); | |
4508 | rc = skd_cons_skspcl(skdev); | |
4509 | if (rc < 0) | |
4510 | goto err_out; | |
4511 | ||
4512 | VPRINTK(skdev, "sksb\n"); | |
4513 | rc = skd_cons_sksb(skdev); | |
4514 | if (rc < 0) | |
4515 | goto err_out; | |
4516 | ||
4517 | VPRINTK(skdev, "disk\n"); | |
4518 | rc = skd_cons_disk(skdev); | |
4519 | if (rc < 0) | |
4520 | goto err_out; | |
4521 | ||
4522 | ||
4523 | ||
4524 | DPRINTK(skdev, "VICTORY\n"); | |
4525 | return skdev; | |
4526 | ||
4527 | err_out: | |
4528 | DPRINTK(skdev, "construct failed\n"); | |
4529 | skd_destruct(skdev); | |
4530 | return NULL; | |
4531 | } | |
4532 | ||
4533 | static int skd_cons_skcomp(struct skd_device *skdev) | |
4534 | { | |
4535 | int rc = 0; | |
4536 | struct fit_completion_entry_v1 *skcomp; | |
4537 | u32 nbytes; | |
4538 | ||
4539 | nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY; | |
4540 | nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY; | |
4541 | ||
4542 | VPRINTK(skdev, "comp pci_alloc, total bytes %d entries %d\n", nbytes, | |
4543 | SKD_N_COMPLETION_ENTRY); | |
4544 | ||
4545 | skcomp = pci_alloc_consistent(skdev->pdev, nbytes, | |
4546 | &skdev->cq_dma_address); | |
4547 | ||
4548 | if (skcomp == NULL) { | |
4549 | rc = -ENOMEM; | |
4550 | goto err_out; | |
4551 | } | |
4552 | ||
4553 | memset(skcomp, 0, nbytes); | |
4554 | ||
4555 | skdev->skcomp_table = skcomp; | |
4556 | skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp + | |
4557 | sizeof(*skcomp) * | |
4558 | SKD_N_COMPLETION_ENTRY); | |
4559 | ||
4560 | err_out: | |
4561 | return rc; | |
4562 | } | |
4563 | ||
4564 | static int skd_cons_skmsg(struct skd_device *skdev) | |
4565 | { | |
4566 | int rc = 0; | |
4567 | u32 i; | |
4568 | ||
4569 | VPRINTK(skdev, "skmsg_table kzalloc, struct %lu, count %u total %lu\n", | |
4570 | sizeof(struct skd_fitmsg_context), |
4571 | skdev->num_fitmsg_context, | |
4572 | sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context); | |
4573 | |
4574 | skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context) | |
4575 | *skdev->num_fitmsg_context, GFP_KERNEL); | |
4576 | if (skdev->skmsg_table == NULL) { | |
4577 | rc = -ENOMEM; | |
4578 | goto err_out; | |
4579 | } | |
4580 | ||
4581 | for (i = 0; i < skdev->num_fitmsg_context; i++) { | |
4582 | struct skd_fitmsg_context *skmsg; | |
4583 | ||
4584 | skmsg = &skdev->skmsg_table[i]; | |
4585 | ||
4586 | skmsg->id = i + SKD_ID_FIT_MSG; | |
4587 | ||
4588 | skmsg->state = SKD_MSG_STATE_IDLE; | |
4589 | skmsg->msg_buf = pci_alloc_consistent(skdev->pdev, | |
4590 | SKD_N_FITMSG_BYTES + 64, | |
4591 | &skmsg->mb_dma_address); | |
4592 | ||
4593 | if (skmsg->msg_buf == NULL) { | |
4594 | rc = -ENOMEM; | |
4595 | goto err_out; | |
4596 | } | |
4597 | ||
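| /* The hardware requires the FIT message buffer to start aligned to | |
| * FIT_QCMD_BASE_ADDRESS_MASK (presumably 64 bytes, given the +64 | |
| * slack in the allocation above, which pays for the round-up below). | |
| * offset records the original low-order bits (zero in practice, | |
| * since pci_alloc_consistent() returns aligned memory) for use when | |
| * the buffer is freed in skd_free_skmsg(). | |
| */ | |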
4598 | skmsg->offset = (u32)((u64)skmsg->msg_buf & | |
4599 | (~FIT_QCMD_BASE_ADDRESS_MASK)); | |
4600 | skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK; | |
4601 | skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf & | |
4602 | FIT_QCMD_BASE_ADDRESS_MASK); | |
4603 | skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK; | |
4604 | skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK; | |
4605 | memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); | |
4606 | ||
4607 | skmsg->next = &skmsg[1]; | |
4608 | } | |
4609 | ||
4610 | /* Free list is in order starting with the 0th entry. */ | |
4611 | skdev->skmsg_table[i - 1].next = NULL; | |
4612 | skdev->skmsg_free_list = skdev->skmsg_table; | |
4613 | ||
4614 | err_out: | |
4615 | return rc; | |
4616 | } | |
4617 | ||
4618 | static int skd_cons_skreq(struct skd_device *skdev) | |
4619 | { | |
4620 | int rc = 0; | |
4621 | u32 i; | |
4622 | ||
4623 | VPRINTK(skdev, "skreq_table kzalloc, struct %lu, count %u total %lu\n", | |
4624 | sizeof(struct skd_request_context), |
4625 | skdev->num_req_context, | |
4626 | sizeof(struct skd_request_context) * skdev->num_req_context); | |
4627 | ||
4628 | skdev->skreq_table = kzalloc(sizeof(struct skd_request_context) | |
4629 | * skdev->num_req_context, GFP_KERNEL); | |
4630 | if (skdev->skreq_table == NULL) { | |
4631 | rc = -ENOMEM; | |
4632 | goto err_out; | |
4633 | } | |
4634 | ||
4635 | VPRINTK(skdev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n", | |
4636 | skdev->sgs_per_request, sizeof(struct scatterlist), |
4637 | skdev->sgs_per_request * sizeof(struct scatterlist)); | |
4638 | ||
4639 | for (i = 0; i < skdev->num_req_context; i++) { | |
4640 | struct skd_request_context *skreq; | |
4641 | ||
4642 | skreq = &skdev->skreq_table[i]; | |
4643 | ||
4644 | skreq->id = i + SKD_ID_RW_REQUEST; | |
4645 | skreq->state = SKD_REQ_STATE_IDLE; | |
4646 | ||
4647 | skreq->sg = kzalloc(sizeof(struct scatterlist) * | |
4648 | skdev->sgs_per_request, GFP_KERNEL); | |
4649 | if (skreq->sg == NULL) { | |
4650 | rc = -ENOMEM; | |
4651 | goto err_out; | |
4652 | } | |
4653 | sg_init_table(skreq->sg, skdev->sgs_per_request); | |
4654 | ||
4655 | skreq->sksg_list = skd_cons_sg_list(skdev, | |
4656 | skdev->sgs_per_request, | |
4657 | &skreq->sksg_dma_address); | |
4658 | ||
4659 | if (skreq->sksg_list == NULL) { | |
4660 | rc = -ENOMEM; | |
4661 | goto err_out; | |
4662 | } | |
4663 | ||
4664 | skreq->next = &skreq[1]; | |
4665 | } | |
4666 | ||
4667 | /* Free list is in order starting with the 0th entry. */ | |
4668 | skdev->skreq_table[i - 1].next = NULL; | |
4669 | skdev->skreq_free_list = skdev->skreq_table; | |
4670 | ||
4671 | err_out: | |
4672 | return rc; | |
4673 | } | |
4674 | ||
4675 | static int skd_cons_skspcl(struct skd_device *skdev) | |
4676 | { | |
4677 | int rc = 0; | |
4678 | u32 i, nbytes; | |
4679 | ||
4680 | VPRINTK(skdev, "skspcl_table kzalloc, struct %lu, count %u total %lu\n", | |
4681 | sizeof(struct skd_special_context), |
4682 | skdev->n_special, | |
4683 | sizeof(struct skd_special_context) * skdev->n_special); | |
4684 | ||
4685 | skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context) | |
4686 | * skdev->n_special, GFP_KERNEL); | |
4687 | if (skdev->skspcl_table == NULL) { | |
4688 | rc = -ENOMEM; | |
4689 | goto err_out; | |
4690 | } | |
4691 | ||
4692 | for (i = 0; i < skdev->n_special; i++) { | |
4693 | struct skd_special_context *skspcl; | |
4694 | ||
4695 | skspcl = &skdev->skspcl_table[i]; | |
4696 | ||
4697 | skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST; | |
4698 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
4699 | ||
4700 | skspcl->req.next = &skspcl[1].req; | |
4701 | ||
4702 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
4703 | ||
4704 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | |
4705 | &skspcl->mb_dma_address); | |
4706 | if (skspcl->msg_buf == NULL) { | |
4707 | rc = -ENOMEM; | |
4708 | goto err_out; | |
4709 | } | |
4710 | ||
4711 | memset(skspcl->msg_buf, 0, nbytes); | |
4712 | ||
4713 | skspcl->req.sg = kzalloc(sizeof(struct scatterlist) * | |
4714 | SKD_N_SG_PER_SPECIAL, GFP_KERNEL); | |
4715 | if (skspcl->req.sg == NULL) { | |
4716 | rc = -ENOMEM; | |
4717 | goto err_out; | |
4718 | } | |
4719 | ||
4720 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, | |
4721 | SKD_N_SG_PER_SPECIAL, | |
4722 | &skspcl->req. | |
4723 | sksg_dma_address); | |
4724 | if (skspcl->req.sksg_list == NULL) { | |
4725 | rc = -ENOMEM; | |
4726 | goto err_out; | |
4727 | } | |
4728 | } | |
4729 | ||
4730 | /* Free list is in order starting with the 0th entry. */ | |
4731 | skdev->skspcl_table[i - 1].req.next = NULL; | |
4732 | skdev->skspcl_free_list = skdev->skspcl_table; | |
4733 | ||
4734 | return rc; | |
4735 | ||
4736 | err_out: | |
4737 | return rc; | |
4738 | } | |
4739 | ||
4740 | static int skd_cons_sksb(struct skd_device *skdev) | |
4741 | { | |
4742 | int rc = 0; | |
4743 | struct skd_special_context *skspcl; | |
4744 | u32 nbytes; | |
4745 | ||
4746 | skspcl = &skdev->internal_skspcl; | |
4747 | ||
4748 | skspcl->req.id = 0 + SKD_ID_INTERNAL; | |
4749 | skspcl->req.state = SKD_REQ_STATE_IDLE; | |
4750 | ||
4751 | nbytes = SKD_N_INTERNAL_BYTES; | |
4752 | ||
4753 | skspcl->data_buf = pci_alloc_consistent(skdev->pdev, nbytes, | |
4754 | &skspcl->db_dma_address); | |
4755 | if (skspcl->data_buf == NULL) { | |
4756 | rc = -ENOMEM; | |
4757 | goto err_out; | |
4758 | } | |
4759 | ||
4760 | memset(skspcl->data_buf, 0, nbytes); | |
4761 | ||
4762 | nbytes = SKD_N_SPECIAL_FITMSG_BYTES; | |
4763 | skspcl->msg_buf = pci_alloc_consistent(skdev->pdev, nbytes, | |
4764 | &skspcl->mb_dma_address); | |
4765 | if (skspcl->msg_buf == NULL) { | |
4766 | rc = -ENOMEM; | |
4767 | goto err_out; | |
4768 | } | |
4769 | ||
4770 | memset(skspcl->msg_buf, 0, nbytes); | |
4771 | ||
4772 | skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1, | |
4773 | &skspcl->req.sksg_dma_address); | |
4774 | if (skspcl->req.sksg_list == NULL) { | |
4775 | rc = -ENOMEM; | |
4776 | goto err_out; | |
4777 | } | |
4778 | ||
4779 | if (!skd_format_internal_skspcl(skdev)) { | |
4780 | rc = -EINVAL; | |
4781 | goto err_out; | |
4782 | } | |
4783 | ||
4784 | err_out: | |
4785 | return rc; | |
4786 | } | |
4787 | ||
4788 | static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev, | |
4789 | u32 n_sg, | |
4790 | dma_addr_t *ret_dma_addr) | |
4791 | { | |
4792 | struct fit_sg_descriptor *sg_list; | |
4793 | u32 nbytes; | |
4794 | ||
4795 | nbytes = sizeof(*sg_list) * n_sg; | |
4796 | ||
4797 | sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr); | |
4798 | ||
4799 | if (sg_list != NULL) { | |
4800 | uint64_t dma_address = *ret_dma_addr; | |
4801 | u32 i; | |
4802 | ||
4803 | memset(sg_list, 0, nbytes); | |
4804 | ||
4805 | for (i = 0; i < n_sg - 1; i++) { | |
4806 | uint64_t ndp_off; | |
4807 | ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor); | |
4808 | ||
4809 | sg_list[i].next_desc_ptr = dma_address + ndp_off; | |
4810 | } | |
4811 | sg_list[i].next_desc_ptr = 0LL; | |
4812 | } | |
4813 | ||
4814 | return sg_list; | |
4815 | } | |
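| ||
| /* | |
| * The list built above is a singly linked chain in bus-address space: | |
| * descriptor i carries the DMA address of descriptor i+1 in | |
| * next_desc_ptr, and the final descriptor terminates the chain with | |
| * 0, so the device can walk a request's scatter/gather list on its | |
| * own. | |
| */ | |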
4816 | ||
4817 | static int skd_cons_disk(struct skd_device *skdev) | |
4818 | { | |
4819 | int rc = 0; | |
4820 | struct gendisk *disk; | |
4821 | struct request_queue *q; | |
4822 | unsigned long flags; | |
4823 | ||
4824 | disk = alloc_disk(SKD_MINORS_PER_DEVICE); | |
4825 | if (!disk) { | |
4826 | rc = -ENOMEM; | |
4827 | goto err_out; | |
4828 | } | |
4829 | ||
4830 | skdev->disk = disk; | |
4831 | sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno); | |
4832 | ||
4833 | disk->major = skdev->major; | |
4834 | disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE; | |
4835 | disk->fops = &skd_blockdev_ops; | |
4836 | disk->private_data = skdev; | |
4837 | ||
4838 | if (!skd_bio) { | |
4839 | q = blk_init_queue(skd_request_fn, &skdev->lock); | |
4840 | } else { | |
4841 | q = blk_alloc_queue(GFP_KERNEL); | |
4842 | if (q) | |
| q->queue_flags = (1 << QUEUE_FLAG_IO_STAT) | | |
| (1 << QUEUE_FLAG_STACKABLE); | |
4843 | } | |
4844 | ||
4845 | if (!q) { | |
4846 | rc = -ENOMEM; | |
4847 | goto err_out; | |
4848 | } | |
4849 | ||
4850 | skdev->queue = q; | |
4851 | disk->queue = q; | |
4852 | q->queuedata = skdev; | |
4853 | ||
4854 | if (skd_bio) { | |
4855 | q->queue_lock = &skdev->lock; | |
4856 | blk_queue_make_request(q, skd_make_request); | |
4857 | } | |
4858 | ||
4859 | blk_queue_flush(q, REQ_FLUSH | REQ_FUA); | |
4860 | blk_queue_max_segments(q, skdev->sgs_per_request); | |
4861 | blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); | |
4862 | ||
4863 | /* set sysfs optimal_io_size to 8K */ | |
4864 | blk_queue_io_opt(q, 8192); | |
4865 | ||
4866 | /* DISCARD Flag initialization. */ | |
4867 | q->limits.discard_granularity = 8192; | |
4868 | q->limits.discard_alignment = 0; | |
4869 | q->limits.max_discard_sectors = UINT_MAX >> 9; | |
4870 | q->limits.discard_zeroes_data = 1; | |
4871 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); | |
4872 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); | |
4873 | ||
4874 | spin_lock_irqsave(&skdev->lock, flags); | |
4875 | VPRINTK(skdev, "stopping %s queue\n", skdev->name); | |
4876 | skd_stop_queue(skdev); | |
4877 | spin_unlock_irqrestore(&skdev->lock, flags); | |
4878 | ||
4879 | err_out: | |
4880 | return rc; | |
4881 | } | |
4882 | ||
4883 | /* | |
4884 | ***************************************************************************** | |
4885 | * DESTRUCT (FREE) | |
4886 | ***************************************************************************** | |
4887 | */ | |
4888 | ||
4889 | static void skd_free_skcomp(struct skd_device *skdev); | |
4890 | static void skd_free_skmsg(struct skd_device *skdev); | |
4891 | static void skd_free_skreq(struct skd_device *skdev); | |
4892 | static void skd_free_skspcl(struct skd_device *skdev); | |
4893 | static void skd_free_sksb(struct skd_device *skdev); | |
4894 | static void skd_free_sg_list(struct skd_device *skdev, | |
4895 | struct fit_sg_descriptor *sg_list, | |
4896 | u32 n_sg, dma_addr_t dma_addr); | |
4897 | static void skd_free_disk(struct skd_device *skdev); | |
4898 | ||
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	VPRINTK(skdev, "disk\n");
	skd_free_disk(skdev);

	VPRINTK(skdev, "sksb\n");
	skd_free_sksb(skdev);

	VPRINTK(skdev, "skspcl\n");
	skd_free_skspcl(skdev);

	VPRINTK(skdev, "skreq\n");
	skd_free_skreq(skdev);

	VPRINTK(skdev, "skmsg\n");
	skd_free_skmsg(skdev);

	VPRINTK(skdev, "skcomp\n");
	skd_free_skcomp(skdev);

	VPRINTK(skdev, "skdev\n");
	kfree(skdev);
}

static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table != NULL) {
		u32 nbytes;

		nbytes = sizeof(skdev->skcomp_table[0]) *
			 SKD_N_COMPLETION_ENTRY;
		pci_free_consistent(skdev->pdev, nbytes,
				    skdev->skcomp_table, skdev->cq_dma_address);
	}

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}

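/*
 * msg_buf/mb_dma_address were adjusted by 'offset' when the FIT message
 * buffers were allocated (presumably for hardware alignment), so the
 * stored offset is applied again here to recover the addresses the
 * allocator originally returned before freeing.
 */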
static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			skmsg->msg_buf += skmsg->offset;
			skmsg->mb_dma_address += skmsg->offset;
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}

static void skd_free_skreq(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skreq_table == NULL)
		return;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skd_free_sg_list(skdev, skreq->sksg_list,
				 skdev->sgs_per_request,
				 skreq->sksg_dma_address);

		skreq->sksg_list = NULL;
		skreq->sksg_dma_address = 0;

		kfree(skreq->sg);
	}

	kfree(skdev->skreq_table);
	skdev->skreq_table = NULL;
}

static void skd_free_skspcl(struct skd_device *skdev)
{
	u32 i;
	u32 nbytes;

	if (skdev->skspcl_table == NULL)
		return;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		if (skspcl->msg_buf != NULL) {
			nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
			pci_free_consistent(skdev->pdev, nbytes,
					    skspcl->msg_buf,
					    skspcl->mb_dma_address);
		}

		skspcl->msg_buf = NULL;
		skspcl->mb_dma_address = 0;

		skd_free_sg_list(skdev, skspcl->req.sksg_list,
				 SKD_N_SG_PER_SPECIAL,
				 skspcl->req.sksg_dma_address);

		skspcl->req.sksg_list = NULL;
		skspcl->req.sksg_dma_address = 0;

		kfree(skspcl->req.sg);
	}

	kfree(skdev->skspcl_table);
	skdev->skspcl_table = NULL;
}

static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	if (skspcl->data_buf != NULL) {
		nbytes = SKD_N_INTERNAL_BYTES;

		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->data_buf, skspcl->db_dma_address);
	}

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	if (skspcl->msg_buf != NULL) {
		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->msg_buf, skspcl->mb_dma_address);
	}

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}

static void skd_free_sg_list(struct skd_device *skdev,
			     struct fit_sg_descriptor *sg_list,
			     u32 n_sg, dma_addr_t dma_addr)
{
	if (sg_list != NULL) {
		u32 nbytes;

		nbytes = sizeof(*sg_list) * n_sg;

		pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
	}
}

static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk != NULL) {
		struct request_queue *q = disk->queue;

		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		if (q)
			blk_cleanup_queue(q);
		put_disk(disk);
	}
	skdev->disk = NULL;
}

/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */

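/*
 * Report a synthetic CHS geometry (64 heads, 255 sectors/track) derived
 * from the real capacity; the SSD has no physical geometry, but legacy
 * tools still ask for one.
 */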
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	DPRINTK(skdev, "%s: CMD[%s] getgeo device\n",
		bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = capacity / (255 * 64);

		return 0;
	}
	return -EIO;
}

static int skd_bdev_attach(struct skd_device *skdev)
{
	DPRINTK(skdev, "add_disk\n");
	add_disk(skdev->disk);
	return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= skd_bdev_ioctl,
	.getgeo		= skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static DEFINE_PCI_DEVICE_TABLE(skd_pci_tbl) = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

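/*
 * Format a human-readable PCIe link description, e.g. "PCIe (5.0GT/s 4X)",
 * into 'str'. Offset 0x12 into the PCIe capability is the Link Status
 * register (PCI_EXP_LNKSTA); bits 3:0 encode the link speed and bits 9:4
 * the negotiated link width.
 */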
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;	/* Link Status register */
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & 0xF;
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}

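/*
 * One-time per-device setup: enable the PCI function, map its BARs,
 * build the skd_device, hook up interrupts, start the hardware, then
 * wait up to SKD_START_WAIT_SECONDS for the drive to come online before
 * adding the gendisk.
 */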
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc)
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);
	skdev->pdev = pdev;
	skdev->disk->driverfs_dev = &pdev->dev;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		DPRINTK(skdev, "mem_map=%p, phys=%016llx, size=%d\n",
			skdev->mem_map[i],
			(uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		   don't add the disk structure */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* if no error code was returned, report the timeout as ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else {
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
	}
#endif /* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}

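/*
 * Undo skd_pci_probe() in reverse: stop the device, release the IRQ,
 * unmap the BARs, and free the skd_device before disabling the function.
 */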
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

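/*
 * Resume mirrors the probe path: re-enable the function, restore the DMA
 * masks, remap the BARs, and restart the device. The gendisk is still
 * registered, so no skd_bdev_attach() is needed here.
 */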
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc)
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		DPRINTK(skdev, "mem_map=%p, phys=%016llx, size=%d\n",
			skdev->mem_map[i],
			(uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

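/*
 * Build the "name:serial:[pci]" identifier used as a prefix in log
 * messages. Falls back to "??" for the serial number until a valid
 * INQUIRY response has been seen.
 */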
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}

static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	DPRINTK(skdev, "(%s) skdev=%p event='%s'\n", skdev->name, skdev, event);
	DPRINTK(skdev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	DPRINTK(skdev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skdev->in_flight, skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	DPRINTK(skdev, "  timestamp=0x%x cycle=%d cycle_ix=%d\n",
		skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	DPRINTK(skdev, "(%s) skmsg=%p event='%s'\n", skdev->name, skmsg, event);
	DPRINTK(skdev, "  state=%s(%d) id=0x%04x length=%d\n",
		skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	DPRINTK(skdev, "(%s) skreq=%p event='%s'\n", skdev->name, skreq, event);
	DPRINTK(skdev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state,
		skreq->id, skreq->fitmsg_id);
	DPRINTK(skdev, "  timo=0x%x sg_dir=%d n_sg=%d\n",
		skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (!skd_bio) {
		if (skreq->req != NULL) {
			struct request *req = skreq->req;
			u32 lba = (u32)blk_rq_pos(req);
			u32 count = blk_rq_sectors(req);

			DPRINTK(skdev,
				"  req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
				req, lba, lba, count, count,
				(int)rq_data_dir(req));
		} else
			DPRINTK(skdev, "  req=NULL\n");
	} else {
		if (skreq->bio != NULL) {
			struct bio *bio = skreq->bio;
			u32 lba = (u32)bio->bi_sector;
			u32 count = bio_sectors(bio);

			DPRINTK(skdev,
				"  bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
				bio, lba, lba, count, count,
				(int)bio_data_dir(bio));
		} else
			DPRINTK(skdev, "  bio=NULL\n");
	}
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

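/*
 * Module entry point: validate the tuning parameters (clamping anything
 * out of range back to its default), create the flush-command slab,
 * grab a dynamic block major, and register the PCI driver.
 *
 * Assuming the tunables checked below are exposed with module_param()
 * earlier in the file, a load might look like (values illustrative):
 *
 *	modprobe skd skd_max_queue_depth=32 skd_sgs_per_request=64
 */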
static int __init skd_init(void)
{
	int rc = 0;

	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_info("skd_isr_type %d invalid, re-set to %d\n",
			skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
					   sizeof(struct skd_flush_cmd),
					   0, 0, NULL);
	if (!skd_flush_slab) {
		pr_err("failed to allocate flush slab\n");
		return -ENOMEM;
	}

	if (skd_max_queue_depth < 1
	    || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_info("skd_max_queue_depth %d invalid, re-set to %d\n",
			skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_info("skd_max_req_per_msg %d invalid, re-set to %d\n",
			skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_info("skd_sgs_per_request %d invalid, re-set to %d\n",
			skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_info("skd_dbg_level %d invalid, re-set to %d\n",
			skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
			skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
			skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	/* Obtain major device number. */
	rc = register_blkdev(0, DRV_NAME);
	if (rc < 0) {
		kmem_cache_destroy(skd_flush_slab);
		return rc;
	}

	skd_major = rc;

	rc = pci_register_driver(&skd_driver);
	if (rc < 0) {
		/* unwind on failure so the slab and major don't leak */
		unregister_blkdev(skd_major, DRV_NAME);
		kmem_cache_destroy(skd_flush_slab);
	}
	return rc;
}

static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	/* unregister the PCI driver before dropping the block major it uses */
	pci_unregister_driver(&skd_driver);
	unregister_blkdev(skd_major, DRV_NAME);

	kmem_cache_destroy(skd_flush_slab);
}

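/*
 * Flush-command bookkeeping: enqueue stashes an opaque command on the
 * per-device flush list (GFP_ATOMIC, since callers may be in atomic
 * context); dequeue pops the head. Dequeue assumes the caller has
 * already checked that the list is non-empty.
 */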
static int
skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
{
	struct skd_flush_cmd *item;

	item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
	if (!item) {
		pr_err("skd_flush_cmd_enqueue: failed to allocate item\n");
		return -ENOMEM;
	}

	item->cmd = cmd;
	list_add_tail(&item->flist, &skdev->flush_list);
	return 0;
}

static void *
skd_flush_cmd_dequeue(struct skd_device *skdev)
{
	void *cmd;
	struct skd_flush_cmd *item;

	item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
	list_del_init(&item->flist);
	cmd = item->cmd;
	kmem_cache_free(skd_flush_slab, item);
	return cmd;
}

module_init(skd_init);
module_exit(skd_exit);