/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
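
/*
 * Each board family supplies one of these vtables (see the SA5_*_access
 * tables at the bottom of this file); hpsa.c dispatches I/O through the
 * copy stored in ctlr_info.  A minimal sketch of that dispatch, for
 * illustration only (the wrapper name here is hypothetical):
 *
 *	static inline void hpsa_start_io(struct ctlr_info *h,
 *					 struct CommandList *c)
 *	{
 *		h->access.submit_command(h, c);	// e.g. SA5_submit_command
 *	}
 */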

/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};

struct hpsa_sas_port {
	struct list_head port_list_entry;
	u64 sas_address;
	struct sas_port *port;
	int next_phy_index;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};

struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct sas_phy *phy;
	struct hpsa_sas_port *parent_port;
	bool added_to_port;
};
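
/*
 * These three structs form a simple tree: a node owns a list of ports
 * (port_list_head), each port owns a list of phys (phy_list_head), and
 * each child points back at its parent.  A hedged sketch of walking one
 * node's ports with the standard list helpers (variable names are
 * illustrative):
 *
 *	struct hpsa_sas_port *sas_port;
 *
 *	list_for_each_entry(sas_port, &sas_node->port_list_head,
 *			    port_list_entry)
 *		pr_info("port at sas address 0x%016llx\n",
 *			sas_port->sas_address);
 */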

#define EXTERNAL_QD 7
struct hpsa_scsi_dev_t {
	unsigned int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
	u8 expose_device;
	u8 removed : 1;			/* device is marked for death */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	u64 sas_address;
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* count of commands to be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices;
					 * counts commands sent to the
					 * physical device via the
					 * "ioaccel" path.
					 */
	u32 ioaccel_handle;
	u8 active_path_index;
	u8 path_map;
	u8 bay;
	u8 box[8];
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive.
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note that multiple logical drives
	 * may share physical drives: for instance, three logical drives
	 * could each be built from the same five physical disks.  We need
	 * these pointers to count I/Os outstanding against each physical
	 * device so that physical device queue depth limits are honored.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;
	int supports_aborts;
	struct hpsa_sas_port *sas_port;
	int external;	/* 1-from external array 0-not <0-unknown */
};
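
/*
 * How phys_disk[] and ioaccel_cmds_out work together: a hedged sketch of
 * the accounting done in hpsa.c, shown only to illustrate the intent of
 * the fields above:
 *
 *	// Before sending an ioaccel request to a physical disk backing a
 *	// logical volume, bump its outstanding count; back off if the
 *	// device's queue depth would be exceeded.
 *	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
 *			phys_disk->queue_depth) {
 *		atomic_dec(&phys_disk->ioaccel_cmds_out);
 *		// fall back to the normal (non-ioaccel) path
 *	}
 */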

struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};
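
/*
 * Reply queue ring semantics (see SA5_performant_completed() below):
 * the controller DMAs completions into head[], and the low bit of each
 * entry carries a phase flag.  The consumer's wraparound bit flips each
 * time current_entry wraps past the end of the ring, so an entry is new
 * exactly when its phase bit matches rq->wraparound.
 */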

#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()
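
/*
 * Because of the #pragma pack(1) above, these fields pack with no
 * padding; summing their sizes gives 512 bytes, which is the firmware's
 * wire format for this parameter page.  A compile-time guard could
 * assert this (a sketch, not present in the original driver):
 *
 *	static inline void bmic_params_size_check(void)
 *	{
 *		BUILD_BUG_ON(sizeof(struct bmic_controller_parameters)
 *				!= 512);
 *	}
 */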

struct ctlr_info {
	int	ctlr;
	char	devname[8];
	char	*product_name;
	struct pci_dev *pdev;
	u32	board_id;
	u64	sas_address;
	void __iomem *vaddr;
	unsigned long paddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int msix_vectors;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	int			scan_finished;
	u8			scan_waiting : 1;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS	(10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	struct delayed_work event_monitor_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_IOACCEL_ENABLED	(1 << 15)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	drv_req_rescan;
	int	raid_offload_debug;
	int	discovery_polling;
	int	legacy_board;
	struct	ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
	spinlock_t reset_lock;
};
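
/*
 * passthru_cmds_avail (initialized to HPSA_MAX_CONCURRENT_PASSTHRUS)
 * throttles user-space passthrough ioctls.  A hedged sketch of the
 * pattern used in hpsa.c, for illustration:
 *
 *	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
 *		return -EAGAIN;	// too many passthrus already in flight
 *	...issue and wait for the command...
 *	atomic_inc(&h->passthru_cmds_avail);
 */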

struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
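
/*
 * Worked example of the derivations above: with a 120 second ready
 * timeout polled every 100 ms, HPSA_BOARD_READY_ITERATIONS is
 * (120 * 1000) / 100 = 1200 polls, and the not-ready case is
 * (100 * 1000) / 100 = 1000 polls.
 */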

#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0
/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3

/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
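
/*
 * Note the difference between the variants above: plain
 * SA5_submit_command() reads the scratchpad register back after the
 * writel() so the posted PCI write is flushed to the controller before
 * returning, while the _no_read and _ioaccel2 variants skip that MMIO
 * read and let the posted write reach the board on its own.
 */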

/*
 * This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

/*
 * Variant of the above; 0x04 turns interrupts off...
 */
static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5B_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	/* An entry is new when its phase bit matches our wraparound flag. */
	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

/*
 * Returns the value read from hardware;
 * returns FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT 0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5B_intr_pending(struct ctlr_info *h)
{
	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

/* Duplicate entry of the above to mark unsupported boards */
static struct access_method SA5A_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_intr_mask,
	.intr_pending =		SA5_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5B_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5B_intr_mask,
	.intr_pending =		SA5B_intr_pending,
	.command_completed =	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
	.command_completed =	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	.submit_command =	SA5_submit_command_ioaccel2,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	.submit_command =	SA5_submit_command,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	.submit_command =	SA5_submit_command_no_read,
	.set_intr_mask =	SA5_performant_intr_mask,
	.intr_pending =		SA5_performant_intr_pending,
	.command_completed =	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
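
/*
 * hpsa.c pairs PCI board IDs with the access tables above in an array
 * of struct board_type.  A hedged sketch of how one such entry might
 * look (the ID and product name below are illustrative, not copied
 * from the driver):
 *
 *	static struct board_type products[] = {
 *		{0x3241103C, "Smart Array P212", &SA5_access},
 *		...
 *	};
 */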

#endif /* HPSA_H */