/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#ifndef HPSA_H
#define HPSA_H

#include <scsi/scsicam.h>

#define IO_OK		0
#define IO_ERROR	1

struct ctlr_info;

struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};

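/*
 * Illustrative sketch (not part of this header): the driver core is expected
 * to reach the hardware only through this method table rather than calling
 * the SA5_* helpers below directly.  The helper name and call site shown are
 * hypothetical; only the access_method fields come from this file.  Kept
 * under "#if 0" since struct ctlr_info is only forward-declared at this point.
 */
#if 0
static void example_start_io(struct ctlr_info *h, struct CommandList *c)
{
	h->access.submit_command(h, c);		/* board-specific submit */
	if (h->access.intr_pending(h))		/* a completion is waiting */
		(void) h->access.command_completed(h, 0 /* reply queue 0 */);
}
#endif
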
struct hpsa_scsi_dev_t {
	int devtype;
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices:
					 * counts commands sent to the physical
					 * device via the "ioaccel" path.
					 */
	u32 ioaccel_handle;
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note that multiple logical drives
	 * may share physical drives.  You can have, for instance, 5 physical
	 * drives and 3 logical drives, each using those same 5 physical
	 * disks.  We need these pointers for counting I/Os out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
};

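/*
 * Illustrative sketch (not in this header) of how the per-physical-disk
 * counters above could be used to honor queue depth limits on the "ioaccel"
 * path: reserve a slot before sending, back off if the disk is saturated.
 * The helper name is hypothetical and the fallback policy is an assumption.
 */
#if 0
static int example_reserve_ioaccel_slot(struct hpsa_scsi_dev_t *phys_disk)
{
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
	    phys_disk->queue_depth) {
		/* Disk already has queue_depth commands in flight: undo the
		 * reservation so the caller can fall back to the normal
		 * RAID path instead.
		 */
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return -EBUSY;
	}
	return 0;
}
#endif
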
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};

#pragma pack(1)
struct bmic_controller_parameters {
	u8 led_flags;
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 disable_elevator;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 force_narrow;
	u8 rebuild_priority;
	u8 expand_priority;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 bridge_revision;
	u8 snapshot_priority;
	u32 os_specific;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 reserved1;
	u8 nvram_flags;
#define HBA_MODE_ENABLED_FLAG (1 << 3)
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u16 reserved2;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
	u8 orca_password[4];
	u8 access_id[16];
	u8 reserved[356];
};
#pragma pack()

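/*
 * Sketch of a compile-time size check (not present in the driver): with
 * 1-byte packing the field widths above sum to exactly 512 bytes
 * (18 + 64 + 32 + 10 + 4 + 6 + 2 + 4 + 4 + 4 + 16 + 356 bookkeeping aside,
 * the total is 512), so any accidental padding would be caught here.
 */
#if 0
_Static_assert(sizeof(struct bmic_controller_parameters) == 512,
	       "bmic_controller_parameters must be exactly 512 bytes");
#endif
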
struct ctlr_info {
	int ctlr;
	char devname[8];
	char *product_name;
	struct pci_dev *pdev;
	u32 board_id;
	void __iomem *vaddr;
	unsigned long paddr;
	int nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int interrupts_enabled;
	int max_commands;
	int last_allocation;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* queue and queue Info */
	unsigned int Qdepth;
	unsigned int maxSG;
	spinlock_t lock;
	int maxsgentries;
	u8 max_cmd_sg_entries;
	int chainsize;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList *cmd_pool;
	dma_addr_t cmd_pool_dhandle;
	struct io_accel1_cmd *ioaccel_cmd_pool;
	dma_addr_t ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd *ioaccel2_cmd_pool;
	dma_addr_t ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo *errinfo_pool;
	dma_addr_t errinfo_pool_dhandle;
	unsigned long *cmd_pool_bits;
	int scan_finished;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[] */
	int ndevices; /* number of used elements in .dev[] array */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum
	 * (see the sketch after this struct)
	 */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	spinlock_t passthru_count_lock; /* protects passthru_count */
	int passthru_count;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;
	u32 last_heartbeat;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
	u32 events;
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int acciopath_status;
	int raid_offload_debug;
	struct workqueue_struct *resubmit_wq;
};

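/*
 * Hypothetical helpers (not in this header) showing how passthru_count,
 * passthru_count_lock and HPSA_MAX_CONCURRENT_PASSTHRUS are meant to
 * interact: take the lock, refuse a new passthru when the cap is reached,
 * otherwise count it in and later count it back out.  The function names
 * are made up for the sketch.
 */
#if 0
static int example_get_passthru_slot(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
		return -EBUSY;
	}
	h->passthru_count++;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
	return 0;
}

static void example_put_passthru_slot(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	h->passthru_count--;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
}
#endif
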
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};

#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)

/* Maximum time in seconds the driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)

/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)

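/*
 * Hypothetical shape of the recovery loop bounded by the two constants
 * above: retry TEST UNIT READY up to HPSA_TUR_RETRY_LIMIT times, doubling
 * the wait between attempts but never exceeding HPSA_MAX_WAIT_INTERVAL_SECS.
 * The send_tur() helper and the doubling back-off are assumptions made for
 * the sketch, not something this header specifies.
 */
#if 0
static int example_wait_for_device_ready(struct hpsa_scsi_dev_t *dev)
{
	int count, waittime = 1;

	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
		if (send_tur(dev) == 0)
			return 0;	/* device became ready */
		msleep(waittime * 1000);
		waittime = min(2 * waittime, HPSA_MAX_WAIT_INTERVAL_SECS);
	}
	return -ENODEV;
}
#endif
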
/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polls of the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
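
/*
 * Worked example of the derived values above, assuming HZ == 1000:
 *   HPSA_BOARD_READY_POLL_INTERVAL  = (100 * 1000) / 1000 = 100 jiffies
 *   HPSA_BOARD_READY_ITERATIONS     = (120 * 1000) / 100  = 1200 polls
 *   HPSA_BOARD_NOT_READY_ITERATIONS = (100 * 1000) / 100  = 1000 polls
 * i.e. the board is polled every 100 ms, for up to 120 s (waiting for ready)
 * or 100 s (waiting for not ready).  Only the jiffies value depends on HZ.
 */
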
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)

/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL			0x20
#define SA5_REQUEST_PORT_OFFSET		0x40
#define SA5_REQUEST_PORT64_LO_OFFSET	0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET	0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS			0x30
#define SA5_SCRATCHPAD_OFFSET		0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING		0x04
#define SA5_PERF_INTR_OFF		0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR			0xA0
#define SA5_OUTDB_STATUS		0x9C


#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}

static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
	else
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}

/*
 * This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}

static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (!(h->msi_vector || h->msix_vector)) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}

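/*
 * Illustrative sketch (not part of this header) of how a reply queue is
 * expected to be drained: keep pulling tags through the access_method until
 * the queue reports FIFO_EMPTY.  The finish_cmd() helper (looking the tag up
 * in h->cmd_pool and completing the SCSI command) is assumed here, not
 * defined in this file.
 */
#if 0
static void example_drain_reply_queue(struct ctlr_info *h, u8 q)
{
	unsigned long raw_tag;

	while ((raw_tag = h->access.command_completed(h, q)) != FIFO_EMPTY)
		finish_cmd(h, raw_tag);
}
#endif
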
/*
 * returns value read from hardware.
 * returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}

/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}

static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	if (h->msi_vector || h->msix_vector)
		return true;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}

#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}

#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * @todo
		 *
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}

static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};

static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};

static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};

struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};

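/*
 * Illustrative sketch (not in this header) of how struct board_type is
 * expected to be used in the .c file: a lookup table keyed by PCI board_id
 * that selects the product name and access method for a detected controller.
 * The ID 0x12345678 is a placeholder, not a real controller ID, and the
 * helper names are made up for the sketch.
 */
#if 0
static struct board_type example_products[] = {
	{ 0x12345678, "Example Smart Array", &SA5_access },
};

static void example_bind_access(struct ctlr_info *h)
{
	h->product_name = example_products[0].product_name;
	h->access = *(example_products[0].access);
}
#endif
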
#endif /* HPSA_H */