/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 */

#include <scsi/scsicam.h>
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
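
/*
 * Each supported transport mode supplies one of these method tables (see the
 * SA5_*_access definitions at the bottom of this file).  The rest of the
 * driver is expected to go through the table rather than touching registers
 * directly.  Illustrative usage only; the real call sites live in hpsa.c:
 *
 *	h->access.submit_command(h, c);
 *	h->access.set_intr_mask(h, HPSA_INTR_ON);
 */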
struct hpsa_scsi_dev_t {
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks.  We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
};
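
/*
 * A minimal sketch (not part of the driver) of how the phys_disk[] map and
 * ioaccel_cmds_out are meant to be combined to honor a physical drive's
 * queue depth on the ioaccel path.  The helper name and its caller are
 * hypothetical.
 */
static inline bool example_phys_disk_has_room(struct hpsa_scsi_dev_t *logical,
					      int map_index)
{
	struct hpsa_scsi_dev_t *pd = logical->phys_disk[map_index];

	/* commands currently in flight to this physical drive vs. its limit */
	return atomic_read(&pd->ioaccel_cmds_out) < pd->queue_depth;
}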
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};
struct bmic_controller_parameters {
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   snapshot_priority;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
#define HBA_MODE_ENABLED_FLAG (1 << 3)
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
};
struct ctlr_info {
	struct pci_dev *pdev;
	void __iomem *vaddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;
	char hba_mode_enabled;

	/* queue and queue Info */
	u8 max_cmd_sg_entries;
	struct SGDescriptor **cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	spinlock_t scan_lock;
	wait_queue_head_t scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS	(10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u64 last_intr_timestamp;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue
	 * (see the sketch after this struct). */
	u8 q[MAX_REPLY_QUEUES];
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	raid_offload_debug;
	struct workqueue_struct *resubmit_wq;
};
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)
/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
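
/*
 * A minimal sketch of the retry loop these limits are meant to bound
 * (hypothetical helper name; the real logic lives in hpsa.c):
 *
 *	int count = 0, waittime = 1;
 *
 *	while (count < HPSA_TUR_RETRY_LIMIT) {
 *		if (send_test_unit_ready(h, scsi3addr) == 0)
 *			break;			/ * device is ready * /
 *		msleep(1000 * waittime);
 *		waittime = min(2 * waittime, HPSA_MAX_WAIT_INTERVAL_SECS);
 *		count++;
 *	}
 */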
/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
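
/*
 * A minimal sketch of the wait loop these values are meant to drive
 * (hedged; the real implementation in hpsa.c also handles waiting for the
 * "not ready" direction, and uses the scratchpad register defined below):
 *
 *	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
 *		if (readl(h->vaddr + SA5_SCRATCHPAD_OFFSET) ==
 *				HPSA_FIRMWARE_READY)
 *			return 0;	/ * board came ready in time * /
 *		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 *	}
 *	return -ENODEV;	/ * gave up after HPSA_BOARD_READY_WAIT_SECS * /
 */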
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
/* Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
#define HPSA_ERROR_BIT		0x02

/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C

#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0
/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4
/*
 * Send the command to the hardware
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
/*
 *  This card is the opposite of the other cards.
 *   0 turns interrupts on...
 *   0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller
		 * (as per spec.)
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return register_value;
}
/*
 *  returns value read from hardware.
 *     returns FIFO_EMPTY if there is nothing to read
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

#ifdef HPSA_DEBUG
	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
#endif

	return register_value;
}
/*
 *	Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}
static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100

static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}
#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL

static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}
static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};
static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};
static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};
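
/*
 * Illustrative only: hpsa.c pairs each PCI board ID with a marketing name
 * and one of the access-method tables above, then copies the selected
 * methods into h->access at probe time.  The entries below are placeholder
 * examples, not the driver's real product table:
 *
 *	static struct board_type example_products[] = {
 *		{ 0x12345678, "Example Smart Array (performant)",
 *					&SA5_performant_access },
 *		{ 0x9ABCDEF0, "Example ioaccel2 board",
 *					&SA5_ioaccel_mode2_access },
 *	};
 */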