/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
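/*
 * Illustration (added editorial note, not part of the original source):
 * C string-literal concatenation makes DRIVER_NAME expand to
 * "HP HPSA Driver (v 3.4.10-0)" with the version string defined above.
 */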
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000
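/*
 * Worked example (added editorial note): the wait loops pair an iteration
 * count with a per-iteration sleep, so the overall timeouts are
 * MAX_CLEAR_EVENT_WAIT * CLEAR_EVENT_WAIT_INTERVAL = 30000 * 20 ms = 600 s
 * and MAX_MODE_CHANGE_WAIT * MODE_CHANGE_WAIT_INTERVAL = 2000 * 10 ms = 20 s.
 */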
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
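/*
 * Worked example (added editorial note): per the comment above, each
 * board_id packs the PCI subsystem device ID into the high 16 bits and the
 * subsystem vendor ID into the low 16 bits. The Smart Array P212 entry
 * 0x3241103C therefore matches the {PCI_VENDOR_ID_HP,
 * PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241} row of hpsa_pci_device_id[]
 * (subsystem vendor 0x103C, subsystem device 0x3241).
 */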
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			      u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}
static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
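/*
 * Illustration (added editorial note): the test above keeps only the top
 * two bits of byte 3 of the 8-byte address, so any value in 0x40-0x7F
 * (binary 01xxxxxx) in that byte is treated as logical-device addressing,
 * while e.g. 0x00 or 0xC0 is not.
 */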
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
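/*
 * Illustration (added editorial note): the HPSA_RAID_* values index
 * raid_label[], so raid_label[HPSA_RAID_5] is "5" and
 * raid_label[RAID_UNKNOWN] is "UNKNOWN"; any raid_level greater than
 * RAID_UNKNOWN is clamped before the lookup (see raid_level_show() below).
 */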
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
#define MAX_PATHS 8
#define PATH_STRING_LEN 50

static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];
	unsigned char path[MAX_PATHS][PATH_STRING_LEN];

	memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len = snprintf(path[i],
				PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (is_ext_target(h, hdev) ||
			hdev->devtype == TYPE_RAID ||
			is_logical_device(hdev)) {
			output_len += snprintf(path[i] + output_len,
						PATH_STRING_LEN, "%s\n",
						active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		if (hdev->phys_connector[i] > 0)
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN,
				"PORT: %.2s ",
				phys_connector);
		if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += snprintf(path[i] + output_len,
					PATH_STRING_LEN,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += snprintf(path[i] + output_len,
					PATH_STRING_LEN,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
		path[0], path[1], path[2], path[3],
		path[4], path[5], path[6], path[7]);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};
#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
				 HPSA_CMDS_RESERVED_FOR_DRIVER +\
				 HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
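/*
 * Added editorial note on the ring check above: bit 0 of each posted reply
 * tag carries the producer's wraparound parity, and rq->wraparound flips
 * each time the consumer wraps back to entry 0, so an entry is treated as
 * newly posted exactly when its low bit matches the consumer's current
 * parity.
 */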
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
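/*
 * Worked example (added editorial note, values hypothetical): for a command
 * with c->Header.SGList == 2 and h->blockFetchTable[2] == 2, the statement
 * above computes c->busaddr |= 1 | (2 << 1), i.e. the low bits become 0x5:
 * performant-mode bit set, block fetch table entry 2 in bits 1-3, and
 * command type 0 in bits 4-6, matching the bit layout described above.
 */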
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}
/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
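/*
 * Added editorial note: the bitmap above marks every target number already
 * in use on this bus, so find_first_zero_bit() hands back the lowest free
 * target; the function returns 0 on success and nonzero when all
 * HPSA_MAX_DEVICES targets are taken.
 */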
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
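/*
 * Worked example (added editorial note, values hypothetical): with
 * data_disks_per_row = 3, metadata_disks_per_row = 1, row_cnt = 2 and
 * layout_map_count = 1, total_disks_per_row is 4, nraid_map_entries is
 * 2 * 1 * 4 = 8 raid-map slots, and nphys_disk is 1 * 4 = 4 distinct
 * physical disks whose queue depths feed the qdepth aggregation above.
 */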
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	/*
	 * A reset can cause a device status to change
	 * re-schedule the scan to see what happened.
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	sh = h->scsi_host;
	if (sh == NULL) {
		dev_warn(&h->pdev->dev, "%s: scsi_host is null\n", __func__);
		goto free_and_out;
	}
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device) {
			struct scsi_device *sdev =
				scsi_device_lookup(sh, removed[i]->bus,
					removed[i]->target, removed[i]->lun);
			if (sdev != NULL) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			} else {
				/*
				 * We don't expect to get here.
				 * future cmds to this device will get selection
				 * timeout as if the device was gone.
				 */
				hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
					"didn't find device for removal.");
			}
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "addition failed, device not added.");
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (likely(sd)) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd->expose_device ? sd : NULL;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
					h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
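
/*
 * Note on the two allocators above: one chain block is reserved per command
 * slot (h->nr_cmds of them) up front, sized for the worst case (h->chainsize
 * or h->maxsgentries descriptors), so the I/O path never has to allocate
 * memory when a request needs more SG entries than fit in the command frame.
 */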

static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}
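
/*
 * Chaining layout (sketch): when a request needs more SG descriptors than
 * fit inline, the last inline slot (index h->max_cmd_sg_entries - 1) is
 * turned into a pointer entry -- Ext is set to HPSA_SG_CHAIN and Addr/Len
 * describe the DMA-mapped chain block holding the remaining descriptors.
 */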

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/* We will get an event from ctlr to trigger rescan */
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Reset c->scsi_cmd here so that the abort or reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->abort_pending) {
		do_wake = true;
		c->abort_pending = false;
	}
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm.  If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;		/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
			 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;      /* additional sense code */
	u8 ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if ((unlikely(hpsa_is_pending_event(cp)))) {
		if (cp->reset_pending)
			return hpsa_cmd_resolve_and_free(h, cp);
		if (cp->abort_pending)
			return hpsa_cmd_abort_and_free(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		/* Return now to avoid calling scsi_done(). */
		return hpsa_cmd_abort_and_free(h, cp, cmd);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}
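
/*
 * hpsa_map_one() is the single-buffer analog of hpsa_scatter_gather():
 * driver-internal commands (inquiries, report-luns, etc.) carry at most one
 * data buffer, so a single SG descriptor marked HPSA_SG_LAST suffices.
 */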

#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}
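
/*
 * Note: the lockup flag is kept per-cpu to avoid cache-line contention on
 * the hot path; when a lockup is detected the driver sets every CPU's copy
 * elsewhere, so reading only the current CPU's copy (as above) is enough.
 */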

#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						  timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}
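
/*
 * Retry pacing example: the first three retries are issued back to back;
 * from the fourth on, the loop sleeps 10, 20, 40, ... ms between attempts,
 * doubling up to a 1 second cap, for at most MAX_DRIVER_CMD_RETRIES tries.
 */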

static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then lock a lock so that the command cannot complete
			 * while we're considering it.  If the command is not
			 * idle then count it; otherwise revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}

static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}
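
/*
 * VPD page layout assumed above: byte 4 of the ioaccel-status page carries
 * two flag bits -- bit 0 "offload configured", bit 1 "offload enabled".
 * Offload only stays enabled if a valid RAID map is also fetched, since the
 * ioaccel path cannot address a volume without one.
 */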

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[index], buflen);

	kfree(buf);

	return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
			HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}
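
/*
 * Example: a volume mid-format answers TEST UNIT READY with CHECK CONDITION,
 * sense key NOT READY (0x02), ASC 0x04, ASCQ 0x04; even without a VPD status
 * page, that ASC/ASCQ pair alone is enough to keep the volume offline rather
 * than expose it to the mid layer.
 */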

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.  So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}

static void sanitize_inquiry_string(unsigned char *s, int len)
{
	bool terminated = false;

	for (; len > 0; (--len, ++s)) {
		if (*s == 0)
			terminated = true;
		if (terminated || *s < 0x20 || *s > 0x7e)
			*s = ' ';
	}
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		rc = -EIO;
		goto bail_out;
	}

	sanitize_inquiry_string(&inq_buff[8], 8);
	sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;

	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device,
					HPSA_HBA_BUS, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
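
/*
 * Illustration: with 3 physical LUNs, 2 logical LUNs, and the controller
 * reported last (raid_ctlr_position == 5), indexes 0-2 resolve into
 * physdev_list, 3-4 into logdev_list, and 5 to RAID_CTLR_LUNID; if the
 * controller is first (position 0), every other index shifts up by one.
 */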

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}

static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}
3754 static void hpsa_update_scsi_devices(struct ctlr_info
*h
)
3756 /* the idea here is we could get notified
3757 * that some devices have changed, so we do a report
3758 * physical luns and report logical luns cmd, and adjust
3759 * our list of devices accordingly.
3761 * The scsi3addr's of devices won't change so long as the
3762 * adapter is not reset. That means we can rescan and
3763 * tell which devices we already know about, vs. new
3764 * devices, vs. disappearing devices.
3766 struct ReportExtendedLUNdata
*physdev_list
= NULL
;
3767 struct ReportLUNdata
*logdev_list
= NULL
;
3768 struct bmic_identify_physical_device
*id_phys
= NULL
;
3771 u32 ndev_allocated
= 0;
3772 struct hpsa_scsi_dev_t
**currentsd
, *this_device
, *tmpdevice
;
3774 int i
, n_ext_target_devs
, ndevs_to_allocate
;
3775 int raid_ctlr_position
;
3776 bool physical_device
;
3777 DECLARE_BITMAP(lunzerobits
, MAX_EXT_TARGETS
);
3779 currentsd
= kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}
		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
			(physdev_list->LUN[phys_dev_index].device_flags & 0x01))
			continue;

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Inquiry failed, skipping device.\n");
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
		case TYPE_ENCLOSURE:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}
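
/*
 * Worked example of the chaining arithmetic in hpsa_scatter_gather() below
 * (illustrative numbers): with h->max_cmd_sg_entries == 32 and a 40-entry
 * scatterlist, "chained" is true, so the first loop embeds 31 descriptors
 * in the command itself and the second loop places the remaining
 * 40 - 31 = 9 descriptors in the external chain block at
 * h->cmd_sg_list[cp->cmdindex].
 */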
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				   struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
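
/*
 * Example of the CDB rewrite performed by fixup_ioaccel_cdb() below
 * (illustrative values): a READ(6) with cdb[2..3] = 0x12 0x34 and
 * cdb[4] = 0 is a 256-block read at LBA 0x1234; it is rewritten as a
 * READ(10) with the LBA in big-endian form in cdb[2..5] and the count
 * 0x0100 in cdb[7..8], since the accelerated path handles only 10-byte
 * reads and writes.
 */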
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = get_unaligned_be16(&cdb[2]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
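
/*
 * Note on the BUG_ON(c->busaddr & 0x0000007F) checks in the queue-command
 * routines below: masking with 0x7F tests the low seven bits of the bus
 * address, so each accelerated command buffer must sit on a 128-byte
 * boundary.
 */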
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
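
/*
 * Tweak example for set_encrypt_ioaccel2() below (illustrative values):
 * with a 512-byte volume block size the tweak is simply the LBA, so an
 * I/O at LBA 100 uses tweak 100; with a 4096-byte block size the tweak
 * is scaled to 512-byte units, so the same LBA yields
 * (100 * 4096) / 512 = 800.
 */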
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0)  {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
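
/*
 * RAID-0 mapping example for hpsa_scsi_ioaccel_raid_map() below
 * (illustrative geometry): with strip_size = 128 blocks and 3 data disks
 * per row, blocks_per_row = 384.  An 8-block read at LBA 500 falls in row
 * 500 / 384 = 1, at row offset 500 - 384 = 116, hence column
 * 116 / 128 = 0; first and last block share one row and column, so the
 * request is eligible and maps to the disk at map->data[map_index].
 */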
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors  (R1-ADM)
		 * and R10 with # of drives divisible by 3.)
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
						r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
						r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}
/*
 * Submit commands down the "normal" RAID stack path
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand, before (opt.) and after calling cmd_alloc
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
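
/*
 * Tag layout used by hpsa_cmd_init() below: the command index is shifted
 * left by DIRECT_LOOKUP_SHIFT to form the tag, so the completion path can
 * recover the pool index from a returned tag with a single right shift,
 * leaving the low-order bits of the tag clear.
 */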
static void hpsa_cmd_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}
static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
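
/*
 * hpsa_ioaccel_submit() below picks the accelerated path for a command:
 * RAID-mapped offload first (dev->offload_enabled), HBA direct mapping
 * second (dev->hba_ioaccel_enabled).  A return of IO_ACCEL_INELIGIBLE
 * tells the caller to fall back to the normal CISS RAID path.
 */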
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}
static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_init_shared_tag_map(sh, sh->can_queue);
	if (error) {
		dev_err(&h->pdev->dev,
			"%s: scsi_init_shared_tag_map failed for controller %d\n",
			__func__, h->ctlr);
		scsi_host_put(sh);
		return error;
	}
	h->scsi_host = sh;
	return 0;
}

static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx += HPSA_NRESERVED_CMDS;
}
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}
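
/*
 * Retry pacing example for the loop below: waittime starts at one second
 * and doubles after each failed TEST UNIT READY, so the delays run
 * 1, 2, 4, 8, ... seconds until they reach HPSA_MAX_WAIT_INTERVAL_SECS,
 * after which each retry waits the capped interval.
 */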
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int first_queue;
	int last_queue;
	int rq;
	int rc = 0;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	u8 reset_type;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	h->reset_in_progress = 1;

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}
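
/*
 * swizzle_abort_tag() below reverses each four-byte half of the 8-byte
 * tag: bytes 0-3 become 3-2-1-0 and bytes 4-7 become 7-6-5-4, converting
 * the tag's byte order for controllers that require it (see the
 * h->needs_abort_tags_swizzled check in hpsa_send_abort()).
 */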
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}
static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort",
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}
static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	__le32 taglower, tagupper;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	dev = abort->scsi_cmd->device->hostdata;
	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
		return -1;

	c = cmd_alloc(h);
	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
		__func__, tagupper, taglower, c2->error_data.serv_response);
	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		rc = 0;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
	case IOACCEL2_SERV_RESPONSE_FAILURE:
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		rc = -1;
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
			__func__, tagupper, taglower,
			c2->error_data.serv_response);
		rc = -1;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/*
	 * ioaccel2 mode commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these commands,
	 * but not all underlying firmware can handle abort TMF.
	 * Change abort to physical device reset when abort TMF is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
							abort, reply_queue);
	}
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}

/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}

/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}

static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}

/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
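
/*
 * The 32-bit compat ioctl handlers below marshal each 32-bit
 * IOCTL32_Command_struct into its 64-bit counterpart: every field is
 * copied as-is except the user buffer pointer, which is widened with
 * compat_ptr(), and the converted struct is staged in userspace via
 * compat_alloc_user_space() so the regular hpsa_ioctl() path can be
 * reused unchanged.
 */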
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));
	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
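
#ifdef CONFIG_COMPAT
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * conversion pattern used by the two compat handlers above: a 32-bit user
 * pointer from the compat layout is widened with compat_ptr() before it is
 * stored in the native 64-bit structure.
 */
static inline void __user *example_widen_user_ptr(compat_uptr_t p32)
{
	return compat_ptr(p32);	/* zero-extends the 32-bit user pointer */
}
#endif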
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}

static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = (BIG_IOCTL_Command_struct *)
	    kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;

		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
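
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * throttling idiom hpsa_ioctl() uses above for the passthru cases:
 * atomic_dec_if_positive() claims a slot only when one is available, and
 * the matching atomic_inc() returns it when the request completes.
 */
static inline int example_try_take_slot(atomic_t *avail)
{
	return atomic_dec_if_positive(avail) >= 0; /* nonzero if slot taken */
}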
static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return;
}

static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
			BUG();
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case  HPSA_PHYS_TARGET_RESET:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = HPSA_RESET;
			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
			/* Physical target reset needs no control bytes 4-7*/
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case  HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] =  cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case  HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
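
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * encoding fill_cmd() uses above for the 12-byte CDBs: the transfer size
 * is stored big-endian in CDB bytes 6 through 9.
 */
static inline void example_encode_cdb_size(u8 *cdb, u32 size)
{
	cdb[6] = (size >> 24) & 0xFF;	/* MSB */
	cdb[7] = (size >> 16) & 0xFF;
	cdb[8] = (size >> 8) & 0xFF;
	cdb[9] = size & 0xFF;		/* LSB */
}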
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
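
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * alignment math in remap_pci_mem() above: ioremap wants a page-aligned
 * physical base, so the in-page offset is split off and re-added to the
 * returned virtual address.
 */
static inline ulong example_page_split(ulong base, ulong *page_offs)
{
	*page_offs = base & ~PAGE_MASK;	/* offset within the page */
	return base & PAGE_MASK;	/* page-aligned base for ioremap */
}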
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index, u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}

/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled (known firmware bug.)  Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
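
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) that
 * spells out the derivation in queue_to_hba() above in two steps.
 */
static inline struct ctlr_info *example_queue_to_hba(u8 *queue)
{
	u8 *q0 = queue - *queue;	/* h->q[x] == x, so this is &h->q[0] */

	return container_of(q0, struct ctlr_info, q[0]);
}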
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
		sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller." */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{

	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/* We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}

static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
		h->msix_vector = 0;
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
		h->msi_vector = 0;
	}
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSI-X capable controller\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		if (h->msix_vector > num_online_cpus())
			h->msix_vector = num_online_cpus();
		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
					    1, h->msix_vector);
		if (err < 0) {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
			h->msix_vector = 0;
			goto single_msi_mode;
		} else if (err < h->msix_vector) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors available\n",
			       err);
		}
		h->msix_vector = err;
		for (i = 0; i < h->msix_vector; i++)
			h->intr[i] = hpsa_msix_entries[i].vector;
		return;
	}
single_msi_mode:
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI capable controller\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
			*board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
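
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * board-ID composition in hpsa_lookup_board_id() above: the PCI subsystem
 * device ID fills the high 16 bits and the subsystem vendor ID the low 16.
 */
static inline u32 example_make_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}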
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}

/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}

/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;
	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}

/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}

/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}

/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	h->needs_abort_tags_swizzled =
		ctlr_needs_abort_tags_swizzled(h->board_id);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;

		dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}

static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
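
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * bitmap sizing in hpsa_alloc_cmd_pool() above: one bit per command,
 * rounded up to a whole number of unsigned longs.
 */
static inline size_t example_cmd_pool_bits_bytes(int nr_cmds)
{
	return DIV_ROUND_UP(nr_cmds, BITS_PER_LONG) * sizeof(unsigned long);
}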
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}

/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		h->q[i] = 0;
		return;
	}

	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}

/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(h->intr[i], msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       h->intr[i], h->devname);
				for (j = 0; j < i; j++) {
					free_irq(h->intr[j], &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
		hpsa_irq_affinity_hints(h);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			if (h->msix_vector)
				sprintf(h->intrname[h->intr_mode],
					"%s-msix", h->devname);
			else
				sprintf(h->intrname[h->intr_mode],
					"%s-msi", h->devname);
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0,
				h->intrname[h->intr_mode],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED,
				h->intrname[h->intr_mode],
				&h->q[h->intr_mode]);
		}
		irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}

static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return rc;
	}

	return 0;
}

static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}

/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}

static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
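
/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of the
 * read side of the per-cpu flag written by
 * set_lockup_detected_for_all_cpus() above: each CPU checks only its local
 * copy, so no lock is needed on the fast path.
 */
static inline u32 example_read_local_lockup(u32 __percpu *flag)
{
	int cpu;
	u32 rc;

	cpu = get_cpu();
	rc = *per_cpu_ptr(flag, cpu);
	put_cpu();
	return rc;
}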
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}

static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}
7890 static void hpsa_ack_ctlr_events(struct ctlr_info
*h
)
7895 if (!(h
->fw_support
& MISC_FW_EVENT_NOTIFY
))
7898 /* Ask the controller to clear the events we're handling. */
7899 if ((h
->transMethod
& (CFGTBL_Trans_io_accel1
7900 | CFGTBL_Trans_io_accel2
)) &&
7901 (h
->events
& HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE
||
7902 h
->events
& HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE
)) {
7904 if (h
->events
& HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE
)
7905 event_type
= "state change";
7906 if (h
->events
& HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE
)
7907 event_type
= "configuration change";
7908 /* Stop sending new RAID offload reqs via the IO accelerator */
7909 scsi_block_requests(h
->scsi_host
);
7910 for (i
= 0; i
< h
->ndevices
; i
++)
7911 h
->dev
[i
]->offload_enabled
= 0;
7912 hpsa_drain_accel_commands(h
);
7913 /* Set 'accelerator path config change' bit */
7914 dev_warn(&h
->pdev
->dev
,
7915 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7916 h
->events
, event_type
);
7917 writel(h
->events
, &(h
->cfgtable
->clear_event_notify
));
7918 /* Set the "clear event notify field update" bit 6 */
7919 writel(DOORBELL_CLEAR_EVENTS
, h
->vaddr
+ SA5_DOORBELL
);
7920 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7921 hpsa_wait_for_clear_event_notify_ack(h
);
7922 scsi_unblock_requests(h
->scsi_host
);
7924 /* Acknowledge controller notification events. */
7925 writel(h
->events
, &(h
->cfgtable
->clear_event_notify
));
7926 writel(DOORBELL_CLEAR_EVENTS
, h
->vaddr
+ SA5_DOORBELL
);
7927 hpsa_wait_for_clear_event_notify_ack(h
);
7929 writel(CFGTBL_ChangeReq
, h
->vaddr
+ SA5_DOORBELL
);
7930 hpsa_wait_for_mode_change_ack(h
);
7936 /* Check a register on the controller to see if there are configuration
7937 * changes (added/changed/removed logical drives, etc.) which mean that
7938 * we should rescan the controller for devices.
7939 * Also check flag for driver-initiated rescan.
7941 static int hpsa_ctlr_needs_rescan(struct ctlr_info
*h
)
7943 if (h
->drv_req_rescan
) {
7944 h
->drv_req_rescan
= 0;
7948 if (!(h
->fw_support
& MISC_FW_EVENT_NOTIFY
))
7951 h
->events
= readl(&(h
->cfgtable
->event_notify
));
7952 return h
->events
& RESCAN_REQUIRED_EVENT_BITS
;
7956 * Check if any of the offline devices have become ready
7958 static int hpsa_offline_devices_ready(struct ctlr_info
*h
)
7960 unsigned long flags
;
7961 struct offline_device_entry
*d
;
7962 struct list_head
*this, *tmp
;
7964 spin_lock_irqsave(&h
->offline_device_lock
, flags
);
7965 list_for_each_safe(this, tmp
, &h
->offline_device_list
) {
7966 d
= list_entry(this, struct offline_device_entry
,
7968 spin_unlock_irqrestore(&h
->offline_device_lock
, flags
);
7969 if (!hpsa_volume_offline(h
, d
->scsi3addr
)) {
7970 spin_lock_irqsave(&h
->offline_device_lock
, flags
);
7971 list_del(&d
->offline_list
);
7972 spin_unlock_irqrestore(&h
->offline_device_lock
, flags
);
7975 spin_lock_irqsave(&h
->offline_device_lock
, flags
);
7977 spin_unlock_irqrestore(&h
->offline_device_lock
, flags
);
7981 static void hpsa_rescan_ctlr_worker(struct work_struct
*work
)
7983 unsigned long flags
;
7984 struct ctlr_info
*h
= container_of(to_delayed_work(work
),
7985 struct ctlr_info
, rescan_ctlr_work
);
7988 if (h
->remove_in_progress
)
7991 if (hpsa_ctlr_needs_rescan(h
) || hpsa_offline_devices_ready(h
)) {
7992 scsi_host_get(h
->scsi_host
);
7993 hpsa_ack_ctlr_events(h
);
7994 hpsa_scan_start(h
->scsi_host
);
7995 scsi_host_put(h
->scsi_host
);
7997 spin_lock_irqsave(&h
->lock
, flags
);
7998 if (!h
->remove_in_progress
)
7999 queue_delayed_work(h
->rescan_ctlr_wq
, &h
->rescan_ctlr_work
,
8000 h
->heartbeat_sample_interval
);
8001 spin_unlock_irqrestore(&h
->lock
, flags
);
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
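
/*
 * Both workers re-arm themselves and test remove_in_progress under
 * h->lock, which is what lets hpsa_remove_one() stop them cleanly: set
 * the flag first, then cancel_delayed_work_sync() to flush any pass
 * that has already started.
 */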
static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}
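
/*
 * alloc_ordered_workqueue() treats its first argument as a printf format,
 * so the resulting queues are named per controller, e.g. "rescan_0_hpsa"
 * and "resubmit_0_hpsa" for controller 0.
 */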
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:
	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;
	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}
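
	/*
	 * Fallback ordering: a 64-bit DMA mask is tried first, then
	 * 32-bit; "dac" only records which addressing mode was granted,
	 * and probing is aborted when neither mask can be set.
	 */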
	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the rescan and resubmit workqueues */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}
	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do
		 * it all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}
	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}
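
/*
 * The clean* labels above form a strict unwind ladder: each label
 * releases what the corresponding init step allocated and falls through
 * to the next, so any "goto cleanN" frees everything from step N back
 * down to the controller head itself.
 */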
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and issue the flush cache command
	 * to write all data in the battery backed cache to disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	scsi_remove_host(h->scsi_host);		/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);		/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
	kfree(h);					/* init_one 1 */
}
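
/*
 * Teardown mirrors hpsa_init_one() in reverse; the "init_one N"
 * annotations track which allocation step each call undoes.  Steps
 * already handled inside hpsa_shutdown() (irqs, interrupt mode) are not
 * repeated here.
 */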
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
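
/*
 * Worked example: with bucket[] = {5, 6, 8, 10, 12, 20, 28, ...} and
 * min_blocks = 4, a command with 3 SG entries needs 3 + 4 = 7 blocks;
 * the smallest bucket holding 7 is bucket[2] = 8, so bucket_map[3] = 2
 * and the controller fetches 8 * 16 = 128 bytes for that command.
 */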
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
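
	/*
	 * Example: a tag whose low 3 bits are 0 selects bft[0] = 5, so
	 * the controller fetches only 5 * 16 = 80 bytes -- the command
	 * header plus one embedded SG entry -- rather than the whole
	 * command block.
	 */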
	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;
	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
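
/*
 * The BUILD_BUG_ON above is what actually guarantees per-command
 * alignment: the coherent DMA buffer is at least page aligned, so as
 * long as sizeof(struct io_accel1_cmd) is a multiple of the 128-byte
 * alignment, every element of the pool stays aligned as well.
 */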
/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
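
/*
 * One reply queue is allocated per MSI-X vector (minimum one), each
 * sized for max_commands eight-byte entries, so every vector can
 * consume completions without sharing a ring with the others.
 */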
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);