/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2016 Microsemi Corporation
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"
/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.18-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL	20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL	10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT		30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT		2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT		1000
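/*
 * Illustrative sketch (not part of the driver): the MAX_*_WAIT counts above
 * bound polling loops of roughly the following shape, sleeping for the
 * per-iteration interval between reads of the config-table doorbell.  The
 * doorbell_acked() check below is a hypothetical placeholder;
 * hpsa_wait_for_mode_change_ack(), declared later, is the real routine.
 *
 *	int i;
 *
 *	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
 *		if (doorbell_acked(h))			// hypothetical check
 *			break;
 *		msleep(MODE_CHANGE_WAIT_INTERVAL);	// 10 ms/try, 20 s max
 *	}
 */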
/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1925},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
/*
 * board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};
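/*
 * Illustrative note (sketch, not driver code): board_id packs the PCI
 * subsystem device ID into the high 16 bits and the subsystem vendor ID
 * into the low 16 bits, which is how the first table entry above maps
 * PCI subsystem 0x103C/0x3241 to the "Smart Array P212" product string:
 *
 *	u32 board_id = (subsystem_device << 16) | subsystem_vendor;
 *	// (0x3241 << 16) | 0x103C == 0x3241103C
 */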
static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;
static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03
static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[], int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}
/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			      u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}
static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}
static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}
static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}
static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}
static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}
static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}
static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}
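/*
 * Usage note (illustrative, not driver code): once registered through the
 * DEVICE_ATTR() declarations further below, the host attributes appear
 * under sysfs, so the show/store pair above can be exercised from
 * userspace along these lines (host number and path are examples only):
 *
 *	cat /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 *	echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 */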
/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/*
	 * Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};
static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};
static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}
static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
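/*
 * Illustrative example (sketch): the test above looks at the two high bits
 * of byte 3 of the 8-byte CISS LUN address; the 01 pattern there (0x40)
 * marks logical-volume addressing:
 *
 *	unsigned char addr[8] = { 0, 0, 0, 0x40, 0, 0, 0, 0 };
 *	is_logical_dev_addr_mode(addr);	// == 1
 *	addr[3] = 0x00;			// not logical addressing mode
 *	is_logical_dev_addr_mode(addr);	// == 0
 */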
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN	(ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE	(ARRAY_SIZE(raid_label) - 1)
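/*
 * Illustrative note: with the 9-entry raid_label[] table above,
 * RAID_UNKNOWN is index 7 ("UNKNOWN") and PHYSICAL_DRIVE is index 8
 * ("PHYS DRV"), so e.g. raid_label[HPSA_RAID_1] yields "1(+0)".
 * Out-of-range levels are clamped to RAID_UNKNOWN before indexing
 * (see raid_level_show() below).
 */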
static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}
static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}
static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%8phN\n", lunid);
}
static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}
static ssize_t sas_address_show(struct device *dev,
	      struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	sas_address = hdev->sas_address;
	spin_unlock_irqrestore(&h->lock, flags);

	return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}
static ssize_t host_show_ctlr_num(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", h->ctlr);
}
static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(sas_address, S_IRUGO, sas_address_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);
static DEVICE_ATTR(ctlr_num, S_IRUGO,
	host_show_ctlr_num, NULL);
static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	&dev_attr_ctlr_num,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
				 HPSA_CMDS_RESERVED_FOR_DRIVER + \
				 HPSA_MAX_CONCURRENT_PASSTHRUS)
static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
	.compat_ioctl		= hpsa_compat_ioctl,
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}
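/*
 * Illustrative note (sketch): the low bit of each reply-queue entry acts
 * as a cycle bit.  The consumer above only accepts an entry whose low bit
 * matches its rq->wraparound toggle; each time current_entry wraps back
 * to 0 the toggle is flipped, so entries left over from the previous pass
 * through the ring fail the comparison and are not consumed twice.
 */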
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
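/*
 * Illustrative example (sketch, not driver code): for a normal performant
 * mode command whose scatter-gather count maps to block fetch table
 * entry 3, the tag bits compose as:
 *
 *	c->busaddr |= 1 | (3 << 1);	// bit 0 = performant, bits 1-3 = 3
 *	// low bits are now 0b0111; bits 4-6 stay 0 (normal mode command)
 *
 * which is what set_performant_mode() below computes from
 * h->blockFetchTable[c->Header.SGList].
 */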
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * table entry
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vectors))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}
static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}
static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}
static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
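/*
 * Illustrative note: with the values above, the lockup detector samples
 * the heartbeat register every 30 seconds in normal operation, but only
 * every 240 seconds while a firmware flash command is outstanding; the
 * two helpers below toggle between the intervals.
 */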
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}
static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/*
	 * finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/*
	 * If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/*
	 * This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}
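/*
 * Illustrative example (sketch, byte values made up): in the multi-LUN
 * matching above, two addresses that differ only in bytes 4-5 resolve to
 * the same bus/target once those bytes are zeroed:
 *
 *	addr A: 01 02 03 04 00 00 07 08	  (LUN 0 of the device)
 *	addr B: 01 02 03 04 02 00 07 08	  (byte 4 = 2, so device->lun = 2)
 */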
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}
/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}
/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/*
	 * called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
}
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/*
	 * we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}
static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/*
	 * Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}
/*
 * Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}
static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return;

	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}
/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}
/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK &&
			    dev[j]->devtype != TYPE_ZBC)
				continue;
			if (is_logical_device(dev[j]))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
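/*
 * Illustrative example (sketch, numbers made up): for a hypothetical raid
 * map with data_disks_per_row = 3, metadata_disks_per_row = 1, row_cnt = 2
 * and layout_map_count = 1, the arithmetic above gives
 *
 *	total_disks_per_row = 3 + 1 = 4
 *	nraid_map_entries   = 2 * 1 * 4 = 8
 *	nphys_disk          = 1 * 4 = 4
 *
 * so phys_disk[0..7] are matched by ioaccel_handle against the first
 * eight raid map entries.
 */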
static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK &&
		    dev[i]->devtype != TYPE_ZBC)
			continue;
		if (!is_logical_device(dev[i]))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing
		 * and since it isn't changing, we do not need to
		 * update it.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}
static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int rc = 0;

	if (!h->scsi_host)
		return 1;

	if (is_logical_device(device)) /* RAID */
		rc = scsi_add_device(h->scsi_host, device->bus,
					device->target, device->lun);
	else /* HBA */
		rc = hpsa_add_sas_device(h->sas_host, device);

	return rc;
}
static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *dev)
{
	int i;
	int count = 0;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
				dev->scsi3addr)) {
			unsigned long flags;

			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				++count;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	return count;
}
static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
						struct hpsa_scsi_dev_t *device)
{
	int cmds = 0;
	int waits = 0;

	while (1) {
		cmds = hpsa_find_outstanding_commands_for_dev(h, device);
		if (cmds == 0)
			break;
		if (++waits > 20)
			break;
		dev_warn(&h->pdev->dev,
			"%s: removing device with %d outstanding commands!\n",
			__func__, cmds);
		msleep(1000);
	}
}
static void hpsa_remove_device(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *device)
{
	struct scsi_device *sdev = NULL;

	if (!h->scsi_host)
		return;

	if (is_logical_device(device)) { /* RAID */
		sdev = scsi_device_lookup(h->scsi_host, device->bus,
						device->target, device->lun);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/*
			 * We don't expect to get here.  Future commands
			 * to this device will get a selection timeout as
			 * if the device were gone.
			 */
			hpsa_show_dev_msg(KERN_WARNING, h, device,
					"didn't find device for removal.");
		}
	} else { /* HBA */

		device->removed = 1;
		hpsa_wait_for_outstanding_commands_for_dev(h, device);

		hpsa_remove_sas_device(device);
	}
}
static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data. This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;

	/*
	 * A reset can cause a device status to change
	 * re-schedule the scan to see what happened.
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (!changes)
		goto free_and_out;

	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device)
			hpsa_remove_device(h, removed[i]);
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		int rc = 0;

		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		rc = hpsa_add_device(h, added[i]);
		if (!rc)
			continue;
		dev_warn(&h->pdev->dev,
			"addition failed %d, device not added.", rc);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

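/*
 * A condensed sketch of the reconciliation above, derived from the code
 * itself. For each existing entry, hpsa_scsi_find_entry() (defined earlier
 * in this file) reports one of four outcomes:
 *
 *	DEVICE_NOT_FOUND -> hpsa_scsi_remove_entry(): device disappeared
 *	DEVICE_CHANGED   -> hpsa_scsi_replace_entry(): same address, new device
 *	DEVICE_UPDATED   -> hpsa_scsi_update_entry(): minor attributes changed
 *	DEVICE_SAME      -> no action, entry left in place
 *
 * A second pass then adds anything in sd[] still unmatched, so after both
 * passes h->dev[] converges on the controller's current view.
 */
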
/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd = NULL;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
		struct scsi_target *starget;
		struct sas_rphy *rphy;

		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		sd = hpsa_find_device_by_sas_rphy(h, rphy);
		if (sd) {
			sd->target = sdev_id(sdev);
			sd->lun = sdev->lun;
		}
	}
	if (!sd)
		sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
					sdev_id(sdev), sdev->lun);

	if (sd && sd->expose_device) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
					GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
					h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;

	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}

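/*
 * Shape of the chained SG list built above (a sketch inferred from the
 * mapping code, field widths elided). When a command needs more SG entries
 * than fit in the CommandList, the last in-line descriptor becomes a link:
 *
 *	c->SG[0 .. max_cmd_sg_entries-2]   in-line data descriptors
 *	c->SG[max_cmd_sg_entries-1]        Ext = HPSA_SG_CHAIN,
 *	                                   Addr/Len point at the chain block
 *	h->cmd_sg_list[c->cmdindex]        holds the remaining descriptors
 *
 * which is why the unmap path above only touches the chain descriptor when
 * SGTotal exceeds h->max_cmd_sg_entries.
 */
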
/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
		struct CommandList *c,
		struct scsi_cmnd *cmd,
		struct io_accel2_cmd *c2,
		struct hpsa_scsi_dev_t *dev)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
					&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/*
			 * Did an HBA disk disappear? We will eventually
			 * get a state change event from the controller but
			 * in the meantime, we need to tell the OS that the
			 * HBA disk is no longer there and stop I/O
			 * from going down. This allows the potential re-insert
			 * of the disk to get the same device node.
			 */
			if (dev->physical_device && dev->expose_device) {
				cmd->result = DID_NO_CONNECT << 16;
				dev->removed = 1;
				h->drv_req_rescan = 1;
				dev_warn(&h->pdev->dev,
					"%s: device is gone!\n", __func__);
			} else
				/*
				 * Retry by sending down the RAID path.
				 * We will get an event from ctlr to
				 * trigger rescan regardless.
				 */
				retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

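/*
 * A condensed view of the retry policy implemented above, derived from the
 * switch itself rather than from firmware documentation:
 *
 *	COMPLETE + TASK_COMP_GOOD                     -> no retry
 *	COMPLETE + CHK_COND/BUSY/RES_CON/SET_FULL/... -> retry on RAID path
 *	FAILURE  + UNDERRUN                           -> success w/ residual
 *	FAILURE  + NO_PATH/INVALID_DEVICE/
 *	           IOACCEL_DISABLED on an exposed
 *	           physical device                    -> DID_NO_CONNECT
 *	anything unrecognized                         -> retry on RAID path
 *
 * i.e. the ioaccel path fails over to the RAID path whenever in doubt.
 */
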
static void hpsa_cmd_resolve_events(struct ctlr_info *h,
		struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Reset c->scsi_cmd here so that the abort or reset handler will know
	 * this command has completed. Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->abort_pending) {
		do_wake = true;
		c->abort_pending = false;
	}
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm. If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;		/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}

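/*
 * On the mb() above (this reading is inferred from the code, not stated
 * elsewhere): marking the command idle must be globally visible before
 * abort_pending/reset_pending are sampled. A handler that sets one of those
 * flags and then checks hpsa_is_cmd_idle() under h->lock (which implies a
 * barrier on its side) therefore cannot both miss the wakeup here and still
 * see the command as busy.
 */
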
static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	if (cmd && cmd->scsi_done)
		cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
			 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_device(dev) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
			dev->offload_enabled = 0;
			dev->offload_to_be_enabled = 0;
		}

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
					struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
				tmf_status);
		break;
	}
	return -tmf_status;
}

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;      /* additional sense code */
	u8 ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;

	if (!cmd->device) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
		if (dev->physical_device && dev->expose_device &&
			dev->removed) {
			cmd->result = DID_NO_CONNECT << 16;
			return hpsa_cmd_free_and_done(h, cp, cmd);
		}
		if (likely(cp->phys_disk != NULL))
			atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
	}

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if ((unlikely(hpsa_is_pending_event(cp)))) {
		if (cp->reset_pending)
			return hpsa_cmd_free_and_done(h, cp, cmd);
		if (cp->abort_pending)
			return hpsa_cmd_abort_and_free(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_device(dev)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);

		decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it. We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't. We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */
			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response). You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		/* Return now to avoid calling scsi_done(). */
		return hpsa_cmd_abort_and_free(h, cp, cmd);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above. Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
				   int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						  timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}

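/*
 * Retry cadence of the loop above, assuming every attempt keeps failing
 * with unit attention or busy status (a worked example, not measured
 * behavior): attempts 1-4 run back-to-back; from then on the delay doubles
 * per attempt (10, 20, 40, ... 640, then capped at 1280 ms), bounded
 * overall by MAX_DRIVER_CMD_RETRIES (25) before the call gives up
 * with -EIO.
 */
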
static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
				struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
		 txt, lun, cdb);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
			struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *              == c2->scsi_nexus;      */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:		/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then lock a lock so that the command cannot complete
			 * while we're considering it. If the command is not
			 * idle then count it; otherwise revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);
	else
		wait_for_device_to_become_ready(h, scsi3addr, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

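/*
 * How the wait above terminates (a summary of the mechanism, pieced
 * together from this function and hpsa_cmd_resolve_events() earlier in
 * this file): every in-flight command matched by hpsa_cmd_dev_match()
 * bumped dev->reset_cmds_out before the reset was sent;
 * hpsa_cmd_resolve_events() decrements it as each of those commands
 * completes and wakes event_sync_wait_queue on the last one, so
 * wait_event() returns once the device is quiesced (or a lockup is seen).
 */
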
static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;

	if (!hpsa_vpd_page_supported(h, scsi3addr,
		HPSA_VPD_LV_DEVICE_GEOMETRY))
		goto exit;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
		HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);

	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
exit:
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_sense_subsystem_info *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
				PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_controller(struct ctlr_info *h,
	struct bmic_identify_controller *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
		unsigned char scsi3addr[], u16 bmic_device_index,
		struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						DEFAULT_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);

	return rc;
}

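/*
 * The two CDB pokes repeated in the BMIC helpers above split the 16-bit
 * device index across non-adjacent CDB bytes, which is how these vendor
 * commands carry it (taken directly from the code above):
 *
 *	c->Request.CDB[2] = bmic_device_index & 0xff;		low byte
 *	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;	high byte
 */
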
/*
 * get enclosure information
 * struct ReportExtendedLUNdata *rlep - Used for BMIC drive number
 * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure
 * Uses id_physical_device to determine the box_index.
 */
static void hpsa_get_enclosure_info(struct ctlr_info *h,
			unsigned char *scsi3addr,
			struct ReportExtendedLUNdata *rlep, int rle_index,
			struct hpsa_scsi_dev_t *encl_dev)
{
	int rc = -1;
	struct CommandList *c = NULL;
	struct ErrorInfo *ei = NULL;
	struct bmic_sense_storage_box_params *bssbp = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
	u16 bmic_device_index = 0;

	bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);

	if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
		rc = IO_OK;
		goto out;
	}

	bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
	if (!bssbp)
		goto out;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		goto out;

	rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
						id_phys, sizeof(*id_phys));
	if (rc) {
		dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n",
			__func__, encl_dev->external, bmic_device_index);
		goto out;
	}

	c = cmd_alloc(h);

	rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
			sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);

	if (rc)
		goto out;

	if (id_phys->phys_connector[1] == 'E')
		c->Request.CDB[5] = id_phys->box_index;
	else
		c->Request.CDB[5] = 0;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						DEFAULT_TIMEOUT);
	if (rc)
		goto out;

	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		rc = -1;
		goto out;
	}

	encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
	memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
		bssbp->phys_connector, sizeof(bssbp->phys_connector));

	rc = IO_OK;
out:
	kfree(bssbp);
	kfree(id_phys);

	if (c)
		cmd_free(h, c);

	if (rc != IO_OK)
		hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
			"Error, could not get enclosure information\n");
}

static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
						unsigned char *scsi3addr)
{
	struct ReportExtendedLUNdata *physdev;
	u32 nphysicals;
	u64 sa = 0;
	int i;

	physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
	if (!physdev)
		return 0;

	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		kfree(physdev);
		return 0;
	}
	nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;

	for (i = 0; i < nphysicals; i++)
		if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
			sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
			break;
		}

	kfree(physdev);

	return sa;
}

static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
					struct hpsa_scsi_dev_t *dev)
{
	int rc;
	u64 sa = 0;

	if (is_hba_lunid(scsi3addr)) {
		struct bmic_sense_subsystem_info *ssi;

		ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
		if (!ssi)
			return;

		rc = hpsa_bmic_sense_subsystem_information(h,
					scsi3addr, 0, ssi, sizeof(*ssi));
		if (rc == 0) {
			sa = get_unaligned_be64(ssi->primary_world_wide_id);
			h->sas_address = sa;
		}

		kfree(ssi);
	} else
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);

	dev->sas_address = sa;
}

/* Determine whether the device supports a given VPD page */
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return false;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return false;
exit_supported:
	kfree(buf);
	return true;
}

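/*
 * Shape of the Supported VPD Pages response parsed above (per SPC; the
 * three-page layout below is a hypothetical example, not from a specific
 * device):
 *
 *	buf[0..2]  peripheral type / page code / reserved header
 *	buf[3]     page list length, e.g. 3
 *	buf[4..6]  supported page codes, e.g. 0x00, 0x83, 0xc1
 *
 * hence the loop tests buf[3 + i] for i in 1..pages against the wanted
 * page code.
 */
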
static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int index, int buflen)
{
	int rc;
	unsigned char *buf;

	/* Does controller have VPD for device id? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
		return 1; /* not supported */

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
					HPSA_VPD_LV_DEVICE_ID, buf, 64);
	if (rc == 0) {
		if (buflen > 16)
			buflen = 16;
		memcpy(device_id, &buf[8], buflen);
	}

	kfree(buf);

	return rc; /*0 - got id, otherwise, didn't */
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	int rc;
	struct ReportLUNdata *lbuf;

	rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
				      HPSA_REPORT_PHYS_EXTENDED);
	if (!rc || !hpsa_allow_any)
		return rc;

	/* REPORT PHYS EXTENDED is not supported */
	lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
	if (!lbuf)
		return -ENOMEM;

	rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
	if (!rc) {
		int i;
		u32 nphys;

		/* Copy ReportLUNdata header */
		memcpy(buf, lbuf, 8);
		nphys = be32_to_cpu(*((__be32 *)lbuf->LUNListLength)) / 8;
		for (i = 0; i < nphys; i++)
			memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
	}
	kfree(lbuf);
	return rc;
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static unsigned char hpsa_volume_offline(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					DEFAULT_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_FAILED:
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return HPSA_LV_OK;
}

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if a MSA2000 is connected, the drives on that will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully. So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					DEFAULT_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;
	int rc = 0;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff) {
		rc = -ENOMEM;
		goto bail_out;
	}

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		dev_err(&h->pdev->dev,
			"%s: inquiry failed, device will be skipped.\n",
			__func__);
		rc = HPSA_INQUIRY_FAILED;
		goto bail_out;
	}

	scsi_sanitize_inquiry_string(&inq_buff[8], 8);
	scsi_sanitize_inquiry_string(&inq_buff[16], 16);

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	this_device->rev = inq_buff[2];
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
		sizeof(this_device->device_id)))
		dev_err(&h->pdev->dev,
			"hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
			h->ctlr, __func__,
			h->scsi_host->host_no,
			this_device->target, this_device->lun,
			scsi_device_type(this_device->devtype),
			this_device->model);

	if ((this_device->devtype == TYPE_DISK ||
		this_device->devtype == TYPE_ZBC) &&
		is_logical_dev_addr_mode(scsi3addr)) {
		unsigned char volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		this_device->volume_offline = volume_offline;
		if (volume_offline == HPSA_LV_FAILED) {
			rc = HPSA_LV_FAILED;
			dev_err(&h->pdev->dev,
				"%s: LV failed, device will be skipped.\n",
				__func__);
			goto bail_out;
		}
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return rc;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
			struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;

	/*
	 * See if this device supports aborts. If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

/*
 * Helper function to assign bus, target, lun mapping of devices.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = get_unaligned_le32(lunaddrbytes);

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes)) {
			int bus = HPSA_HBA_BUS;

			if (!device->rev)
				bus = HPSA_LEGACY_HBA_BUS;
			hpsa_set_bus_target_lun(device,
					bus, 0, lunid & 0x3fff);
		} else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device,
					HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
		return;
	}
	/* It's a logical device */
	if (device->external) {
		hpsa_set_bus_target_lun(device,
			HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
			lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
				0, lunid & 0x3fff);
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlocal_logicals)
{
	/* In report logicals, local logicals are listed first,
	 * then any externals.
	 */
	int logicals_start = nphysicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return 0;

	if (i < logicals_start)
		return 0;

	/* i is in logicals range, but still within local logicals */
	if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
		return 0;

	return 1; /* it's an external lun */
}

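/*
 * A worked example of the index arithmetic above, for a hypothetical
 * controller reporting 3 physicals and 4 logicals of which 2 are local,
 * with the RAID controller LUN listed first (raid_ctlr_position == 0):
 *
 *	i == 0      the controller itself              -> 0 (not external)
 *	i == 1..3   physicals                          -> 0
 *	i == 4..5   local logicals (i - 3 - 1 < 2)     -> 0
 *	i == 6..7   external logicals                  -> 1
 */
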
/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle;

	/*
	 * external targets don't support BMIC
	 */
	if (dev->external) {
		dev->queue_depth = 7;
		return;
	}

	rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}
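/*
 * Illustration of the queue depth math above (hypothetical values): a
 * drive whose BMIC identify reports current_queue_depth_limit = 32 is
 * exposed with a depth of 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; if the
 * BMIC command fails we fall back to the conservative DRIVE_QUEUE_DEPTH
 * of 7.
 */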
static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}
/* get number of local logical disks. */
static int hpsa_set_local_logical_count(struct ctlr_info *h,
	struct bmic_identify_controller *id_ctlr,
	u32 *nlocals)
{
	int rc;

	if (!id_ctlr) {
		dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
			__func__);
		return -ENOMEM;
	}
	memset(id_ctlr, 0, sizeof(*id_ctlr));
	rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
	if (!rc)
		if (id_ctlr->configured_logical_drive_count < 256)
			*nlocals = id_ctlr->configured_logical_drive_count;
		else
			*nlocals = le16_to_cpu(
					id_ctlr->extended_logical_unit_count);
	else
		*nlocals = -1;
	return rc;
}
static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
{
	struct bmic_identify_physical_device *id_phys;
	bool is_spare = false;
	int rc;

	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	if (!id_phys)
		return false;

	rc = hpsa_bmic_id_physical_device(h,
					lunaddrbytes,
					GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
					id_phys, sizeof(*id_phys));
	if (rc == 0)
		is_spare = (id_phys->more_flags >> 6) & 0x01;

	kfree(id_phys);
	return is_spare;
}
#define RPL_DEV_FLAG_NON_DISK                           0x1
#define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED  0x2
#define RPL_DEV_FLAG_UNCONFIG_DISK                      0x4

#define BMIC_DEVICE_TYPE_ENCLOSURE  6

static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
	struct ext_report_lun_entry *rle)
{
	u8 device_flags;
	u8 device_type;

	if (!MASKED_DEVICE(lunaddrbytes))
		return false;

	device_flags = rle->device_flags;
	device_type = rle->device_type;

	if (device_flags & RPL_DEV_FLAG_NON_DISK) {
		if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
			return false;
		return true;
	}

	if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
		return false;

	if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
		return false;

	/*
	 * Spares may be spun down, we do not want to
	 * do an Inquiry to a RAID set spare drive as
	 * that would have them spun up, that is a
	 * performance hit because I/O to the RAID device
	 * stops while the spin up occurs which can take
	 * over 50 seconds.
	 */
	if (hpsa_is_disk_spare(h, lunaddrbytes))
		return true;

	return false;
}
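/*
 * Example of the flag logic above (illustrative): a masked entry with
 * device_flags = 0x1 (non-disk) is kept only when device_type is 6
 * (enclosure); an unconfigured disk (flags 0x2 | 0x4) is kept; only a
 * configured disk on firmware that supports unconfigured-disk reporting
 * goes on to the (potentially spun-down) spare check.
 */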
static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	struct bmic_identify_controller *id_ctlr = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 nlocal_logicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	bool physical_device;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
	id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys || !id_ctlr) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* Set number of local logicals (non PTRAID) */
	if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
		dev_warn(&h->pdev->dev,
			"%s: Can't determine number of local logical devices.\n",
			__func__);
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);
		bool skip_device = false;

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		/*
		 * Skip over some devices such as a spare.
		 */
		if (!tmpdevice->external && physical_device) {
			skip_device = hpsa_skip_device(h, lunaddrbytes,
					&physdev_list->LUN[phys_dev_index]);
			if (skip_device)
				continue;
		}

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			h->drv_req_rescan = 1;
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/* Turn on discovery_polling if there are ext target devices.
		 * Event-based change notification is unreliable for those.
		 */
		if (!h->discovery_polling) {
			if (tmpdevice->external) {
				h->discovery_polling = 1;
				dev_info(&h->pdev->dev,
					"External target, activate discovery polling.\n");
			}
		}

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}
static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				   struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up at each of
	 * the entries in the one list.
	 */
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}
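/*
 * Chaining example (hypothetical sizes): with h->max_cmd_sg_entries = 32
 * and use_sg = 40, the first 31 descriptors are placed in the embedded
 * list, the 32nd slot becomes the chain link, the remaining 9 descriptors
 * land in the chain block, and SGTotal is reported as use_sg + 1 = 41.
 */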
#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((cdb[1] & 0x1F) << 16) |
				(cdb[2] << 8) |
				cdb[3]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
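/*
 * Example of the fixup above (illustrative CDB): a READ_6 of
 * {0x08, 0x01, 0x02, 0x03, 0x00, 0x00} encodes block 0x010203 and a
 * transfer length byte of 0, i.e. 256 blocks.  It is rewritten as a
 * READ_10: cdb[0] = 0x28, cdb[2..5] = 00 01 02 03, cdb[7..8] = 01 00
 * (256 blocks), and *cdb_len becomes 10.
 */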
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	if (!dev)
		return -1;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}
/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size)/ 512)
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case READ_6:
	case WRITE_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
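/*
 * Tweak example (hypothetical volume): with a 4096-byte volume block
 * size, the tweak is scaled by 4096/512 = 8, so LBA 100 yields
 * first_block = 800, giving tweak_lower = 800 and tweak_upper = 0.
 */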
static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (!cmd->device)
		return -1;

	if (!cmd->device->hostdata)
		return -1;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}
/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	if (!c->scsi_cmd->device)
		return -1;

	if (!c->scsi_cmd->device->hostdata)
		return -1;

	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0)  {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
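/*
 * Example walk of raid_map_helper() (illustrative map): with
 * data_disks_per_row = 4 and layout_map_count = 3, a *map_index of 6
 * starts in mirror group 6 / 4 = 1; if offload_to_mirror = 2 the loop
 * advances one group (map_index += 4, giving 10) and stops once
 * *current_group reaches 2.
 */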
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	if (!dev)
		return -1;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
				(cmd->cmnd[2] << 8) |
				cmd->cmnd[3]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors  (R1-ADM)
		 * and R10 with # of drives divisible by 3.)
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
						r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
						r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];
	if (!c->phys_disk)
		return IO_ACCEL_INELIGIBLE;

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}
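/*
 * Worked RAID-0 example for the mapping above (illustrative geometry):
 * with strip_size = 128 and data_disks_per_row = 4, blocks_per_row = 512.
 * A 16-block read at first_block = 1000 lands in row 1000 / 512 = 1 with
 * row offset 488; both ends fall in column 488 / 128 = 3, so the request
 * is eligible.  With row_cnt = 1 and no metadata disks, map_index = 3 and
 * disk_block = disk_starting_blk + 1 * 128 + (488 - 3 * 128) =
 * disk_starting_blk + 232.
 */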
/*
 * Submit commands down the "normal" RAID stack path
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand, before (opt.) and after calling cmd_alloc
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}
static void hpsa_cmd_init(struct ctlr_info *h, int index,
		struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}
static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	if (!dev)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}
/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	if (dev->removed) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		!blk_rq_is_passthrough(cmd->request) &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}
static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}
static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/*
	 * If a scan is already waiting to run, no need to add another
	 */
	spin_lock_irqsave(&h->scan_lock, flags);
	if (h->scan_waiting) {
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return;
	}

	spin_unlock_irqrestore(&h->scan_lock, flags);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		h->scan_waiting = 1;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	h->scan_waiting = 0;
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/*
	 * Do the scan after a reset completion
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}
static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->transportt = hpsa_sas_transport_template;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = pci_irq_vector(h->pdev, 0);
	sh->unique_id = sh->irq;

	h->scsi_host = sh;
	return 0;
}
static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}
/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx += HPSA_NRESERVED_CMDS;
}
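/*
 * For instance, the request the block layer tagged 5 uses command block
 * 5 + HPSA_NRESERVED_CMDS, leaving indexes 0 .. HPSA_NRESERVED_CMDS - 1
 * free for driver-internal commands.
 */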
/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}
/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int rc = 0;
	int first_queue;
	int last_queue;
	int rq;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	u8 reset_type;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	h->reset_in_progress = 1;

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}
static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
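/*
 * Swizzle example: a tag of 01 02 03 04 05 06 07 08 becomes
 * 04 03 02 01 08 07 06 05, i.e. the byte order of each 32-bit half is
 * reversed, presumably for firmware that expects the opposite byte
 * order in abort tags.
 */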
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}
static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}
static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	if (!dev)
		return;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}
/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort", scsi3addr);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n",
			psa);
	rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n",
			psa);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n",
			psa);
		return -1;  /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n",
		psa);

	return rc; /* success */
}
*h
,
6097 struct CommandList
*abort
, int reply_queue
)
6100 struct CommandList
*c
;
6101 __le32 taglower
, tagupper
;
6102 struct hpsa_scsi_dev_t
*dev
;
6103 struct io_accel2_cmd
*c2
;
6105 dev
= abort
->scsi_cmd
->device
->hostdata
;
6109 if (!dev
->offload_enabled
&& !dev
->hba_ioaccel_enabled
)
6113 setup_ioaccel2_abort_cmd(c
, h
, abort
, reply_queue
);
6114 c2
= &h
->ioaccel2_cmd_pool
[c
->cmdindex
];
6115 (void) hpsa_scsi_do_simple_cmd(h
, c
, reply_queue
, DEFAULT_TIMEOUT
);
6116 hpsa_get_tag(h
, abort
, &taglower
, &tagupper
);
6117 dev_dbg(&h
->pdev
->dev
,
6118 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
6119 __func__
, tagupper
, taglower
);
6120 /* no unmap needed here because no data xfer. */
6122 dev_dbg(&h
->pdev
->dev
,
6123 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
6124 __func__
, tagupper
, taglower
, c2
->error_data
.serv_response
);
6125 switch (c2
->error_data
.serv_response
) {
6126 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE
:
6127 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS
:
6130 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED
:
6131 case IOACCEL2_SERV_RESPONSE_FAILURE
:
6132 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN
:
6136 dev_warn(&h
->pdev
->dev
,
6137 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
6138 __func__
, tagupper
, taglower
,
6139 c2
->error_data
.serv_response
);
6143 dev_dbg(&h
->pdev
->dev
, "%s: Tag:0x%08x:%08x: Finished.\n", __func__
,
6144 tagupper
, taglower
);
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue)
{
	/*
	 * ioaccel2 commands should be aborted via the
	 * accelerated path, since RAID path is unaware of these commands,
	 * but not all underlying firmware can handle abort TMF.
	 * Change abort to physical device reset when abort TMF is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) ||
			dev->physical_device)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h,
							dev->scsi3addr,
							abort, reply_queue);
	}
	return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue);
}
/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}
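
/*
 * A worked sketch of the "direct lookup" tag round trip used above and in
 * process_indexed_cmd() further down: the command's pool index is packed
 * into the upper bits of the tag, leaving the low bits for error flags.
 * The helper names are illustrative only, not part of the driver; the
 * concrete shift width is whatever DIRECT_LOOKUP_SHIFT defines.
 */
#if 0	/* illustrative sketch, not driver code */
static u64 example_pack_tag(u32 cmdindex)
{
	/* e.g. index 5 with a shift of 3 would yield tag 0x28 */
	return (u64)cmdindex << DIRECT_LOOKUP_SHIFT;
}

static u32 example_unpack_tag(u64 raw_tag)
{
	/* inverse of the packing above; the low flag bits are discarded */
	return raw_tag >> DIRECT_LOOKUP_SHIFT;
}
#endif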
/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
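
/*
 * The function above is one half of a counting-semaphore pattern built from
 * an atomic_t and a wait queue.  A minimal sketch of both halves, with
 * illustrative names (the driver's own counter is h->abort_cmds_available):
 */
#if 0	/* illustrative sketch, not driver code */
static int example_claim_slot(atomic_t *available, wait_queue_head_t *waitq)
{
	/* Sleep until a slot can be claimed, or give up after 5 seconds. */
	return !wait_event_timeout(*waitq,
			atomic_dec_if_positive(available) >= 0,
			msecs_to_jiffies(5000));
}

static void example_release_slot(atomic_t *available, wait_queue_head_t *waitq)
{
	/* Return the slot, then wake all sleepers so one of them gets it. */
	atomic_inc(available);
	wake_up_all(waitq);
}
#endif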
/* Send an abort for the specified command.
 * If the device and controller support it,
 * send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				msg);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}
/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}
/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}
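
/*
 * A worked sketch of the bitmap bookkeeping used by cmd_alloc()/cmd_free():
 * index i maps to word (i / BITS_PER_LONG) and bit (i & (BITS_PER_LONG - 1)).
 * On a 64-bit build, i = 70 lands in word 1, bit 6.  Names are illustrative.
 */
#if 0	/* illustrative sketch, not driver code */
static void example_mark_used(unsigned long *bits, int i)
{
	set_bit(i & (BITS_PER_LONG - 1), bits + (i / BITS_PER_LONG));
}

static void example_mark_free(unsigned long *bits, int i)
{
	clear_bit(i & (BITS_PER_LONG - 1), bits + (i / BITS_PER_LONG));
}
#endif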
/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}
#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}
static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev,
			"driver version string '%s' unrecognized.",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
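
/*
 * A worked example of the DriverVer packing above: one byte per component,
 * so "3.4.18" becomes (3 << 16) | (4 << 8) | 18 = 0x030412.  The helper
 * below is an illustrative restatement, not part of the ioctl path.
 */
#if 0	/* illustrative sketch, not driver code */
static u32 example_pack_driver_ver(u8 vmaj, u8 vmin, u8 vsubmin)
{
	return (vmaj << 16) | (vmin << 8) | vsubmin;
}
#endif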
static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
						NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
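
/*
 * A worked example of the chunking loop in hpsa_big_passthru_ioctl(): the
 * user buffer is carved into at most SG_ENTRIES_IN_CMD pieces of at most
 * ioc->malloc_size bytes each.  For buf_size = 100 KiB and malloc_size =
 * 32 KiB this yields chunks of 32, 32, 32 and 4 KiB.  Illustrative only:
 */
#if 0	/* illustrative sketch, not driver code */
static int example_count_chunks(u32 buf_size, u32 malloc_size)
{
	int chunks = 0;

	while (buf_size) {
		u32 sz = (buf_size > malloc_size) ? malloc_size : buf_size;

		buf_size -= sz;
		chunks++;
	}
	return chunks;
}
#endif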
static void check_ioctl_unit_attention(struct ctlr_info *h,
					struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return;
}
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00 target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			/* Spec says this should be BMIC_WRITE */
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
			break;
		case BMIC_SET_DIAG_OPTIONS:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_SUBSYSTEM_INFORMATION:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_SENSE_STORAGE_BOX_PARAMS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_CONTROLLER:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[1] = 0;
			c->Request.CDB[2] = 0;
			c->Request.CDB[3] = 0;
			c->Request.CDB[4] = 0;
			c->Request.CDB[5] = 0;
			c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = 0;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case HPSA_PHYS_TARGET_RESET:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = HPSA_RESET;
			c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
			/* Physical target reset needs no control bytes 4-7 */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}
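
/*
 * A worked example of the big-endian size encoding that fill_cmd() writes
 * into CDB bytes 6..9 for the 12-byte read commands above: size 0x00012000
 * becomes CDB[6]=0x00, CDB[7]=0x01, CDB[8]=0x20, CDB[9]=0x00.  Illustrative
 * restatement only:
 */
#if 0	/* illustrative sketch, not driver code */
static void example_encode_cdb_size(u8 *cdb, u32 size)
{
	cdb[6] = (size >> 24) & 0xFF;	/* MSB */
	cdb[7] = (size >> 16) & 0xFF;
	cdb[8] = (size >> 8) & 0xFF;
	cdb[9] = size & 0xFF;		/* LSB */
}
#endif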
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
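
/*
 * A worked example of the alignment math in remap_pci_mem(): with 4 KiB
 * pages, base 0x1234 gives page_base 0x1000 and page_offs 0x234, so the
 * mapping must cover page_offs + size bytes.  Illustrative only:
 */
#if 0	/* illustrative sketch, not driver code */
static void example_split_base(ulong base, ulong *page_base, ulong *page_offs)
{
	*page_base = base & PAGE_MASK;		/* round down to a page */
	*page_offs = base - *page_base;		/* remainder within the page */
}
#endif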
static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}
static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}
static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}
/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}
/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev,
		"Received interrupt while interrupts disabled (known firmware bug.)  Ignoring.\n");

	return 1;
}
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}
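
/*
 * A sketch of the trick above: because h->q[x] holds the value x, the
 * handler's cookie (&h->q[x]) minus its own contents points back at
 * &h->q[0], from which container_of() recovers h.  Illustrative only:
 */
#if 0	/* illustrative sketch, not driver code */
static struct ctlr_info *example_queue_to_hba(u8 *queue)
{
	u8 *q0 = queue - *queue;	/* &h->q[x] - x == &h->q[0] */

	return container_of(q0, struct ctlr_info, q[0]);
}
#endif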
static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}
static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
/* Send a message CDB to the firmware. Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
			unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
					sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	__le32 paddr32;
	u32 tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return err;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = cpu_to_le32(paddr64);

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = cpu_to_le16(0);
	cmd->CommandHeader.tag = cpu_to_le64(paddr64);
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr =
			cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
	cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));

	writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)
static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, u32 use_doorbell)
{

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 10 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(10000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller."
		 */

		int rc = 0;

		dev_info(&pdev->dev, "using PCI PM to reset controller\n");

		/* enter the D3hot power management state */
		rc = pci_set_power_state(pdev, PCI_D3hot);
		if (rc)
			return rc;

		msleep(500);

		/* enter the D0 power management state */
		rc = pci_set_power_state(pdev, PCI_D0);
		if (rc)
			return rc;

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}
static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
					  unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}
/* This does a hard reset of the controller using PCI power management
 * states or the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	if (!ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Controller not resettable\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
					&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_cfgtable;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev,
				"Soft reset not supported. Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"Failed waiting for board to become ready after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(vaddr);
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller. Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}
/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
	       readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
	       readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
	       readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
	       readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
	       readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif	/* HPSA_DEBUG */
}
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
			    PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4;	/* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default:	/* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
				       "base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}
static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
{
	pci_free_irq_vectors(h->pdev);
	h->msix_vectors = 0;
}
/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable. If not, we use legacy INTx mode.
 */
static int hpsa_interrupt_mode(struct ctlr_info *h)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	/* Some boards advertise MSI but don't really support it */
	switch (h->board_id) {
	case 0x40700E11:
	case 0x40800E11:
	case 0x40820E11:
	case 0x40830E11:
		break;
	default:
		ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
		if (ret > 0) {
			h->msix_vectors = ret;
			return 0;
		}

		flags |= PCI_IRQ_MSI;
		break;
	}

	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
	if (ret < 0)
		return ret;
	return 0;
}
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev,
			"unrecognized board ID: 0x%08x, ignoring.\n",
			*board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
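
/*
 * A worked example of the board_id packing above: subsystem device ID in
 * the high 16 bits, subsystem vendor ID in the low 16 bits, so device
 * 0x3241 from vendor 0x103C yields board_id 0x3241103C.  Illustrative
 * restatement only:
 */
#if 0	/* illustrative sketch, not driver code */
static u32 example_pack_board_id(u16 subsys_device, u16 subsys_vendor)
{
	return ((u32)subsys_device << 16) | subsys_vendor;
}
#endif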
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
				    unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
				     int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
			       u32 *cfg_base_addr, u64 *cfg_base_addr_index,
			       u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}
static void hpsa_free_cfgtables(struct ctlr_info *h)
{
	if (h->transtable) {
		iounmap(h->transtable);
		h->transtable = NULL;
	}
	if (h->cfgtable) {
		iounmap(h->cfgtable);
		h->cfgtable = NULL;
	}
}
/* Find and map CISS config table and transfer table
 * several items must be unmapped (freed) later
 */
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		       cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable) {
		dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
		return -ENOMEM;
	}
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
				cfg_base_addr_index)+cfg_offset+trans_offset,
				sizeof(*h->transtable));
	if (!h->transtable) {
		dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
		hpsa_free_cfgtables(h);
		return -ENOMEM;
	}
	return 0;
}
static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
#define MIN_MAX_COMMANDS 16
	BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);

	h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < MIN_MAX_COMMANDS) {
		dev_warn(&h->pdev->dev,
			"Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
			h->max_commands,
			MIN_MAX_COMMANDS);
		h->max_commands = MIN_MAX_COMMANDS;
	}
}
/* If the controller reports that the total max sg entries is greater than 512,
 * then we know that chained SG blocks work.  (Original smart arrays did not
 * support chained SG blocks and would return zero for max sg entries.)
 */
static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
{
	return h->maxsgentries > 512;
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands;
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	if (hpsa_supports_chained_sg_blocks(h)) {
		/* Limit in-command s/g elements to 32 to save dma'able memory. */
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		/*
		 * Original smart arrays supported at most 31 s/g entries
		 * embedded inline in the command (trying to use more
		 * would lock up the controller)
		 */
		h->max_cmd_sg_entries = 31;
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
	if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
}
static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_err(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}
static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;
	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			goto done;
		/* delay and try again */
		msleep(CLEAR_EVENT_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take a while.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
		if (h->remove_in_progress)
			goto done;
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			goto done;
		/* delay and try again */
		msleep(MODE_CHANGE_WAIT_INTERVAL);
	}
	return -ENODEV;
done:
	return 0;
}
/* return -ENODEV or other reason on error, 0 on success */
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h))
		goto error;
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}
/* free items allocated or mapped by hpsa_pci_init */
static void hpsa_free_pci_init(struct ctlr_info *h)
{
	hpsa_free_cfgtables(h);			/* pci_init 4 */
	iounmap(h->vaddr);			/* pci_init 3 */
	h->vaddr = NULL;
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);		/* pci_init 1 */
	pci_release_regions(h->pdev);		/* pci_init 2 */
}
/* several items must be freed later */
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return prod_index;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	h->needs_abort_tags_swizzled =
		ctlr_needs_abort_tags_swizzled(h->board_id);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_err(&h->pdev->dev, "failed to enable PCI device\n");
		pci_disable_device(h->pdev);
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"failed to obtain PCI resources\n");
		pci_disable_device(h->pdev);
		return err;
	}

	pci_set_master(h->pdev);

	err = hpsa_interrupt_mode(h);
	if (err)
		goto clean1;
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto clean2;	/* intmode+region, pci */
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
		err = -ENOMEM;
		goto clean2;	/* intmode+region, pci */
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	err = hpsa_find_cfgtables(h);
	if (err)
		goto clean3;	/* vaddr, intmode+region, pci */
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto clean4;	/* cfgtables, vaddr, intmode+region, pci */
	return 0;

clean4:	/* cfgtables, vaddr, intmode+region, pci */
	hpsa_free_cfgtables(h);
clean3:	/* vaddr, intmode+region, pci */
	iounmap(h->vaddr);
	h->vaddr = NULL;
clean2:	/* intmode+region, pci */
	hpsa_disable_interrupt_mode(h);
clean1:
	/*
	 * call pci_disable_device before pci_release_regions per
	 * Documentation/PCI/pci.txt
	 */
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev, board_id);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;

		dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	h->cmd_pool_bits = NULL;
	if (h->cmd_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct CommandList),
				h->cmd_pool,
				h->cmd_pool_dhandle);
		h->cmd_pool = NULL;
		h->cmd_pool_dhandle = 0;
	}
	if (h->errinfo_pool) {
		pci_free_consistent(h->pdev,
				h->nr_cmds * sizeof(struct ErrorInfo),
				h->errinfo_pool,
				h->errinfo_pool_dhandle);
		h->errinfo_pool = NULL;
		h->errinfo_pool_dhandle = 0;
	}
}
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	hpsa_preinitialize_commands(h);
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
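/*
 * Illustrative arithmetic (the numbers are examples only): cmd_pool_bits is
 * a bitmap with one bit per command slot.  With nr_cmds == 1040 and 64-bit
 * longs, DIV_ROUND_UP(1040, BITS_PER_LONG) == 17, so the kzalloc() above
 * requests 17 * sizeof(unsigned long) == 136 bytes for the whole pool.
 */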
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		free_irq(pci_irq_vector(h->pdev, 0), &h->q[h->intr_mode]);
		h->q[h->intr_mode] = 0;
		return;
	}

	for (i = 0; i < h->msix_vectors; i++) {
		free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
		h->q[i] = 0;
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}
/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vectors; i++) {
			sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
			rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
					0, h->intrname[i],
					&h->q[i]);
			if (rc) {
				int j;

				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       pci_irq_vector(h->pdev, i), h->devname);
				for (j = 0; j < i; j++) {
					free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
	} else {
		/* Use single reply pool */
		if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
			sprintf(h->intrname[0], "%s-msi%s", h->devname,
				h->msix_vectors ? "x" : "");
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				msixhandler, 0,
				h->intrname[0],
				&h->q[h->intr_mode]);
		} else {
			sprintf(h->intrname[h->intr_mode],
				"%s-intx", h->devname);
			rc = request_irq(pci_irq_vector(h->pdev, 0),
				intxhandler, IRQF_SHARED,
				h->intrname[0],
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
		       pci_irq_vector(h->pdev, 0), h->devname);
		hpsa_free_irqs(h);
		return -ENODEV;
	}
	return 0;
}
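/*
 * Illustrative example (names are hypothetical): on a controller named
 * "hpsa0" with 4 MSI-X vectors, the loop above registers handlers for
 * "hpsa0-msix0" .. "hpsa0-msix3".  Each handler receives &h->q[i] as its
 * dev_id, and since h->q[i] == i, the handler recovers which reply queue
 * to service from the pointed-to value alone.
 */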
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	int rc;

	rc = hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
	if (rc)
		return rc;

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return rc;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&h->pdev->dev, "Board failed to become ready "
			"after soft reset.\n");
		return rc;
	}

	return 0;
}
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev,
					h->reply_queue_size,
					h->reply_queue[i].head,
					h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
	h->reply_queue_size = 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_performant_mode(h);		/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);		/* init_one 6 */
	hpsa_free_cmd_pool(h);			/* init_one 5 */
	hpsa_free_irqs(h);			/* init_one 4 */
	scsi_host_put(h->scsi_host);		/* init_one 3 */
	h->scsi_host = NULL;			/* init_one 3 */
	hpsa_free_pci_init(h);			/* init_one 2_5 */
	free_percpu(h->lockup_detected);	/* init_one 2 */
	h->lockup_detected = NULL;		/* init_one 2 */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);	/* init_one 1 */
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);				/* init_one 1 */
}
/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;
	int failcount = 0;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
			finish_cmd(c);
			atomic_dec(&h->commands_outstanding);
			failcount++;
		}
		cmd_free(h, c);
	}
	dev_warn(&h->pdev->dev,
		"failed %d commands in fail_all\n", failcount);
}
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu) {
		u32 *lockup_detected;

		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
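/*
 * A sketch of the matching read side, assuming the reader runs with
 * preemption disabled (the real lockup_detected() helper is defined
 * earlier in this file):
 *
 *	cpu = get_cpu();
 *	value = *per_cpu_ptr(h->lockup_detected, cpu);
 *	put_cpu();
 *
 * Keeping one copy per CPU lets the hot I/O paths poll the flag without
 * bouncing a shared cache line between CPUs.
 */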
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}
static int detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return false;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return true;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
	return false;
}
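/*
 * Timing example (assuming a heartbeat_sample_interval of 30 * HZ): a
 * lockup is declared only if (a) no interrupt has arrived within the last
 * 30 seconds, (b) at least 30 seconds have passed since the previous
 * heartbeat check, and (c) the controller's HeartBeat counter did not
 * advance between the two reads.  A busy or merely slow controller fails
 * one of the first two tests and is left alone.
 */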
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++) {
			h->dev[i]->offload_enabled = 0;
			h->dev[i]->offload_to_be_enabled = 0;
		}
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (h->drv_req_rescan) {
		h->drv_req_rescan = 0;
		return 1;
	}

	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}
/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}
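/*
 * Locking note -- a reading of the code above, not new behavior: the
 * spinlock is dropped around hpsa_volume_offline() because that call
 * issues commands to the controller and may sleep.  The function returns
 * immediately after removing an entry rather than continuing to iterate
 * over a list that may have changed while the lock was dropped.
 */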
static int hpsa_luns_changed(struct ctlr_info *h)
{
	int rc = 1; /* assume there are changes */
	struct ReportLUNdata *logdev = NULL;

	/* if we can't find out if lun data has changed,
	 * assume that it has.
	 */

	if (!h->lastlogicals)
		return rc;

	logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
	if (!logdev)
		return rc;

	if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
		dev_warn(&h->pdev->dev,
			"report luns failed, can't track lun changes.\n");
		goto out;
	}
	if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
		dev_info(&h->pdev->dev,
			"Lun changes detected.\n");
		memcpy(h->lastlogicals, logdev, sizeof(*logdev));
		goto out;
	} else
		rc = 0; /* no changes detected. */
out:
	kfree(logdev);
	return rc;
}
static void hpsa_rescan_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, rescan_ctlr_work);

	if (h->remove_in_progress)
		return;

	/*
	 * Do the scan after the reset
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	} else if (h->discovery_polling) {
		hpsa_disable_rld_caching(h);
		if (hpsa_luns_changed(h)) {
			struct Scsi_Host *sh = NULL;

			dev_info(&h->pdev->dev,
				"driver discovery polling rescan.\n");
			sh = scsi_host_get(h->scsi_host);
			if (sh != NULL) {
				hpsa_scan_start(sh);
				scsi_host_put(sh);
			}
		}
	}
	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);

	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	spin_lock_irqsave(&h->lock, flags);
	if (!h->remove_in_progress)
		schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
						char *name)
{
	struct workqueue_struct *wq = NULL;

	wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
	if (!wq)
		dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);

	return wq;
}
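/*
 * Usage note: the two callers in hpsa_init_one() pass "rescan" and
 * "resubmit", so for controller 0 the workqueues come out named
 * "rescan_0_hpsa" and "resubmit_0_hpsa" (alloc_ordered_workqueue()
 * applies the printf-style "%s_%d_hpsa" format to the extra arguments).
 */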
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;
	u32 board_id;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0) {
		dev_warn(&pdev->dev, "Board ID not found\n");
		return rc;
	}

	rc = hpsa_init_reset_devices(pdev, board_id);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware. and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		dev_err(&pdev->dev, "Failed to allocate controller head\n");
		return -ENOMEM;
	}

	h->pdev = pdev;

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
	atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);

	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
		rc = -ENOMEM;
		goto clean1;	/* aer/h */
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc)
		goto clean2;	/* lu, aer/h */

	/* relies on h-> settings made by hpsa_pci_init, including
	 * interrupt_mode h->intr */
	rc = hpsa_scsi_host_alloc(h);
	if (rc)
		goto clean2_5;	/* pci, lu, aer/h */

	sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean3;	/* shost, pci, lu, aer/h */
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean3;	/* shost, pci, lu, aer/h */
	rc = hpsa_alloc_cmd_pool(h);
	if (rc)
		goto clean4;	/* irq, shost, pci, lu, aer/h */
	rc = hpsa_alloc_sg_chain_blocks(h);
	if (rc)
		goto clean5;	/* cmd, irq, shost, pci, lu, aer/h */
	init_waitqueue_head(&h->scan_wait_queue);
	init_waitqueue_head(&h->abort_cmd_wait_queue);
	init_waitqueue_head(&h->event_sync_wait_queue);
	mutex_init(&h->reset_mutex);
	h->scan_finished = 1; /* no scan currently in progress */
	h->scan_waiting = 0;

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;

	spin_lock_init(&h->devlock);
	rc = hpsa_put_ctlr_into_performant_mode(h);
	if (rc)
		goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */

	/* create the resubmit workqueue */
	h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
	if (!h->rescan_ctlr_wq) {
		rc = -ENOMEM;
		goto clean7;
	}

	h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
	if (!h->resubmit_wq) {
		rc = -ENOMEM;
		goto clean7;	/* aer/h */
	}

	/*
	 * At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			/*
			 * cannot goto clean7 or free_irqs will be called
			 * again. Instead, do its work
			 */
			hpsa_free_performant_mode(h);	/* clean7 */
			hpsa_free_sg_chain_blocks(h);	/* clean6 */
			hpsa_free_cmd_pool(h);		/* clean5 */
			/*
			 * skip hpsa_free_irqs(h) clean4 since that
			 * was just called before request_irqs failed
			 */
			goto clean3;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean7;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't goto clean, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;
	/* Disable discovery polling.*/
	h->discovery_polling = 0;


	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);

	h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
	if (!h->lastlogicals)
		dev_info(&h->pdev->dev,
			"Can't track change to report lun data\n");

	/* hook into SCSI subsystem */
	rc = hpsa_scsi_add_host(h);
	if (rc)
		goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
	queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}
/* Make controller gather fresh report lun data each time we
 * send down a report luns request
 */
static void hpsa_disable_rld_caching(struct ctlr_info *h)
{
	u32 *options;
	struct CommandList *c;
	int rc;

	/* Don't bother trying to set diag options if locked up */
	if (unlikely(h->lockup_detected))
		return;

	options = kzalloc(sizeof(*options), GFP_KERNEL);
	if (!options)
		return;

	c = cmd_alloc(h);

	/* first, get the current diag options settings */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0) || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now, set the bit for disabling the RLD caching */
	*options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;

	if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
			PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
		goto errout;

	/* Now verify that it got set: */
	if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto errout;

	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
		PCI_DMA_FROMDEVICE, DEFAULT_TIMEOUT);
	if ((rc != 0)  || (c->err_info->CommandStatus != 0))
		goto errout;

	if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
		goto out;

errout:
	dev_err(&h->pdev->dev,
			"Error: failed to disable report lun data caching.\n");
out:
	cmd_free(h, c);
	kfree(options);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupt, and send the flush...
	 * to write all data in the battery backed cache to disks.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	scsi_remove_host(h->scsi_host);			/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);			/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */
	kfree(h->lastlogicals);

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */

	hpsa_delete_sas_host(h);

	kfree(h);					/* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
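/*
 * Worked example (illustrative numbers): with bucket[] = {5, 6, 8, 10, 12,
 * 20, 28, ...} and min_blocks = 4, a command carrying i = 3 SG entries
 * needs size = 3 + 4 = 7 sixteen-byte blocks.  The first bucket >= 7 is
 * bucket[2] = 8, so bucket_map[3] = 2 and the controller will fetch
 * 8 * 16 = 128 bytes for that command.
 */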
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.   bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2)
			access = SA5_ioaccel_mode2_access;
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
				&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
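/*
 * Sizing example (illustrative): each reply queue entry is one u64, so
 * with max_commands == 1024 every one of the nreply_queues ring buffers
 * allocated above is 8 KB of DMA-coherent memory.  One queue is created
 * per MSI-X vector, or a single queue otherwise.
 */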
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
	struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct sas_phy *phy;

	hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
	if (!hpsa_sas_phy)
		return NULL;

	phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
		hpsa_sas_port->next_phy_index);
	if (!phy) {
		kfree(hpsa_sas_phy);
		return NULL;
	}

	hpsa_sas_port->next_phy_index++;
	hpsa_sas_phy->phy = phy;
	hpsa_sas_phy->parent_port = hpsa_sas_port;

	return hpsa_sas_phy;
}

static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
	sas_phy_free(phy);
	if (hpsa_sas_phy->added_to_port)
		list_del(&hpsa_sas_phy->phy_list_entry);
	kfree(hpsa_sas_phy);
}
static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_phy *phy;
	struct sas_identify *identify;

	hpsa_sas_port = hpsa_sas_phy->parent_port;
	phy = hpsa_sas_phy->phy;

	identify = &phy->identify;
	memset(identify, 0, sizeof(*identify));
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->device_type = SAS_END_DEVICE;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;
	phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
	phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
	phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;

	rc = sas_phy_add(hpsa_sas_phy->phy);
	if (rc)
		return rc;

	sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
	list_add_tail(&hpsa_sas_phy->phy_list_entry,
			&hpsa_sas_port->phy_list_head);
	hpsa_sas_phy->added_to_port = true;

	return 0;
}

static int
	hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
				struct sas_rphy *rphy)
{
	struct sas_identify *identify;

	identify = &rphy->identify;
	identify->sas_address = hpsa_sas_port->sas_address;
	identify->initiator_port_protocols = SAS_PROTOCOL_STP;
	identify->target_port_protocols = SAS_PROTOCOL_STP;

	return sas_rphy_add(rphy);
}
static struct hpsa_sas_port
	*hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
				u64 sas_address)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_port *port;

	hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
	if (!hpsa_sas_port)
		return NULL;

	INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
	hpsa_sas_port->parent_node = hpsa_sas_node;

	port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
	if (!port)
		goto free_hpsa_port;

	rc = sas_port_add(port);
	if (rc)
		goto free_sas_port;

	hpsa_sas_port->port = port;
	hpsa_sas_port->sas_address = sas_address;
	list_add_tail(&hpsa_sas_port->port_list_entry,
			&hpsa_sas_node->port_list_head);

	return hpsa_sas_port;

free_sas_port:
	sas_port_free(port);
free_hpsa_port:
	kfree(hpsa_sas_port);

	return NULL;
}

static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
{
	struct hpsa_sas_phy *hpsa_sas_phy;
	struct hpsa_sas_phy *next;

	list_for_each_entry_safe(hpsa_sas_phy, next,
			&hpsa_sas_port->phy_list_head, phy_list_entry)
		hpsa_free_sas_phy(hpsa_sas_phy);

	sas_port_delete(hpsa_sas_port->port);
	list_del(&hpsa_sas_port->port_list_entry);
	kfree(hpsa_sas_port);
}

static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
{
	struct hpsa_sas_node *hpsa_sas_node;

	hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
	if (hpsa_sas_node) {
		hpsa_sas_node->parent_dev = parent_dev;
		INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
	}

	return hpsa_sas_node;
}

static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
{
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_port *next;

	if (!hpsa_sas_node)
		return;

	list_for_each_entry_safe(hpsa_sas_port, next,
			&hpsa_sas_node->port_list_head, port_list_entry)
		hpsa_free_sas_port(hpsa_sas_port);

	kfree(hpsa_sas_node);
}

static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
					struct sas_rphy *rphy)
{
	int i;
	struct hpsa_scsi_dev_t *device;

	for (i = 0; i < h->ndevices; i++) {
		device = h->dev[i];
		if (!device->sas_port)
			continue;
		if (device->sas_port->rphy == rphy)
			return device;
	}

	return NULL;
}
static int hpsa_add_sas_host(struct ctlr_info *h)
{
	int rc;
	struct device *parent_dev;
	struct hpsa_sas_node *hpsa_sas_node;
	struct hpsa_sas_port *hpsa_sas_port;
	struct hpsa_sas_phy *hpsa_sas_phy;

	parent_dev = &h->scsi_host->shost_gendev;

	hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
	if (!hpsa_sas_node)
		return -ENOMEM;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
	if (!hpsa_sas_port) {
		rc = -ENODEV;
		goto free_sas_node;
	}

	hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
	if (!hpsa_sas_phy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
	if (rc)
		goto free_sas_phy;

	h->sas_host = hpsa_sas_node;

	return 0;

free_sas_phy:
	hpsa_free_sas_phy(hpsa_sas_phy);
free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
free_sas_node:
	hpsa_free_sas_node(hpsa_sas_node);

	return rc;
}

static void hpsa_delete_sas_host(struct ctlr_info *h)
{
	hpsa_free_sas_node(h->sas_host);
}

static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
				struct hpsa_scsi_dev_t *device)
{
	int rc;
	struct hpsa_sas_port *hpsa_sas_port;
	struct sas_rphy *rphy;

	hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
	if (!hpsa_sas_port)
		return -ENOMEM;

	rphy = sas_end_device_alloc(hpsa_sas_port->port);
	if (!rphy) {
		rc = -ENODEV;
		goto free_sas_port;
	}

	hpsa_sas_port->rphy = rphy;
	device->sas_port = hpsa_sas_port;

	rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
	if (rc)
		goto free_sas_port;

	return 0;

free_sas_port:
	hpsa_free_sas_port(hpsa_sas_port);
	device->sas_port = NULL;

	return rc;
}

static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
{
	if (device->sas_port) {
		hpsa_free_sas_port(device->sas_port);
		device->sas_port = NULL;
	}
}

static int
hpsa_sas_get_linkerrors(struct sas_phy *phy)
{
	return 0;
}

static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
	return 0;
}

static int
hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
{
	return -ENXIO;
}

static int
hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	return 0;
}

static int
hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
{
	return 0;
}

static int
hpsa_sas_phy_setup(struct sas_phy *phy)
{
	return 0;
}

static void
hpsa_sas_phy_release(struct sas_phy *phy)
{
}

static int
hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
{
	return -EINVAL;
}

/* SMP = Serial Management Protocol */
static int
hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
			struct request *req)
{
	return -EINVAL;
}

static struct sas_function_template hpsa_sas_transport_functions = {
	.get_linkerrors = hpsa_sas_get_linkerrors,
	.get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
	.get_bay_identifier = hpsa_sas_get_bay_identifier,
	.phy_reset = hpsa_sas_phy_reset,
	.phy_enable = hpsa_sas_phy_enable,
	.phy_setup = hpsa_sas_phy_setup,
	.phy_release = hpsa_sas_phy_release,
	.set_phy_speed = hpsa_sas_phy_speed,
	.smp_handler = hpsa_sas_smp_handler,
};
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	int rc;

	hpsa_sas_transport_template =
		sas_attach_transport(&hpsa_sas_transport_functions);
	if (!hpsa_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&hpsa_pci_driver);

	if (rc)
		sas_release_transport(hpsa_sas_transport_template);

	return rc;
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
	sas_release_transport(hpsa_sas_transport_template);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)

	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
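/*
 * Expansion example: VERIFY_OFFSET(sg, 64) above becomes
 * BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64), so any change to
 * these structures that disturbs the layout the firmware expects fails at
 * compile time rather than corrupting commands at run time.
 */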
module_init(hpsa_init);
module_exit(hpsa_cleanup);