/* drivers/scsi/hpsa.c (mirror_ubuntu-bionic-kernel.git, tag Ubuntu-4.15.0-46.49) */
1 /*
2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2016 Microsemi Corporation
4 * Copyright 2014-2015 PMC-Sierra, Inc.
5 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 *
16 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
17 *
18 */
19
20 #include <linux/module.h>
21 #include <linux/interrupt.h>
22 #include <linux/types.h>
23 #include <linux/pci.h>
24 #include <linux/pci-aspm.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/fs.h>
29 #include <linux/timer.h>
30 #include <linux/init.h>
31 #include <linux/spinlock.h>
32 #include <linux/compat.h>
33 #include <linux/blktrace_api.h>
34 #include <linux/uaccess.h>
35 #include <linux/io.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/completion.h>
38 #include <linux/moduleparam.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_cmnd.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi_tcq.h>
44 #include <scsi/scsi_eh.h>
45 #include <scsi/scsi_transport_sas.h>
46 #include <scsi/scsi_dbg.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
50 #include <linux/atomic.h>
51 #include <linux/jiffies.h>
52 #include <linux/percpu-defs.h>
53 #include <linux/percpu.h>
54 #include <asm/unaligned.h>
55 #include <asm/div64.h>
56 #include "hpsa_cmd.h"
57 #include "hpsa.h"
58
59 /*
60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
61 * with an optional trailing '-' followed by a byte value (0-255).
62 */
63 #define HPSA_DRIVER_VERSION "3.4.20-125"
64 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
65 #define HPSA "hpsa"
66
67 /* How long to wait for CISS doorbell communication */
68 #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
69 #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
70 #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
71 #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
72 #define MAX_IOCTL_CONFIG_WAIT 1000
73
74 /* define how many times we will try a command because of bus resets */
75 #define MAX_CMD_RETRIES 3
76
77 /* Embedded module documentation macros - see modules.h */
78 MODULE_AUTHOR("Hewlett-Packard Company");
79 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
80 HPSA_DRIVER_VERSION);
81 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
82 MODULE_VERSION(HPSA_DRIVER_VERSION);
83 MODULE_LICENSE("GPL");
84 MODULE_ALIAS("cciss");
85
86 static int hpsa_simple_mode;
87 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
88 MODULE_PARM_DESC(hpsa_simple_mode,
89 "Use 'simple mode' rather than 'performant mode'");
90
91 /* define the PCI info for the cards we can control */
92 static const struct pci_device_id hpsa_pci_device_id[] = {
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
131 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
134 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
135 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
137 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
138 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
139 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
140 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
141 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
142 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
143 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
144 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
145 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
146 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
147 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
148 {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
149 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
150 {0,}
151 };
152
153 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
154
155 /* board_id = Subsystem Device ID (upper 16 bits) | Subsystem Vendor ID (lower 16 bits); see note after the table
156 * product = Marketing Name for the board
157 * access = Address of the struct of function pointers
158 */
159 static struct board_type products[] = {
160 {0x40700E11, "Smart Array 5300", &SA5A_access},
161 {0x40800E11, "Smart Array 5i", &SA5B_access},
162 {0x40820E11, "Smart Array 532", &SA5B_access},
163 {0x40830E11, "Smart Array 5312", &SA5B_access},
164 {0x409A0E11, "Smart Array 641", &SA5A_access},
165 {0x409B0E11, "Smart Array 642", &SA5A_access},
166 {0x409C0E11, "Smart Array 6400", &SA5A_access},
167 {0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
168 {0x40910E11, "Smart Array 6i", &SA5A_access},
169 {0x3225103C, "Smart Array P600", &SA5A_access},
170 {0x3223103C, "Smart Array P800", &SA5A_access},
171 {0x3234103C, "Smart Array P400", &SA5A_access},
172 {0x3235103C, "Smart Array P400i", &SA5A_access},
173 {0x3211103C, "Smart Array E200i", &SA5A_access},
174 {0x3212103C, "Smart Array E200", &SA5A_access},
175 {0x3213103C, "Smart Array E200i", &SA5A_access},
176 {0x3214103C, "Smart Array E200i", &SA5A_access},
177 {0x3215103C, "Smart Array E200i", &SA5A_access},
178 {0x3237103C, "Smart Array E500", &SA5A_access},
179 {0x323D103C, "Smart Array P700m", &SA5A_access},
180 {0x3241103C, "Smart Array P212", &SA5_access},
181 {0x3243103C, "Smart Array P410", &SA5_access},
182 {0x3245103C, "Smart Array P410i", &SA5_access},
183 {0x3247103C, "Smart Array P411", &SA5_access},
184 {0x3249103C, "Smart Array P812", &SA5_access},
185 {0x324A103C, "Smart Array P712m", &SA5_access},
186 {0x324B103C, "Smart Array P711m", &SA5_access},
187 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
188 {0x3350103C, "Smart Array P222", &SA5_access},
189 {0x3351103C, "Smart Array P420", &SA5_access},
190 {0x3352103C, "Smart Array P421", &SA5_access},
191 {0x3353103C, "Smart Array P822", &SA5_access},
192 {0x3354103C, "Smart Array P420i", &SA5_access},
193 {0x3355103C, "Smart Array P220i", &SA5_access},
194 {0x3356103C, "Smart Array P721m", &SA5_access},
195 {0x1920103C, "Smart Array P430i", &SA5_access},
196 {0x1921103C, "Smart Array P830i", &SA5_access},
197 {0x1922103C, "Smart Array P430", &SA5_access},
198 {0x1923103C, "Smart Array P431", &SA5_access},
199 {0x1924103C, "Smart Array P830", &SA5_access},
200 {0x1925103C, "Smart Array P831", &SA5_access},
201 {0x1926103C, "Smart Array P731m", &SA5_access},
202 {0x1928103C, "Smart Array P230i", &SA5_access},
203 {0x1929103C, "Smart Array P530", &SA5_access},
204 {0x21BD103C, "Smart Array P244br", &SA5_access},
205 {0x21BE103C, "Smart Array P741m", &SA5_access},
206 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
207 {0x21C0103C, "Smart Array P440ar", &SA5_access},
208 {0x21C1103C, "Smart Array P840ar", &SA5_access},
209 {0x21C2103C, "Smart Array P440", &SA5_access},
210 {0x21C3103C, "Smart Array P441", &SA5_access},
211 {0x21C4103C, "Smart Array", &SA5_access},
212 {0x21C5103C, "Smart Array P841", &SA5_access},
213 {0x21C6103C, "Smart HBA H244br", &SA5_access},
214 {0x21C7103C, "Smart HBA H240", &SA5_access},
215 {0x21C8103C, "Smart HBA H241", &SA5_access},
216 {0x21C9103C, "Smart Array", &SA5_access},
217 {0x21CA103C, "Smart Array P246br", &SA5_access},
218 {0x21CB103C, "Smart Array P840", &SA5_access},
219 {0x21CC103C, "Smart Array", &SA5_access},
220 {0x21CD103C, "Smart Array", &SA5_access},
221 {0x21CE103C, "Smart HBA", &SA5_access},
222 {0x05809005, "SmartHBA-SA", &SA5_access},
223 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
224 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
225 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
226 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
227 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
228 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
229 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
230 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
231 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
232 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
233 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
234 };
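/*
 * Illustrative note (editorial addition, not in the original source):
 * board_id packs the PCI subsystem IDs as
 *   (subsystem device ID << 16) | subsystem vendor ID.
 * For example, the entry 0x3241103C above is subsystem device 0x3241 with
 * subsystem vendor 0x103C (HP), i.e. the Smart Array P212 that appears in
 * hpsa_pci_device_id[] as {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE,
 * 0x103C, 0x3241}.
 */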
235
236 static struct scsi_transport_template *hpsa_sas_transport_template;
237 static int hpsa_add_sas_host(struct ctlr_info *h);
238 static void hpsa_delete_sas_host(struct ctlr_info *h);
239 static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
240 struct hpsa_scsi_dev_t *device);
241 static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
242 static struct hpsa_scsi_dev_t
243 *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
244 struct sas_rphy *rphy);
245
246 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
247 static const struct scsi_cmnd hpsa_cmd_busy;
248 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
249 static const struct scsi_cmnd hpsa_cmd_idle;
250 static int number_of_controllers;
251
252 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
253 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
254 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
255
256 #ifdef CONFIG_COMPAT
257 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
258 void __user *arg);
259 #endif
260
261 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
262 static struct CommandList *cmd_alloc(struct ctlr_info *h);
263 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
264 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
265 struct scsi_cmnd *scmd);
266 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
267 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
268 int cmd_type);
269 static void hpsa_free_cmd_pool(struct ctlr_info *h);
270 #define VPD_PAGE (1 << 8)
271 #define HPSA_SIMPLE_ERROR_BITS 0x03
272
273 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
274 static void hpsa_scan_start(struct Scsi_Host *);
275 static int hpsa_scan_finished(struct Scsi_Host *sh,
276 unsigned long elapsed_time);
277 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
278
279 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
280 static int hpsa_slave_alloc(struct scsi_device *sdev);
281 static int hpsa_slave_configure(struct scsi_device *sdev);
282 static void hpsa_slave_destroy(struct scsi_device *sdev);
283
284 static void hpsa_update_scsi_devices(struct ctlr_info *h);
285 static int check_for_unit_attention(struct ctlr_info *h,
286 struct CommandList *c);
287 static void check_ioctl_unit_attention(struct ctlr_info *h,
288 struct CommandList *c);
289 /* performant mode helper functions */
290 static void calc_bucket_map(int *bucket, int num_buckets,
291 int nsgs, int min_blocks, u32 *bucket_map);
292 static void hpsa_free_performant_mode(struct ctlr_info *h);
293 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
294 static inline u32 next_command(struct ctlr_info *h, u8 q);
295 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
296 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
297 u64 *cfg_offset);
298 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
299 unsigned long *memory_bar);
300 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
301 bool *legacy_board);
302 static int wait_for_device_to_become_ready(struct ctlr_info *h,
303 unsigned char lunaddr[],
304 int reply_queue);
305 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
306 int wait_for_ready);
307 static inline void finish_cmd(struct CommandList *c);
308 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
309 #define BOARD_NOT_READY 0
310 #define BOARD_READY 1
311 static void hpsa_drain_accel_commands(struct ctlr_info *h);
312 static void hpsa_flush_cache(struct ctlr_info *h);
313 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
314 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
315 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
316 static void hpsa_command_resubmit_worker(struct work_struct *work);
317 static u32 lockup_detected(struct ctlr_info *h);
318 static int detect_controller_lockup(struct ctlr_info *h);
319 static void hpsa_disable_rld_caching(struct ctlr_info *h);
320 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
321 struct ReportExtendedLUNdata *buf, int bufsize);
322 static bool hpsa_vpd_page_supported(struct ctlr_info *h,
323 unsigned char scsi3addr[], u8 page);
324 static int hpsa_luns_changed(struct ctlr_info *h);
325 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
326 struct hpsa_scsi_dev_t *dev,
327 unsigned char *scsi3addr);
328
329 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
330 {
331 unsigned long *priv = shost_priv(sdev->host);
332 return (struct ctlr_info *) *priv;
333 }
334
335 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
336 {
337 unsigned long *priv = shost_priv(sh);
338 return (struct ctlr_info *) *priv;
339 }
340
341 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
342 {
343 return c->scsi_cmd == SCSI_CMD_IDLE;
344 }
345
346 static inline bool hpsa_is_pending_event(struct CommandList *c)
347 {
348 return c->reset_pending;
349 }
350
351 /* extract sense key, asc, and ascq from sense data. -1 means invalid. */
352 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
353 u8 *sense_key, u8 *asc, u8 *ascq)
354 {
355 struct scsi_sense_hdr sshdr;
356 bool rc;
357
358 *sense_key = -1;
359 *asc = -1;
360 *ascq = -1;
361
362 if (sense_data_len < 1)
363 return;
364
365 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
366 if (rc) {
367 *sense_key = sshdr.sense_key;
368 *asc = sshdr.asc;
369 *ascq = sshdr.ascq;
370 }
371 }
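/*
 * Illustrative example (editorial addition, assuming SPC fixed-format
 * sense data): for the 14-byte sense buffer
 *   70 00 06 00 00 00 00 06 00 00 00 00 29 00
 * scsi_normalize_sense() reports sense_key = 0x06 (UNIT ATTENTION),
 * asc = 0x29 (power on or reset occurred), ascq = 0x00, so
 * decode_sense_data() returns those values; on unparseable sense data all
 * three outputs stay at -1 (0xff).
 */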
372
373 static int check_for_unit_attention(struct ctlr_info *h,
374 struct CommandList *c)
375 {
376 u8 sense_key, asc, ascq;
377 int sense_len;
378
379 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
380 sense_len = sizeof(c->err_info->SenseInfo);
381 else
382 sense_len = c->err_info->SenseLen;
383
384 decode_sense_data(c->err_info->SenseInfo, sense_len,
385 &sense_key, &asc, &ascq);
386 if (sense_key != UNIT_ATTENTION || asc == 0xff)
387 return 0;
388
389 switch (asc) {
390 case STATE_CHANGED:
391 dev_warn(&h->pdev->dev,
392 "%s: a state change detected, command retried\n",
393 h->devname);
394 break;
395 case LUN_FAILED:
396 dev_warn(&h->pdev->dev,
397 "%s: LUN failure detected\n", h->devname);
398 break;
399 case REPORT_LUNS_CHANGED:
400 dev_warn(&h->pdev->dev,
401 "%s: report LUN data changed\n", h->devname);
402 /*
403 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
404 * target (array) devices.
405 */
406 break;
407 case POWER_OR_RESET:
408 dev_warn(&h->pdev->dev,
409 "%s: a power on or device reset detected\n",
410 h->devname);
411 break;
412 case UNIT_ATTENTION_CLEARED:
413 dev_warn(&h->pdev->dev,
414 "%s: unit attention cleared by another initiator\n",
415 h->devname);
416 break;
417 default:
418 dev_warn(&h->pdev->dev,
419 "%s: unknown unit attention detected\n",
420 h->devname);
421 break;
422 }
423 return 1;
424 }
425
426 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
427 {
428 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
429 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
430 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
431 return 0;
432 dev_warn(&h->pdev->dev, HPSA "device busy");
433 return 1;
434 }
435
436 static u32 lockup_detected(struct ctlr_info *h);
437 static ssize_t host_show_lockup_detected(struct device *dev,
438 struct device_attribute *attr, char *buf)
439 {
440 int ld;
441 struct ctlr_info *h;
442 struct Scsi_Host *shost = class_to_shost(dev);
443
444 h = shost_to_hba(shost);
445 ld = lockup_detected(h);
446
447 return sprintf(buf, "ld=%d\n", ld);
448 }
449
450 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
451 struct device_attribute *attr,
452 const char *buf, size_t count)
453 {
454 int status, len;
455 struct ctlr_info *h;
456 struct Scsi_Host *shost = class_to_shost(dev);
457 char tmpbuf[10];
458
459 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
460 return -EACCES;
461 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
462 strncpy(tmpbuf, buf, len);
463 tmpbuf[len] = '\0';
464 if (sscanf(tmpbuf, "%d", &status) != 1)
465 return -EINVAL;
466 h = shost_to_hba(shost);
467 h->acciopath_status = !!status;
468 dev_warn(&h->pdev->dev,
469 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
470 h->acciopath_status ? "enabled" : "disabled");
471 return count;
472 }
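/*
 * Usage sketch (editorial addition, assuming the standard scsi_host sysfs
 * layout): this store method is reached through the host attribute
 * registered further below, e.g.
 *   echo 1 > /sys/class/scsi_host/host<N>/hp_ssd_smart_path_status
 * It requires both CAP_SYS_ADMIN and CAP_SYS_RAWIO and toggles
 * h->acciopath_status (HP SSD Smart Path) for the whole controller.
 */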
473
474 static ssize_t host_store_raid_offload_debug(struct device *dev,
475 struct device_attribute *attr,
476 const char *buf, size_t count)
477 {
478 int debug_level, len;
479 struct ctlr_info *h;
480 struct Scsi_Host *shost = class_to_shost(dev);
481 char tmpbuf[10];
482
483 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
484 return -EACCES;
485 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
486 strncpy(tmpbuf, buf, len);
487 tmpbuf[len] = '\0';
488 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
489 return -EINVAL;
490 if (debug_level < 0)
491 debug_level = 0;
492 h = shost_to_hba(shost);
493 h->raid_offload_debug = debug_level;
494 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
495 h->raid_offload_debug);
496 return count;
497 }
498
499 static ssize_t host_store_rescan(struct device *dev,
500 struct device_attribute *attr,
501 const char *buf, size_t count)
502 {
503 struct ctlr_info *h;
504 struct Scsi_Host *shost = class_to_shost(dev);
505 h = shost_to_hba(shost);
506 hpsa_scan_start(h->scsi_host);
507 return count;
508 }
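/*
 * Usage sketch (editorial addition): writing any value to
 * /sys/class/scsi_host/host<N>/rescan lands here and kicks
 * hpsa_scan_start(), i.e. a rediscovery of logical and physical devices.
 */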
509
510 static ssize_t host_show_firmware_revision(struct device *dev,
511 struct device_attribute *attr, char *buf)
512 {
513 struct ctlr_info *h;
514 struct Scsi_Host *shost = class_to_shost(dev);
515 unsigned char *fwrev;
516
517 h = shost_to_hba(shost);
518 if (!h->hba_inquiry_data)
519 return 0;
520 fwrev = &h->hba_inquiry_data[32];
521 return snprintf(buf, 20, "%c%c%c%c\n",
522 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
523 }
524
525 static ssize_t host_show_commands_outstanding(struct device *dev,
526 struct device_attribute *attr, char *buf)
527 {
528 struct Scsi_Host *shost = class_to_shost(dev);
529 struct ctlr_info *h = shost_to_hba(shost);
530
531 return snprintf(buf, 20, "%d\n",
532 atomic_read(&h->commands_outstanding));
533 }
534
535 static ssize_t host_show_transport_mode(struct device *dev,
536 struct device_attribute *attr, char *buf)
537 {
538 struct ctlr_info *h;
539 struct Scsi_Host *shost = class_to_shost(dev);
540
541 h = shost_to_hba(shost);
542 return snprintf(buf, 20, "%s\n",
543 h->transMethod & CFGTBL_Trans_Performant ?
544 "performant" : "simple");
545 }
546
547 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
548 struct device_attribute *attr, char *buf)
549 {
550 struct ctlr_info *h;
551 struct Scsi_Host *shost = class_to_shost(dev);
552
553 h = shost_to_hba(shost);
554 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
555 (h->acciopath_status == 1) ? "enabled" : "disabled");
556 }
557
558 /* List of controllers which cannot be hard reset on kexec with reset_devices */
559 static u32 unresettable_controller[] = {
560 0x324a103C, /* Smart Array P712m */
561 0x324b103C, /* Smart Array P711m */
562 0x3223103C, /* Smart Array P800 */
563 0x3234103C, /* Smart Array P400 */
564 0x3235103C, /* Smart Array P400i */
565 0x3211103C, /* Smart Array E200i */
566 0x3212103C, /* Smart Array E200 */
567 0x3213103C, /* Smart Array E200i */
568 0x3214103C, /* Smart Array E200i */
569 0x3215103C, /* Smart Array E200i */
570 0x3237103C, /* Smart Array E500 */
571 0x323D103C, /* Smart Array P700m */
572 0x40800E11, /* Smart Array 5i */
573 0x409C0E11, /* Smart Array 6400 */
574 0x409D0E11, /* Smart Array 6400 EM */
575 0x40700E11, /* Smart Array 5300 */
576 0x40820E11, /* Smart Array 532 */
577 0x40830E11, /* Smart Array 5312 */
578 0x409A0E11, /* Smart Array 641 */
579 0x409B0E11, /* Smart Array 642 */
580 0x40910E11, /* Smart Array 6i */
581 };
582
583 /* List of controllers which cannot even be soft reset */
584 static u32 soft_unresettable_controller[] = {
585 0x40800E11, /* Smart Array 5i */
586 0x40700E11, /* Smart Array 5300 */
587 0x40820E11, /* Smart Array 532 */
588 0x40830E11, /* Smart Array 5312 */
589 0x409A0E11, /* Smart Array 641 */
590 0x409B0E11, /* Smart Array 642 */
591 0x40910E11, /* Smart Array 6i */
592 /* Exclude 640x boards. These are two pci devices in one slot
593 * which share a battery backed cache module. One controls the
594 * cache, the other accesses the cache through the one that controls
595 * it. If we reset the one controlling the cache, the other will
596 * likely not be happy. Just forbid resetting this conjoined mess.
597 * The 640x isn't really supported by hpsa anyway.
598 */
599 0x409C0E11, /* Smart Array 6400 */
600 0x409D0E11, /* Smart Array 6400 EM */
601 };
602
603 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
604 {
605 int i;
606
607 for (i = 0; i < nelems; i++)
608 if (a[i] == board_id)
609 return 1;
610 return 0;
611 }
612
613 static int ctlr_is_hard_resettable(u32 board_id)
614 {
615 return !board_id_in_array(unresettable_controller,
616 ARRAY_SIZE(unresettable_controller), board_id);
617 }
618
619 static int ctlr_is_soft_resettable(u32 board_id)
620 {
621 return !board_id_in_array(soft_unresettable_controller,
622 ARRAY_SIZE(soft_unresettable_controller), board_id);
623 }
624
625 static int ctlr_is_resettable(u32 board_id)
626 {
627 return ctlr_is_hard_resettable(board_id) ||
628 ctlr_is_soft_resettable(board_id);
629 }
630
631 static ssize_t host_show_resettable(struct device *dev,
632 struct device_attribute *attr, char *buf)
633 {
634 struct ctlr_info *h;
635 struct Scsi_Host *shost = class_to_shost(dev);
636
637 h = shost_to_hba(shost);
638 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
639 }
640
641 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
642 {
643 return (scsi3addr[3] & 0xC0) == 0x40;
644 }
645
646 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
647 "1(+0)ADM", "UNKNOWN", "PHYS DRV"
648 };
649 #define HPSA_RAID_0 0
650 #define HPSA_RAID_4 1
651 #define HPSA_RAID_1 2 /* also used for RAID 10 */
652 #define HPSA_RAID_5 3 /* also used for RAID 50 */
653 #define HPSA_RAID_51 4
654 #define HPSA_RAID_6 5 /* also used for RAID 60 */
655 #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
656 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
657 #define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)
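/*
 * Illustrative mapping (editorial addition): a device whose raid_level is
 * HPSA_RAID_1 (2) is reported by raid_level_show() as "RAID 1(+0)", and
 * any raid_level greater than RAID_UNKNOWN is clamped to the "UNKNOWN"
 * label before printing.
 */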
658
659 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
660 {
661 return !device->physical_device;
662 }
663
664 static ssize_t raid_level_show(struct device *dev,
665 struct device_attribute *attr, char *buf)
666 {
667 ssize_t l = 0;
668 unsigned char rlevel;
669 struct ctlr_info *h;
670 struct scsi_device *sdev;
671 struct hpsa_scsi_dev_t *hdev;
672 unsigned long flags;
673
674 sdev = to_scsi_device(dev);
675 h = sdev_to_hba(sdev);
676 spin_lock_irqsave(&h->lock, flags);
677 hdev = sdev->hostdata;
678 if (!hdev) {
679 spin_unlock_irqrestore(&h->lock, flags);
680 return -ENODEV;
681 }
682
683 /* Is this even a logical drive? */
684 if (!is_logical_device(hdev)) {
685 spin_unlock_irqrestore(&h->lock, flags);
686 l = snprintf(buf, PAGE_SIZE, "N/A\n");
687 return l;
688 }
689
690 rlevel = hdev->raid_level;
691 spin_unlock_irqrestore(&h->lock, flags);
692 if (rlevel > RAID_UNKNOWN)
693 rlevel = RAID_UNKNOWN;
694 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
695 return l;
696 }
697
698 static ssize_t lunid_show(struct device *dev,
699 struct device_attribute *attr, char *buf)
700 {
701 struct ctlr_info *h;
702 struct scsi_device *sdev;
703 struct hpsa_scsi_dev_t *hdev;
704 unsigned long flags;
705 unsigned char lunid[8];
706
707 sdev = to_scsi_device(dev);
708 h = sdev_to_hba(sdev);
709 spin_lock_irqsave(&h->lock, flags);
710 hdev = sdev->hostdata;
711 if (!hdev) {
712 spin_unlock_irqrestore(&h->lock, flags);
713 return -ENODEV;
714 }
715 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
716 spin_unlock_irqrestore(&h->lock, flags);
717 return snprintf(buf, 20, "0x%8phN\n", lunid);
718 }
719
720 static ssize_t unique_id_show(struct device *dev,
721 struct device_attribute *attr, char *buf)
722 {
723 struct ctlr_info *h;
724 struct scsi_device *sdev;
725 struct hpsa_scsi_dev_t *hdev;
726 unsigned long flags;
727 unsigned char sn[16];
728
729 sdev = to_scsi_device(dev);
730 h = sdev_to_hba(sdev);
731 spin_lock_irqsave(&h->lock, flags);
732 hdev = sdev->hostdata;
733 if (!hdev) {
734 spin_unlock_irqrestore(&h->lock, flags);
735 return -ENODEV;
736 }
737 memcpy(sn, hdev->device_id, sizeof(sn));
738 spin_unlock_irqrestore(&h->lock, flags);
739 return snprintf(buf, 16 * 2 + 2,
740 "%02X%02X%02X%02X%02X%02X%02X%02X"
741 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
742 sn[0], sn[1], sn[2], sn[3],
743 sn[4], sn[5], sn[6], sn[7],
744 sn[8], sn[9], sn[10], sn[11],
745 sn[12], sn[13], sn[14], sn[15]);
746 }
747
748 static ssize_t sas_address_show(struct device *dev,
749 struct device_attribute *attr, char *buf)
750 {
751 struct ctlr_info *h;
752 struct scsi_device *sdev;
753 struct hpsa_scsi_dev_t *hdev;
754 unsigned long flags;
755 u64 sas_address;
756
757 sdev = to_scsi_device(dev);
758 h = sdev_to_hba(sdev);
759 spin_lock_irqsave(&h->lock, flags);
760 hdev = sdev->hostdata;
761 if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
762 spin_unlock_irqrestore(&h->lock, flags);
763 return -ENODEV;
764 }
765 sas_address = hdev->sas_address;
766 spin_unlock_irqrestore(&h->lock, flags);
767
768 return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
769 }
770
771 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
772 struct device_attribute *attr, char *buf)
773 {
774 struct ctlr_info *h;
775 struct scsi_device *sdev;
776 struct hpsa_scsi_dev_t *hdev;
777 unsigned long flags;
778 int offload_enabled;
779
780 sdev = to_scsi_device(dev);
781 h = sdev_to_hba(sdev);
782 spin_lock_irqsave(&h->lock, flags);
783 hdev = sdev->hostdata;
784 if (!hdev) {
785 spin_unlock_irqrestore(&h->lock, flags);
786 return -ENODEV;
787 }
788 offload_enabled = hdev->offload_enabled;
789 spin_unlock_irqrestore(&h->lock, flags);
790
791 if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
792 return snprintf(buf, 20, "%d\n", offload_enabled);
793 else
794 return snprintf(buf, 40, "%s\n",
795 "Not applicable for a controller");
796 }
797
798 #define MAX_PATHS 8
799 static ssize_t path_info_show(struct device *dev,
800 struct device_attribute *attr, char *buf)
801 {
802 struct ctlr_info *h;
803 struct scsi_device *sdev;
804 struct hpsa_scsi_dev_t *hdev;
805 unsigned long flags;
806 int i;
807 int output_len = 0;
808 u8 box;
809 u8 bay;
810 u8 path_map_index = 0;
811 char *active;
812 unsigned char phys_connector[2];
813
814 sdev = to_scsi_device(dev);
815 h = sdev_to_hba(sdev);
816 spin_lock_irqsave(&h->devlock, flags);
817 hdev = sdev->hostdata;
818 if (!hdev) {
819 spin_unlock_irqrestore(&h->devlock, flags);
820 return -ENODEV;
821 }
822
823 bay = hdev->bay;
824 for (i = 0; i < MAX_PATHS; i++) {
825 path_map_index = 1<<i;
826 if (i == hdev->active_path_index)
827 active = "Active";
828 else if (hdev->path_map & path_map_index)
829 active = "Inactive";
830 else
831 continue;
832
833 output_len += scnprintf(buf + output_len,
834 PAGE_SIZE - output_len,
835 "[%d:%d:%d:%d] %20.20s ",
836 h->scsi_host->host_no,
837 hdev->bus, hdev->target, hdev->lun,
838 scsi_device_type(hdev->devtype));
839
840 if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
841 output_len += scnprintf(buf + output_len,
842 PAGE_SIZE - output_len,
843 "%s\n", active);
844 continue;
845 }
846
847 box = hdev->box[i];
848 memcpy(&phys_connector, &hdev->phys_connector[i],
849 sizeof(phys_connector));
850 if (phys_connector[0] < '0')
851 phys_connector[0] = '0';
852 if (phys_connector[1] < '0')
853 phys_connector[1] = '0';
854 output_len += scnprintf(buf + output_len,
855 PAGE_SIZE - output_len,
856 "PORT: %.2s ",
857 phys_connector);
858 if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
859 hdev->expose_device) {
860 if (box == 0 || box == 0xFF) {
861 output_len += scnprintf(buf + output_len,
862 PAGE_SIZE - output_len,
863 "BAY: %hhu %s\n",
864 bay, active);
865 } else {
866 output_len += scnprintf(buf + output_len,
867 PAGE_SIZE - output_len,
868 "BOX: %hhu BAY: %hhu %s\n",
869 box, bay, active);
870 }
871 } else if (box != 0 && box != 0xFF) {
872 output_len += scnprintf(buf + output_len,
873 PAGE_SIZE - output_len, "BOX: %hhu %s\n",
874 box, active);
875 } else
876 output_len += scnprintf(buf + output_len,
877 PAGE_SIZE - output_len, "%s\n", active);
878 }
879
880 spin_unlock_irqrestore(&h->devlock, flags);
881 return output_len;
882 }
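/*
 * Illustrative output (editorial addition, values made up): for a physical
 * disk reachable over two ports the path_info attribute reads roughly
 *   [1:0:3:0] Direct-Access PORT: 1I BOX: 1 BAY: 9 Active
 *   [1:0:3:0] Direct-Access PORT: 2I BOX: 1 BAY: 9 Inactive
 * Logical volumes and the RAID controller itself only report the
 * Active/Inactive state after the device type.
 */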
883
884 static ssize_t host_show_ctlr_num(struct device *dev,
885 struct device_attribute *attr, char *buf)
886 {
887 struct ctlr_info *h;
888 struct Scsi_Host *shost = class_to_shost(dev);
889
890 h = shost_to_hba(shost);
891 return snprintf(buf, 20, "%d\n", h->ctlr);
892 }
893
894 static ssize_t host_show_legacy_board(struct device *dev,
895 struct device_attribute *attr, char *buf)
896 {
897 struct ctlr_info *h;
898 struct Scsi_Host *shost = class_to_shost(dev);
899
900 h = shost_to_hba(shost);
901 return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
902 }
903
904 static DEVICE_ATTR_RO(raid_level);
905 static DEVICE_ATTR_RO(lunid);
906 static DEVICE_ATTR_RO(unique_id);
907 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
908 static DEVICE_ATTR_RO(sas_address);
909 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
910 host_show_hp_ssd_smart_path_enabled, NULL);
911 static DEVICE_ATTR_RO(path_info);
912 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
913 host_show_hp_ssd_smart_path_status,
914 host_store_hp_ssd_smart_path_status);
915 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
916 host_store_raid_offload_debug);
917 static DEVICE_ATTR(firmware_revision, S_IRUGO,
918 host_show_firmware_revision, NULL);
919 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
920 host_show_commands_outstanding, NULL);
921 static DEVICE_ATTR(transport_mode, S_IRUGO,
922 host_show_transport_mode, NULL);
923 static DEVICE_ATTR(resettable, S_IRUGO,
924 host_show_resettable, NULL);
925 static DEVICE_ATTR(lockup_detected, S_IRUGO,
926 host_show_lockup_detected, NULL);
927 static DEVICE_ATTR(ctlr_num, S_IRUGO,
928 host_show_ctlr_num, NULL);
929 static DEVICE_ATTR(legacy_board, S_IRUGO,
930 host_show_legacy_board, NULL);
931
932 static struct device_attribute *hpsa_sdev_attrs[] = {
933 &dev_attr_raid_level,
934 &dev_attr_lunid,
935 &dev_attr_unique_id,
936 &dev_attr_hp_ssd_smart_path_enabled,
937 &dev_attr_path_info,
938 &dev_attr_sas_address,
939 NULL,
940 };
941
942 static struct device_attribute *hpsa_shost_attrs[] = {
943 &dev_attr_rescan,
944 &dev_attr_firmware_revision,
945 &dev_attr_commands_outstanding,
946 &dev_attr_transport_mode,
947 &dev_attr_resettable,
948 &dev_attr_hp_ssd_smart_path_status,
949 &dev_attr_raid_offload_debug,
950 &dev_attr_lockup_detected,
951 &dev_attr_ctlr_num,
952 &dev_attr_legacy_board,
953 NULL,
954 };
955
956 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
957 HPSA_MAX_CONCURRENT_PASSTHRUS)
958
959 static struct scsi_host_template hpsa_driver_template = {
960 .module = THIS_MODULE,
961 .name = HPSA,
962 .proc_name = HPSA,
963 .queuecommand = hpsa_scsi_queue_command,
964 .scan_start = hpsa_scan_start,
965 .scan_finished = hpsa_scan_finished,
966 .change_queue_depth = hpsa_change_queue_depth,
967 .this_id = -1,
968 .use_clustering = ENABLE_CLUSTERING,
969 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
970 .ioctl = hpsa_ioctl,
971 .slave_alloc = hpsa_slave_alloc,
972 .slave_configure = hpsa_slave_configure,
973 .slave_destroy = hpsa_slave_destroy,
974 #ifdef CONFIG_COMPAT
975 .compat_ioctl = hpsa_compat_ioctl,
976 #endif
977 .sdev_attrs = hpsa_sdev_attrs,
978 .shost_attrs = hpsa_shost_attrs,
979 .max_sectors = 1024,
980 .no_write_same = 1,
981 };
982
983 static inline u32 next_command(struct ctlr_info *h, u8 q)
984 {
985 u32 a;
986 struct reply_queue_buffer *rq = &h->reply_queue[q];
987
988 if (h->transMethod & CFGTBL_Trans_io_accel1)
989 return h->access.command_completed(h, q);
990
991 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
992 return h->access.command_completed(h, q);
993
994 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
995 a = rq->head[rq->current_entry];
996 rq->current_entry++;
997 atomic_dec(&h->commands_outstanding);
998 } else {
999 a = FIFO_EMPTY;
1000 }
1001 /* Check for wraparound */
1002 if (rq->current_entry == h->max_commands) {
1003 rq->current_entry = 0;
1004 rq->wraparound ^= 1;
1005 }
1006 return a;
1007 }
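/*
 * Editorial note on the ring-buffer convention used above: completed
 * command tags land in reply_queue[q].head[] with the low bit of each
 * entry carrying the producer's current toggle value. The driver consumes
 * entries while that bit matches rq->wraparound, and flips its own toggle
 * each time current_entry wraps from h->max_commands back to 0, so stale
 * entries from the previous pass are never mistaken for new completions.
 */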
1008
1009 /*
1010 * There are some special bits in the bus address of the
1011 * command that we have to set for the controller to know
1012 * how to process the command:
1013 *
1014 * Normal performant mode:
1015 * bit 0: 1 means performant mode, 0 means simple mode.
1016 * bits 1-3 = block fetch table entry
1017 * bits 4-6 = command type (== 0)
1018 *
1019 * ioaccel1 mode:
1020 * bit 0 = "performant mode" bit.
1021 * bits 1-3 = block fetch table entry
1022 * bits 4-6 = command type (== 110)
1023 * (command type is needed because ioaccel1 mode
1024 * commands are submitted through the same register as normal
1025 * mode commands, so this is how the controller knows whether
1026 * the command is normal mode or ioaccel1 mode.)
1027 *
1028 * ioaccel2 mode:
1029 * bit 0 = "performant mode" bit.
1030 * bits 1-4 = block fetch table entry (note extra bit)
1031 * bits 4-6 = not needed, because ioaccel2 mode has
1032 * a separate special register for submitting commands.
1033 */
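/*
 * Worked example (editorial addition): in normal performant mode, a
 * command whose SG count maps to block fetch table entry 2 gets
 *   c->busaddr |= 1 | (2 << 1);   (low bits become 0x5)
 * i.e. bit 0 = performant mode, bits 1-3 = 2, bits 4-6 = 0 (normal
 * command), matching set_performant_mode() below.
 */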
1034
1035 /*
1036 * set_performant_mode: Modify the tag for cciss performant mode;
1037 * set bit 0 for pull model, bits 3-1 for the block fetch
1038 * register number.
1039 */
1040 #define DEFAULT_REPLY_QUEUE (-1)
1041 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
1042 int reply_queue)
1043 {
1044 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
1045 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
1046 if (unlikely(!h->msix_vectors))
1047 return;
1048 c->Header.ReplyQueue = reply_queue;
1049 }
1050 }
1051
1052 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
1053 struct CommandList *c,
1054 int reply_queue)
1055 {
1056 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
1057
1058 /*
1059 * Tell the controller to post the reply to the queue for this
1060 * processor. This seems to give the best I/O throughput.
1061 */
1062 cp->ReplyQueue = reply_queue;
1063 /*
1064 * Set the bits in the address sent down to include:
1065 * - performant mode bit (bit 0)
1066 * - pull count (bits 1-3)
1067 * - command type (bits 4-6)
1068 */
1069 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
1070 IOACCEL1_BUSADDR_CMDTYPE;
1071 }
1072
1073 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
1074 struct CommandList *c,
1075 int reply_queue)
1076 {
1077 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1078 &h->ioaccel2_cmd_pool[c->cmdindex];
1079
1080 /* Tell the controller to post the reply to the queue for this
1081 * processor. This seems to give the best I/O throughput.
1082 */
1083 cp->reply_queue = reply_queue;
1084 /* Set the bits in the address sent down to include:
1085 * - performant mode bit not used in ioaccel mode 2
1086 * - pull count (bits 0-3)
1087 * - command type isn't needed for ioaccel2
1088 */
1089 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1090 }
1091
1092 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1093 struct CommandList *c,
1094 int reply_queue)
1095 {
1096 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1097
1098 /*
1099 * Tell the controller to post the reply to the queue for this
1100 * processor. This seems to give the best I/O throughput.
1101 */
1102 cp->reply_queue = reply_queue;
1103 /*
1104 * Set the bits in the address sent down to include:
1105 * - performant mode bit not used in ioaccel mode 2
1106 * - pull count (bits 0-3)
1107 * - command type isn't needed for ioaccel2
1108 */
1109 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1110 }
1111
1112 static int is_firmware_flash_cmd(u8 *cdb)
1113 {
1114 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1115 }
1116
1117 /*
1118 * During firmware flash, the heartbeat register may not update as frequently
1119 * as it should, so we dial down lockup detection during firmware flash
1120 * and dial it back up when firmware flash completes.
1121 */
1122 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1123 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1124 #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
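/*
 * Editorial note: the intervals above are in jiffies, so the lockup
 * detector normally samples the heartbeat every 30 seconds and backs off
 * to one sample every 240 seconds while a firmware flash is in progress.
 */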
1125 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1126 struct CommandList *c)
1127 {
1128 if (!is_firmware_flash_cmd(c->Request.CDB))
1129 return;
1130 atomic_inc(&h->firmware_flash_in_progress);
1131 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1132 }
1133
1134 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1135 struct CommandList *c)
1136 {
1137 if (is_firmware_flash_cmd(c->Request.CDB) &&
1138 atomic_dec_and_test(&h->firmware_flash_in_progress))
1139 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1140 }
1141
1142 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1143 struct CommandList *c, int reply_queue)
1144 {
1145 dial_down_lockup_detection_during_fw_flash(h, c);
1146 atomic_inc(&h->commands_outstanding);
1147
1148 reply_queue = h->reply_map[raw_smp_processor_id()];
1149 switch (c->cmd_type) {
1150 case CMD_IOACCEL1:
1151 set_ioaccel1_performant_mode(h, c, reply_queue);
1152 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1153 break;
1154 case CMD_IOACCEL2:
1155 set_ioaccel2_performant_mode(h, c, reply_queue);
1156 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1157 break;
1158 case IOACCEL2_TMF:
1159 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1160 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1161 break;
1162 default:
1163 set_performant_mode(h, c, reply_queue);
1164 h->access.submit_command(h, c);
1165 }
1166 }
1167
1168 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1169 {
1170 if (unlikely(hpsa_is_pending_event(c)))
1171 return finish_cmd(c);
1172
1173 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1174 }
1175
1176 static inline int is_hba_lunid(unsigned char scsi3addr[])
1177 {
1178 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1179 }
1180
1181 static inline int is_scsi_rev_5(struct ctlr_info *h)
1182 {
1183 if (!h->hba_inquiry_data)
1184 return 0;
1185 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1186 return 1;
1187 return 0;
1188 }
1189
1190 static int hpsa_find_target_lun(struct ctlr_info *h,
1191 unsigned char scsi3addr[], int bus, int *target, int *lun)
1192 {
1193 /* finds an unused bus, target, lun for a new physical device
1194 * assumes h->devlock is held
1195 */
1196 int i, found = 0;
1197 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1198
1199 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1200
1201 for (i = 0; i < h->ndevices; i++) {
1202 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1203 __set_bit(h->dev[i]->target, lun_taken);
1204 }
1205
1206 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1207 if (i < HPSA_MAX_DEVICES) {
1208 /* *bus = 1; */
1209 *target = i;
1210 *lun = 0;
1211 found = 1;
1212 }
1213 return !found;
1214 }
1215
1216 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1217 struct hpsa_scsi_dev_t *dev, char *description)
1218 {
1219 #define LABEL_SIZE 25
1220 char label[LABEL_SIZE];
1221
1222 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1223 return;
1224
1225 switch (dev->devtype) {
1226 case TYPE_RAID:
1227 snprintf(label, LABEL_SIZE, "controller");
1228 break;
1229 case TYPE_ENCLOSURE:
1230 snprintf(label, LABEL_SIZE, "enclosure");
1231 break;
1232 case TYPE_DISK:
1233 case TYPE_ZBC:
1234 if (dev->external)
1235 snprintf(label, LABEL_SIZE, "external");
1236 else if (!is_logical_dev_addr_mode(dev->scsi3addr))
1237 snprintf(label, LABEL_SIZE, "%s",
1238 raid_label[PHYSICAL_DRIVE]);
1239 else
1240 snprintf(label, LABEL_SIZE, "RAID-%s",
1241 dev->raid_level > RAID_UNKNOWN ? "?" :
1242 raid_label[dev->raid_level]);
1243 break;
1244 case TYPE_ROM:
1245 snprintf(label, LABEL_SIZE, "rom");
1246 break;
1247 case TYPE_TAPE:
1248 snprintf(label, LABEL_SIZE, "tape");
1249 break;
1250 case TYPE_MEDIUM_CHANGER:
1251 snprintf(label, LABEL_SIZE, "changer");
1252 break;
1253 default:
1254 snprintf(label, LABEL_SIZE, "UNKNOWN");
1255 break;
1256 }
1257
1258 dev_printk(level, &h->pdev->dev,
1259 "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
1260 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1261 description,
1262 scsi_device_type(dev->devtype),
1263 dev->vendor,
1264 dev->model,
1265 label,
1266 dev->offload_config ? '+' : '-',
1267 dev->offload_to_be_enabled ? '+' : '-',
1268 dev->expose_device);
1269 }
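/*
 * Illustrative log line (editorial addition, values made up): for a newly
 * added RAID-5 logical volume this prints something like
 *   scsi 2:0:1:0: added Direct-Access HP LOGICAL VOLUME RAID-5
 *       SSDSmartPathCap+ En- Exp=1
 * where the '+'/'-' characters reflect offload_config and
 * offload_to_be_enabled, and Exp is the expose_device flag.
 */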
1270
1271 /* Add an entry into h->dev[] array. */
1272 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1273 struct hpsa_scsi_dev_t *device,
1274 struct hpsa_scsi_dev_t *added[], int *nadded)
1275 {
1276 /* assumes h->devlock is held */
1277 int n = h->ndevices;
1278 int i;
1279 unsigned char addr1[8], addr2[8];
1280 struct hpsa_scsi_dev_t *sd;
1281
1282 if (n >= HPSA_MAX_DEVICES) {
1283 dev_err(&h->pdev->dev, "too many devices, some will be "
1284 "inaccessible.\n");
1285 return -1;
1286 }
1287
1288 /* physical devices do not have lun or target assigned until now. */
1289 if (device->lun != -1)
1290 /* Logical device, lun is already assigned. */
1291 goto lun_assigned;
1292
1293 /* If this device is a non-zero lun of a multi-lun device,
1294 * byte 4 of the 8-byte LUN addr will contain the logical
1295 * unit no., zero otherwise.
1296 */
1297 if (device->scsi3addr[4] == 0) {
1298 /* This is not a non-zero lun of a multi-lun device */
1299 if (hpsa_find_target_lun(h, device->scsi3addr,
1300 device->bus, &device->target, &device->lun) != 0)
1301 return -1;
1302 goto lun_assigned;
1303 }
1304
1305 /* This is a non-zero lun of a multi-lun device.
1306 * Search through our list and find the device which
1307 * has the same 8-byte LUN address, except for bytes 4 and 5.
1308 * Assign the same bus and target for this new LUN.
1309 * Use the logical unit number from the firmware.
1310 */
1311 memcpy(addr1, device->scsi3addr, 8);
1312 addr1[4] = 0;
1313 addr1[5] = 0;
1314 for (i = 0; i < n; i++) {
1315 sd = h->dev[i];
1316 memcpy(addr2, sd->scsi3addr, 8);
1317 addr2[4] = 0;
1318 addr2[5] = 0;
1319 /* differ only in bytes 4 and 5? */
1320 if (memcmp(addr1, addr2, 8) == 0) {
1321 device->bus = sd->bus;
1322 device->target = sd->target;
1323 device->lun = device->scsi3addr[4];
1324 break;
1325 }
1326 }
1327 if (device->lun == -1) {
1328 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1329 " suspect firmware bug or unsupported hardware "
1330 "configuration.\n");
1331 return -1;
1332 }
1333
1334 lun_assigned:
1335
1336 h->dev[n] = device;
1337 h->ndevices++;
1338 added[*nadded] = device;
1339 (*nadded)++;
1340 hpsa_show_dev_msg(KERN_INFO, h, device,
1341 device->expose_device ? "added" : "masked");
1342 return 0;
1343 }
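/*
 * Illustrative example (editorial addition, addresses made up): if an
 * external array already present at the 8-byte address
 *   52 3a 01 00 00 00 0e 0b   (bus/target already assigned)
 * reports a second LUN at
 *   52 3a 01 00 02 00 0e 0b
 * the two addresses match once bytes 4 and 5 are zeroed, so the new entry
 * inherits the existing bus/target and takes lun = scsi3addr[4] = 2.
 */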
1344
1345 /*
1346 * Called during a scan operation.
1347 *
1348 * Update an entry in h->dev[] array.
1349 */
1350 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1351 int entry, struct hpsa_scsi_dev_t *new_entry)
1352 {
1353 /* assumes h->devlock is held */
1354 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1355
1356 /* Raid level changed. */
1357 h->dev[entry]->raid_level = new_entry->raid_level;
1358
1359 /*
1360 * ioaccel_handle may have changed for a dual domain disk
1361 */
1362 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1363
1364 /* Raid offload parameters changed. Careful about the ordering. */
1365 if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
1366 /*
1367 * if drive is newly offload_enabled, we want to copy the
1368 * raid map data first. If previously offload_enabled and
1369 * offload_config were set, raid map data had better be
1370 * the same as it was before. If raid map data has changed
1371 * then it had better be the case that
1372 * h->dev[entry]->offload_enabled is currently 0.
1373 */
1374 h->dev[entry]->raid_map = new_entry->raid_map;
1375 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1376 }
1377 if (new_entry->offload_to_be_enabled) {
1378 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1379 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1380 }
1381 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1382 h->dev[entry]->offload_config = new_entry->offload_config;
1383 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1384 h->dev[entry]->queue_depth = new_entry->queue_depth;
1385
1386 /*
1387 * We can turn off ioaccel offload now, but need to delay turning
1388 * ioaccel on until we can update h->dev[entry]->phys_disk[], but we
1389 * can't do that until all the devices are updated.
1390 */
1391 h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
1392
1393 /*
1394 * turn ioaccel off immediately if told to do so.
1395 */
1396 if (!new_entry->offload_to_be_enabled)
1397 h->dev[entry]->offload_enabled = 0;
1398
1399 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1400 }
1401
1402 /* Replace an entry from h->dev[] array. */
1403 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1404 int entry, struct hpsa_scsi_dev_t *new_entry,
1405 struct hpsa_scsi_dev_t *added[], int *nadded,
1406 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1407 {
1408 /* assumes h->devlock is held */
1409 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1410 removed[*nremoved] = h->dev[entry];
1411 (*nremoved)++;
1412
1413 /*
1414 * New physical devices won't have target/lun assigned yet
1415 * so we need to preserve the values in the slot we are replacing.
1416 */
1417 if (new_entry->target == -1) {
1418 new_entry->target = h->dev[entry]->target;
1419 new_entry->lun = h->dev[entry]->lun;
1420 }
1421
1422 h->dev[entry] = new_entry;
1423 added[*nadded] = new_entry;
1424 (*nadded)++;
1425
1426 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1427 }
1428
1429 /* Remove an entry from h->dev[] array. */
1430 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1431 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1432 {
1433 /* assumes h->devlock is held */
1434 int i;
1435 struct hpsa_scsi_dev_t *sd;
1436
1437 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1438
1439 sd = h->dev[entry];
1440 removed[*nremoved] = h->dev[entry];
1441 (*nremoved)++;
1442
1443 for (i = entry; i < h->ndevices-1; i++)
1444 h->dev[i] = h->dev[i+1];
1445 h->ndevices--;
1446 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1447 }
1448
1449 #define SCSI3ADDR_EQ(a, b) ( \
1450 (a)[7] == (b)[7] && \
1451 (a)[6] == (b)[6] && \
1452 (a)[5] == (b)[5] && \
1453 (a)[4] == (b)[4] && \
1454 (a)[3] == (b)[3] && \
1455 (a)[2] == (b)[2] && \
1456 (a)[1] == (b)[1] && \
1457 (a)[0] == (b)[0])
1458
1459 static void fixup_botched_add(struct ctlr_info *h,
1460 struct hpsa_scsi_dev_t *added)
1461 {
1462 /* called when scsi_add_device fails in order to re-adjust
1463 * h->dev[] to match the mid layer's view.
1464 */
1465 unsigned long flags;
1466 int i, j;
1467
1468 spin_lock_irqsave(&h->lock, flags);
1469 for (i = 0; i < h->ndevices; i++) {
1470 if (h->dev[i] == added) {
1471 for (j = i; j < h->ndevices-1; j++)
1472 h->dev[j] = h->dev[j+1];
1473 h->ndevices--;
1474 break;
1475 }
1476 }
1477 spin_unlock_irqrestore(&h->lock, flags);
1478 kfree(added);
1479 }
1480
1481 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1482 struct hpsa_scsi_dev_t *dev2)
1483 {
1484 /* we compare everything except lun and target as these
1485 * are not yet assigned. Compare parts likely
1486 * to differ first
1487 */
1488 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1489 sizeof(dev1->scsi3addr)) != 0)
1490 return 0;
1491 if (memcmp(dev1->device_id, dev2->device_id,
1492 sizeof(dev1->device_id)) != 0)
1493 return 0;
1494 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1495 return 0;
1496 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1497 return 0;
1498 if (dev1->devtype != dev2->devtype)
1499 return 0;
1500 if (dev1->bus != dev2->bus)
1501 return 0;
1502 return 1;
1503 }
1504
1505 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1506 struct hpsa_scsi_dev_t *dev2)
1507 {
1508 /* Device attributes that can change, but don't mean
1509 * that the device is a different device, nor that the OS
1510 * needs to be told anything about the change.
1511 */
1512 if (dev1->raid_level != dev2->raid_level)
1513 return 1;
1514 if (dev1->offload_config != dev2->offload_config)
1515 return 1;
1516 if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
1517 return 1;
1518 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1519 if (dev1->queue_depth != dev2->queue_depth)
1520 return 1;
1521 /*
1522 * This can happen for dual domain devices. An active
1523 * path change causes the ioaccel handle to change
1524 *
1525 * For example, note the handle differences between p0 and p1:
1526 * Device WWN ,WWN hash,Handle
1527 * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
1528 * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
1529 */
1530 if (dev1->ioaccel_handle != dev2->ioaccel_handle)
1531 return 1;
1532 return 0;
1533 }
1534
1535 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
1536 * and return needle location in *index. If scsi3addr matches, but not
1537 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1538 * location in *index.
1539 * In the case of a minor device attribute change, such as RAID level, just
1540 * return DEVICE_UPDATED, along with the updated device's location in index.
1541 * If needle not found, return DEVICE_NOT_FOUND.
1542 */
1543 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1544 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1545 int *index)
1546 {
1547 int i;
1548 #define DEVICE_NOT_FOUND 0
1549 #define DEVICE_CHANGED 1
1550 #define DEVICE_SAME 2
1551 #define DEVICE_UPDATED 3
1552 if (needle == NULL)
1553 return DEVICE_NOT_FOUND;
1554
1555 for (i = 0; i < haystack_size; i++) {
1556 if (haystack[i] == NULL) /* previously removed. */
1557 continue;
1558 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1559 *index = i;
1560 if (device_is_the_same(needle, haystack[i])) {
1561 if (device_updated(needle, haystack[i]))
1562 return DEVICE_UPDATED;
1563 return DEVICE_SAME;
1564 } else {
1565 /* Keep offline devices offline */
1566 if (needle->volume_offline)
1567 return DEVICE_NOT_FOUND;
1568 return DEVICE_CHANGED;
1569 }
1570 }
1571 }
1572 *index = -1;
1573 return DEVICE_NOT_FOUND;
1574 }
1575
1576 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1577 unsigned char scsi3addr[])
1578 {
1579 struct offline_device_entry *device;
1580 unsigned long flags;
1581
1582 /* Check to see if device is already on the list */
1583 spin_lock_irqsave(&h->offline_device_lock, flags);
1584 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1585 if (memcmp(device->scsi3addr, scsi3addr,
1586 sizeof(device->scsi3addr)) == 0) {
1587 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1588 return;
1589 }
1590 }
1591 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1592
1593 /* Device is not on the list, add it. */
1594 device = kmalloc(sizeof(*device), GFP_KERNEL);
1595 if (!device)
1596 return;
1597
1598 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1599 spin_lock_irqsave(&h->offline_device_lock, flags);
1600 list_add_tail(&device->offline_list, &h->offline_device_list);
1601 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1602 }
1603
1604 /* Print a message explaining various offline volume states */
1605 static void hpsa_show_volume_status(struct ctlr_info *h,
1606 struct hpsa_scsi_dev_t *sd)
1607 {
1608 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1609 dev_info(&h->pdev->dev,
1610 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1611 h->scsi_host->host_no,
1612 sd->bus, sd->target, sd->lun);
1613 switch (sd->volume_offline) {
1614 case HPSA_LV_OK:
1615 break;
1616 case HPSA_LV_UNDERGOING_ERASE:
1617 dev_info(&h->pdev->dev,
1618 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1619 h->scsi_host->host_no,
1620 sd->bus, sd->target, sd->lun);
1621 break;
1622 case HPSA_LV_NOT_AVAILABLE:
1623 dev_info(&h->pdev->dev,
1624 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1625 h->scsi_host->host_no,
1626 sd->bus, sd->target, sd->lun);
1627 break;
1628 case HPSA_LV_UNDERGOING_RPI:
1629 dev_info(&h->pdev->dev,
1630 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1631 h->scsi_host->host_no,
1632 sd->bus, sd->target, sd->lun);
1633 break;
1634 case HPSA_LV_PENDING_RPI:
1635 dev_info(&h->pdev->dev,
1636 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1637 h->scsi_host->host_no,
1638 sd->bus, sd->target, sd->lun);
1639 break;
1640 case HPSA_LV_ENCRYPTED_NO_KEY:
1641 dev_info(&h->pdev->dev,
1642 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1643 h->scsi_host->host_no,
1644 sd->bus, sd->target, sd->lun);
1645 break;
1646 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1647 dev_info(&h->pdev->dev,
1648 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1649 h->scsi_host->host_no,
1650 sd->bus, sd->target, sd->lun);
1651 break;
1652 case HPSA_LV_UNDERGOING_ENCRYPTION:
1653 dev_info(&h->pdev->dev,
1654 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1655 h->scsi_host->host_no,
1656 sd->bus, sd->target, sd->lun);
1657 break;
1658 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1659 dev_info(&h->pdev->dev,
1660 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1661 h->scsi_host->host_no,
1662 sd->bus, sd->target, sd->lun);
1663 break;
1664 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1665 dev_info(&h->pdev->dev,
1666 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1667 h->scsi_host->host_no,
1668 sd->bus, sd->target, sd->lun);
1669 break;
1670 case HPSA_LV_PENDING_ENCRYPTION:
1671 dev_info(&h->pdev->dev,
1672 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1673 h->scsi_host->host_no,
1674 sd->bus, sd->target, sd->lun);
1675 break;
1676 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1677 dev_info(&h->pdev->dev,
1678 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1679 h->scsi_host->host_no,
1680 sd->bus, sd->target, sd->lun);
1681 break;
1682 }
1683 }
1684
1685 /*
1686 * Figure the list of physical drive pointers for a logical drive with
1687 * raid offload configured.
1688 */
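/*
 * Each RAID map entry names a member physical drive by its ioaccel_handle;
 * those handles are matched against the physical devices in dev[] to fill
 * in logical_drive->phys_disk[].  The logical drive's queue depth becomes
 * the sum of its member disks' queue depths, capped at h->nr_cmds (see the
 * note below about this estimate only being right for reads).
 */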
1689 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1690 struct hpsa_scsi_dev_t *dev[], int ndevices,
1691 struct hpsa_scsi_dev_t *logical_drive)
1692 {
1693 struct raid_map_data *map = &logical_drive->raid_map;
1694 struct raid_map_disk_data *dd = &map->data[0];
1695 int i, j;
1696 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1697 le16_to_cpu(map->metadata_disks_per_row);
1698 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1699 le16_to_cpu(map->layout_map_count) *
1700 total_disks_per_row;
1701 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1702 total_disks_per_row;
1703 int qdepth;
1704
1705 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1706 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1707
1708 logical_drive->nphysical_disks = nraid_map_entries;
1709
1710 qdepth = 0;
1711 for (i = 0; i < nraid_map_entries; i++) {
1712 logical_drive->phys_disk[i] = NULL;
1713 if (!logical_drive->offload_config)
1714 continue;
1715 for (j = 0; j < ndevices; j++) {
1716 if (dev[j] == NULL)
1717 continue;
1718 if (dev[j]->devtype != TYPE_DISK &&
1719 dev[j]->devtype != TYPE_ZBC)
1720 continue;
1721 if (is_logical_device(dev[j]))
1722 continue;
1723 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1724 continue;
1725
1726 logical_drive->phys_disk[i] = dev[j];
1727 if (i < nphys_disk)
1728 qdepth = min(h->nr_cmds, qdepth +
1729 logical_drive->phys_disk[i]->queue_depth);
1730 break;
1731 }
1732
1733 /*
1734 * This can happen if a physical drive is removed and
1735 * the logical drive is degraded. In that case, the RAID
1736 * map data will refer to a physical disk which isn't actually
1737 * present. In that case offload_enabled should already
1738 * be 0, but we'll turn it off here just in case.
1739 */
1740 if (!logical_drive->phys_disk[i]) {
1741 dev_warn(&h->pdev->dev,
1742 "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
1743 __func__,
1744 h->scsi_host->host_no, logical_drive->bus,
1745 logical_drive->target, logical_drive->lun);
1746 logical_drive->offload_enabled = 0;
1747 logical_drive->offload_to_be_enabled = 0;
1748 logical_drive->queue_depth = 8;
1749 }
1750 }
1751 if (nraid_map_entries)
1752 /*
1753 * This is correct for reads, too high for full stripe writes,
1754 * way too high for partial stripe writes
1755 */
1756 logical_drive->queue_depth = qdepth;
1757 else {
1758 if (logical_drive->external)
1759 logical_drive->queue_depth = EXTERNAL_QD;
1760 else
1761 logical_drive->queue_depth = h->nr_cmds;
1762 }
1763 }
1764
1765 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1766 struct hpsa_scsi_dev_t *dev[], int ndevices)
1767 {
1768 int i;
1769
1770 for (i = 0; i < ndevices; i++) {
1771 if (dev[i] == NULL)
1772 continue;
1773 if (dev[i]->devtype != TYPE_DISK &&
1774 dev[i]->devtype != TYPE_ZBC)
1775 continue;
1776 if (!is_logical_device(dev[i]))
1777 continue;
1778
1779 /*
1780 * If offload is currently enabled, the RAID map and
1781 * phys_disk[] assignment *better* not be changing
1782 * because we would be changing ioaccel phys_disk[] pointers
1783 * on an ioaccel volume processing I/O requests.
1784 *
1785 * If an ioaccel volume status changed, initially because it was
1786 * re-configured and thus underwent a transformation, or
1787 * a drive failed, we would have received a state change
1788 * request and ioaccel should have been turned off. When the
1789 * transformation completes, we get another state change
1790 * request to turn ioaccel back on. In this case, we need
1791 * to update the ioaccel information.
1792 *
1793 * Thus: If it is not currently enabled, but will be after
1794 * the scan completes, make sure the ioaccel pointers
1795 * are up to date.
1796 */
1797
1798 if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
1799 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1800 }
1801 }
1802
1803 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1804 {
1805 int rc = 0;
1806
1807 if (!h->scsi_host)
1808 return 1;
1809
1810 if (is_logical_device(device)) /* RAID */
1811 rc = scsi_add_device(h->scsi_host, device->bus,
1812 device->target, device->lun);
1813 else /* HBA */
1814 rc = hpsa_add_sas_device(h->sas_host, device);
1815
1816 return rc;
1817 }
1818
1819 static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
1820 struct hpsa_scsi_dev_t *dev)
1821 {
1822 int i;
1823 int count = 0;
1824
1825 for (i = 0; i < h->nr_cmds; i++) {
1826 struct CommandList *c = h->cmd_pool + i;
1827 int refcount = atomic_inc_return(&c->refcount);
1828
1829 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
1830 dev->scsi3addr)) {
1831 unsigned long flags;
1832
1833 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
1834 if (!hpsa_is_cmd_idle(c))
1835 ++count;
1836 spin_unlock_irqrestore(&h->lock, flags);
1837 }
1838
1839 cmd_free(h, c);
1840 }
1841
1842 return count;
1843 }
1844
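/*
 * Poll for roughly 20 seconds (up to 20 passes with a 1 second sleep) for
 * all commands outstanding against the device to drain before it is
 * removed; warn if any are still in flight when we give up.
 */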
1845 static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
1846 struct hpsa_scsi_dev_t *device)
1847 {
1848 int cmds = 0;
1849 int waits = 0;
1850
1851 while (1) {
1852 cmds = hpsa_find_outstanding_commands_for_dev(h, device);
1853 if (cmds == 0)
1854 break;
1855 if (++waits > 20)
1856 break;
1857 msleep(1000);
1858 }
1859
1860 if (waits > 20)
1861 dev_warn(&h->pdev->dev,
1862 "%s: removing device with %d outstanding commands!\n",
1863 __func__, cmds);
1864 }
1865
1866 static void hpsa_remove_device(struct ctlr_info *h,
1867 struct hpsa_scsi_dev_t *device)
1868 {
1869 struct scsi_device *sdev = NULL;
1870
1871 if (!h->scsi_host)
1872 return;
1873
1874 /*
1875 * Allow for commands to drain
1876 */
1877 device->removed = 1;
1878 hpsa_wait_for_outstanding_commands_for_dev(h, device);
1879
1880 if (is_logical_device(device)) { /* RAID */
1881 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1882 device->target, device->lun);
1883 if (sdev) {
1884 scsi_remove_device(sdev);
1885 scsi_device_put(sdev);
1886 } else {
1887 /*
1888 * We don't expect to get here. Future commands
1889 * to this device will get a selection timeout as
1890 * if the device were gone.
1891 */
1892 hpsa_show_dev_msg(KERN_WARNING, h, device,
1893 "didn't find device for removal.");
1894 }
1895 } else { /* HBA */
1896
1897 hpsa_remove_sas_device(device);
1898 }
1899 }
1900
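/*
 * Reconcile the driver's view of attached devices (h->dev[]) with the
 * freshly discovered list sd[]:
 *  1) remove, replace or update entries in h->dev[] that vanished or changed,
 *  2) add entries from sd[] that are new (offline volumes are only monitored),
 *  3) refresh the ioaccel phys_disk[] pointers and offload flags, and
 *  4) notify the SCSI midlayer of the removals and additions.
 */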
1901 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1902 struct hpsa_scsi_dev_t *sd[], int nsds)
1903 {
1904 /* sd contains scsi3 addresses and devtypes, and inquiry
1905 * data. This function takes what's in sd to be the current
1906 * reality and updates h->dev[] to reflect that reality.
1907 */
1908 int i, entry, device_change, changes = 0;
1909 struct hpsa_scsi_dev_t *csd;
1910 unsigned long flags;
1911 struct hpsa_scsi_dev_t **added, **removed;
1912 int nadded, nremoved;
1913
1914 /*
1915 * A reset can cause a device status to change;
1916 * re-schedule the scan to see what happened.
1917 */
1918 spin_lock_irqsave(&h->reset_lock, flags);
1919 if (h->reset_in_progress) {
1920 h->drv_req_rescan = 1;
1921 spin_unlock_irqrestore(&h->reset_lock, flags);
1922 return;
1923 }
1924 spin_unlock_irqrestore(&h->reset_lock, flags);
1925
1926 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1927 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1928
1929 if (!added || !removed) {
1930 dev_warn(&h->pdev->dev, "out of memory in "
1931 "adjust_hpsa_scsi_table\n");
1932 goto free_and_out;
1933 }
1934
1935 spin_lock_irqsave(&h->devlock, flags);
1936
1937 /* find any devices in h->dev[] that are not in
1938 * sd[] and remove them from h->dev[], and for any
1939 * devices which have changed, remove the old device
1940 * info and add the new device info.
1941 * If minor device attributes change, just update
1942 * the existing device structure.
1943 */
1944 i = 0;
1945 nremoved = 0;
1946 nadded = 0;
1947 while (i < h->ndevices) {
1948 csd = h->dev[i];
1949 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1950 if (device_change == DEVICE_NOT_FOUND) {
1951 changes++;
1952 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1953 continue; /* remove ^^^, hence i not incremented */
1954 } else if (device_change == DEVICE_CHANGED) {
1955 changes++;
1956 hpsa_scsi_replace_entry(h, i, sd[entry],
1957 added, &nadded, removed, &nremoved);
1958 /* Set it to NULL to prevent it from being freed
1959 * at the bottom of hpsa_update_scsi_devices()
1960 */
1961 sd[entry] = NULL;
1962 } else if (device_change == DEVICE_UPDATED) {
1963 hpsa_scsi_update_entry(h, i, sd[entry]);
1964 }
1965 i++;
1966 }
1967
1968 /* Now, make sure every device listed in sd[] is also
1969 * listed in h->dev[], adding them if they aren't found
1970 */
1971
1972 for (i = 0; i < nsds; i++) {
1973 if (!sd[i]) /* if already added above. */
1974 continue;
1975
1976 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1977 * as the SCSI mid-layer does not handle such devices well.
1978 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1979 * at 160Hz, and prevents the system from coming up.
1980 */
1981 if (sd[i]->volume_offline) {
1982 hpsa_show_volume_status(h, sd[i]);
1983 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1984 continue;
1985 }
1986
1987 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1988 h->ndevices, &entry);
1989 if (device_change == DEVICE_NOT_FOUND) {
1990 changes++;
1991 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1992 break;
1993 sd[i] = NULL; /* prevent from being freed later. */
1994 } else if (device_change == DEVICE_CHANGED) {
1995 /* should never happen... */
1996 changes++;
1997 dev_warn(&h->pdev->dev,
1998 "device unexpectedly changed.\n");
1999 /* but if it does happen, we just ignore that device */
2000 }
2001 }
2002 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
2003
2004 /*
2005 * Now that h->dev[]->phys_disk[] is coherent, we can enable
2006 * any logical drives that need it enabled.
2007 *
2008 * The raid map should be current by now.
2009 *
2010 * We are updating the device list used for I/O requests.
2011 */
2012 for (i = 0; i < h->ndevices; i++) {
2013 if (h->dev[i] == NULL)
2014 continue;
2015 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
2016 }
2017
2018 spin_unlock_irqrestore(&h->devlock, flags);
2019
2020 /* Monitor devices which are in one of several NOT READY states to be
2021 * brought online later. This must be done without holding h->devlock,
2022 * so don't touch h->dev[]
2023 */
2024 for (i = 0; i < nsds; i++) {
2025 if (!sd[i]) /* if already added above. */
2026 continue;
2027 if (sd[i]->volume_offline)
2028 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
2029 }
2030
2031 /* Don't notify the scsi mid layer of any changes the first time through
2032 * (or if there are no changes); on that first pass scsi_scan_host will
2033 * do it later.
2034 */
2035 if (!changes)
2036 goto free_and_out;
2037
2038 /* Notify scsi mid layer of any removed devices */
2039 for (i = 0; i < nremoved; i++) {
2040 if (removed[i] == NULL)
2041 continue;
2042 if (removed[i]->expose_device)
2043 hpsa_remove_device(h, removed[i]);
2044 kfree(removed[i]);
2045 removed[i] = NULL;
2046 }
2047
2048 /* Notify scsi mid layer of any added devices */
2049 for (i = 0; i < nadded; i++) {
2050 int rc = 0;
2051
2052 if (added[i] == NULL)
2053 continue;
2054 if (!(added[i]->expose_device))
2055 continue;
2056 rc = hpsa_add_device(h, added[i]);
2057 if (!rc)
2058 continue;
2059 dev_warn(&h->pdev->dev,
2060 "addition failed %d, device not added.", rc);
2061 /* now we have to remove it from h->dev,
2062 * since it didn't get added to scsi mid layer
2063 */
2064 fixup_botched_add(h, added[i]);
2065 h->drv_req_rescan = 1;
2066 }
2067
2068 free_and_out:
2069 kfree(added);
2070 kfree(removed);
2071 }
2072
2073 /*
2074 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
2075 * Assumes h->devlock is held.
2076 */
2077 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
2078 int bus, int target, int lun)
2079 {
2080 int i;
2081 struct hpsa_scsi_dev_t *sd;
2082
2083 for (i = 0; i < h->ndevices; i++) {
2084 sd = h->dev[i];
2085 if (sd->bus == bus && sd->target == target && sd->lun == lun)
2086 return sd;
2087 }
2088 return NULL;
2089 }
2090
2091 static int hpsa_slave_alloc(struct scsi_device *sdev)
2092 {
2093 struct hpsa_scsi_dev_t *sd = NULL;
2094 unsigned long flags;
2095 struct ctlr_info *h;
2096
2097 h = sdev_to_hba(sdev);
2098 spin_lock_irqsave(&h->devlock, flags);
2099 if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
2100 struct scsi_target *starget;
2101 struct sas_rphy *rphy;
2102
2103 starget = scsi_target(sdev);
2104 rphy = target_to_rphy(starget);
2105 sd = hpsa_find_device_by_sas_rphy(h, rphy);
2106 if (sd) {
2107 sd->target = sdev_id(sdev);
2108 sd->lun = sdev->lun;
2109 }
2110 }
2111 if (!sd)
2112 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
2113 sdev_id(sdev), sdev->lun);
2114
2115 if (sd && sd->expose_device) {
2116 atomic_set(&sd->ioaccel_cmds_out, 0);
2117 sdev->hostdata = sd;
2118 } else
2119 sdev->hostdata = NULL;
2120 spin_unlock_irqrestore(&h->devlock, flags);
2121 return 0;
2122 }
2123
2124 /* configure scsi device based on internal per-device structure */
2125 static int hpsa_slave_configure(struct scsi_device *sdev)
2126 {
2127 struct hpsa_scsi_dev_t *sd;
2128 int queue_depth;
2129
2130 sd = sdev->hostdata;
2131 sdev->no_uld_attach = !sd || !sd->expose_device;
2132
2133 if (sd) {
2134 if (sd->external)
2135 queue_depth = EXTERNAL_QD;
2136 else
2137 queue_depth = sd->queue_depth != 0 ?
2138 sd->queue_depth : sdev->host->can_queue;
2139 } else
2140 queue_depth = sdev->host->can_queue;
2141
2142 scsi_change_queue_depth(sdev, queue_depth);
2143
2144 return 0;
2145 }
2146
2147 static void hpsa_slave_destroy(struct scsi_device *sdev)
2148 {
2149 /* nothing to do. */
2150 }
2151
2152 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2153 {
2154 int i;
2155
2156 if (!h->ioaccel2_cmd_sg_list)
2157 return;
2158 for (i = 0; i < h->nr_cmds; i++) {
2159 kfree(h->ioaccel2_cmd_sg_list[i]);
2160 h->ioaccel2_cmd_sg_list[i] = NULL;
2161 }
2162 kfree(h->ioaccel2_cmd_sg_list);
2163 h->ioaccel2_cmd_sg_list = NULL;
2164 }
2165
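/*
 * Pre-allocate one ioaccel2 SG chain block per command slot;
 * hpsa_map_ioaccel2_sg_chain_block() then simply indexes this array by
 * c->cmdindex in the I/O path, so no allocation happens per request.
 */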
2166 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
2167 {
2168 int i;
2169
2170 if (h->chainsize <= 0)
2171 return 0;
2172
2173 h->ioaccel2_cmd_sg_list =
2174 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
2175 GFP_KERNEL);
2176 if (!h->ioaccel2_cmd_sg_list)
2177 return -ENOMEM;
2178 for (i = 0; i < h->nr_cmds; i++) {
2179 h->ioaccel2_cmd_sg_list[i] =
2180 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
2181 h->maxsgentries, GFP_KERNEL);
2182 if (!h->ioaccel2_cmd_sg_list[i])
2183 goto clean;
2184 }
2185 return 0;
2186
2187 clean:
2188 hpsa_free_ioaccel2_sg_chain_blocks(h);
2189 return -ENOMEM;
2190 }
2191
2192 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
2193 {
2194 int i;
2195
2196 if (!h->cmd_sg_list)
2197 return;
2198 for (i = 0; i < h->nr_cmds; i++) {
2199 kfree(h->cmd_sg_list[i]);
2200 h->cmd_sg_list[i] = NULL;
2201 }
2202 kfree(h->cmd_sg_list);
2203 h->cmd_sg_list = NULL;
2204 }
2205
2206 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
2207 {
2208 int i;
2209
2210 if (h->chainsize <= 0)
2211 return 0;
2212
2213 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
2214 GFP_KERNEL);
2215 if (!h->cmd_sg_list)
2216 return -ENOMEM;
2217
2218 for (i = 0; i < h->nr_cmds; i++) {
2219 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
2220 h->chainsize, GFP_KERNEL);
2221 if (!h->cmd_sg_list[i])
2222 goto clean;
2223
2224 }
2225 return 0;
2226
2227 clean:
2228 hpsa_free_sg_chain_blocks(h);
2229 return -ENOMEM;
2230 }
2231
2232 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2233 struct io_accel2_cmd *cp, struct CommandList *c)
2234 {
2235 struct ioaccel2_sg_element *chain_block;
2236 u64 temp64;
2237 u32 chain_size;
2238
2239 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2240 chain_size = le32_to_cpu(cp->sg[0].length);
2241 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2242 PCI_DMA_TODEVICE);
2243 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2244 /* prevent subsequent unmapping */
2245 cp->sg->address = 0;
2246 return -1;
2247 }
2248 cp->sg->address = cpu_to_le64(temp64);
2249 return 0;
2250 }
2251
2252 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2253 struct io_accel2_cmd *cp)
2254 {
2255 struct ioaccel2_sg_element *chain_sg;
2256 u64 temp64;
2257 u32 chain_size;
2258
2259 chain_sg = cp->sg;
2260 temp64 = le64_to_cpu(chain_sg->address);
2261 chain_size = le32_to_cpu(cp->sg[0].length);
2262 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
2263 }
2264
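/*
 * SG chaining for the RAID path: when a request needs more scatter-gather
 * entries than fit in the command itself (h->max_cmd_sg_entries), the last
 * inline descriptor is turned into a chain pointer (Ext = HPSA_SG_CHAIN)
 * to a separately DMA-mapped block holding the remaining entries.
 * hpsa_unmap_sg_chain_block() undoes the mapping at completion time.
 */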
2265 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2266 struct CommandList *c)
2267 {
2268 struct SGDescriptor *chain_sg, *chain_block;
2269 u64 temp64;
2270 u32 chain_len;
2271
2272 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2273 chain_block = h->cmd_sg_list[c->cmdindex];
2274 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2275 chain_len = sizeof(*chain_sg) *
2276 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2277 chain_sg->Len = cpu_to_le32(chain_len);
2278 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2279 PCI_DMA_TODEVICE);
2280 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2281 /* prevent subsequent unmapping */
2282 chain_sg->Addr = cpu_to_le64(0);
2283 return -1;
2284 }
2285 chain_sg->Addr = cpu_to_le64(temp64);
2286 return 0;
2287 }
2288
2289 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2290 struct CommandList *c)
2291 {
2292 struct SGDescriptor *chain_sg;
2293
2294 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2295 return;
2296
2297 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2298 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2299 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2300 }
2301
2302
2303 /* Decode the various types of errors on ioaccel2 path.
2304 * Return 1 for any error that should generate a RAID path retry.
2305 * Return 0 for errors that don't require a RAID path retry.
2306 */
2307 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2308 struct CommandList *c,
2309 struct scsi_cmnd *cmd,
2310 struct io_accel2_cmd *c2,
2311 struct hpsa_scsi_dev_t *dev)
2312 {
2313 int data_len;
2314 int retry = 0;
2315 u32 ioaccel2_resid = 0;
2316
2317 switch (c2->error_data.serv_response) {
2318 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2319 switch (c2->error_data.status) {
2320 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2321 break;
2322 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2323 cmd->result |= SAM_STAT_CHECK_CONDITION;
2324 if (c2->error_data.data_present !=
2325 IOACCEL2_SENSE_DATA_PRESENT) {
2326 memset(cmd->sense_buffer, 0,
2327 SCSI_SENSE_BUFFERSIZE);
2328 break;
2329 }
2330 /* copy the sense data */
2331 data_len = c2->error_data.sense_data_len;
2332 if (data_len > SCSI_SENSE_BUFFERSIZE)
2333 data_len = SCSI_SENSE_BUFFERSIZE;
2334 if (data_len > sizeof(c2->error_data.sense_data_buff))
2335 data_len =
2336 sizeof(c2->error_data.sense_data_buff);
2337 memcpy(cmd->sense_buffer,
2338 c2->error_data.sense_data_buff, data_len);
2339 retry = 1;
2340 break;
2341 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2342 retry = 1;
2343 break;
2344 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2345 retry = 1;
2346 break;
2347 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2348 retry = 1;
2349 break;
2350 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2351 retry = 1;
2352 break;
2353 default:
2354 retry = 1;
2355 break;
2356 }
2357 break;
2358 case IOACCEL2_SERV_RESPONSE_FAILURE:
2359 switch (c2->error_data.status) {
2360 case IOACCEL2_STATUS_SR_IO_ERROR:
2361 case IOACCEL2_STATUS_SR_IO_ABORTED:
2362 case IOACCEL2_STATUS_SR_OVERRUN:
2363 retry = 1;
2364 break;
2365 case IOACCEL2_STATUS_SR_UNDERRUN:
2366 cmd->result = (DID_OK << 16); /* host byte */
2367 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2368 ioaccel2_resid = get_unaligned_le32(
2369 &c2->error_data.resid_cnt[0]);
2370 scsi_set_resid(cmd, ioaccel2_resid);
2371 break;
2372 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2373 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2374 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2375 /*
2376 * Did an HBA disk disappear? We will eventually
2377 * get a state change event from the controller but
2378 * in the meantime, we need to tell the OS that the
2379 * HBA disk is no longer there and stop I/O
2380 * from going down. This allows the potential re-insert
2381 * of the disk to get the same device node.
2382 */
2383 if (dev->physical_device && dev->expose_device) {
2384 cmd->result = DID_NO_CONNECT << 16;
2385 dev->removed = 1;
2386 h->drv_req_rescan = 1;
2387 dev_warn(&h->pdev->dev,
2388 "%s: device is gone!\n", __func__);
2389 } else
2390 /*
2391 * Retry by sending down the RAID path.
2392 * We will get an event from ctlr to
2393 * trigger rescan regardless.
2394 */
2395 retry = 1;
2396 break;
2397 default:
2398 retry = 1;
2399 }
2400 break;
2401 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2402 break;
2403 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2404 break;
2405 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2406 retry = 1;
2407 break;
2408 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2409 break;
2410 default:
2411 retry = 1;
2412 break;
2413 }
2414
2415 return retry; /* retry on raid path? */
2416 }
2417
2418 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2419 struct CommandList *c)
2420 {
2421 bool do_wake = false;
2422
2423 /*
2424 * Reset c->scsi_cmd here so that the reset handler will know
2425 * this command has completed. Then, check to see if the handler is
2426 * waiting for this command, and, if so, wake it.
2427 */
2428 c->scsi_cmd = SCSI_CMD_IDLE;
2429 mb(); /* Declare command idle before checking for pending events. */
2430 if (c->reset_pending) {
2431 unsigned long flags;
2432 struct hpsa_scsi_dev_t *dev;
2433
2434 /*
2435 * There appears to be a reset pending; lock the lock and
2436 * reconfirm. If so, then decrement the count of outstanding
2437 * commands and wake the reset command if this is the last one.
2438 */
2439 spin_lock_irqsave(&h->lock, flags);
2440 dev = c->reset_pending; /* Re-fetch under the lock. */
2441 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2442 do_wake = true;
2443 c->reset_pending = NULL;
2444 spin_unlock_irqrestore(&h->lock, flags);
2445 }
2446
2447 if (do_wake)
2448 wake_up_all(&h->event_sync_wait_queue);
2449 }
2450
2451 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2452 struct CommandList *c)
2453 {
2454 hpsa_cmd_resolve_events(h, c);
2455 cmd_tagged_free(h, c);
2456 }
2457
2458 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2459 struct CommandList *c, struct scsi_cmnd *cmd)
2460 {
2461 hpsa_cmd_resolve_and_free(h, c);
2462 if (cmd && cmd->scsi_done)
2463 cmd->scsi_done(cmd);
2464 }
2465
2466 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2467 {
2468 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2469 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2470 }
2471
2472 static void process_ioaccel2_completion(struct ctlr_info *h,
2473 struct CommandList *c, struct scsi_cmnd *cmd,
2474 struct hpsa_scsi_dev_t *dev)
2475 {
2476 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2477
2478 /* check for good status */
2479 if (likely(c2->error_data.serv_response == 0 &&
2480 c2->error_data.status == 0))
2481 return hpsa_cmd_free_and_done(h, c, cmd);
2482
2483 /*
2484 * Any RAID offload error results in retry which will use
2485 * the normal I/O path so the controller can handle whatever is
2486 * wrong.
2487 */
2488 if (is_logical_device(dev) &&
2489 c2->error_data.serv_response ==
2490 IOACCEL2_SERV_RESPONSE_FAILURE) {
2491 if (c2->error_data.status ==
2492 IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
2493 dev->offload_enabled = 0;
2494 dev->offload_to_be_enabled = 0;
2495 }
2496
2497 return hpsa_retry_cmd(h, c);
2498 }
2499
2500 if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
2501 return hpsa_retry_cmd(h, c);
2502
2503 return hpsa_cmd_free_and_done(h, c, cmd);
2504 }
2505
2506 /* Returns 0 on success, < 0 otherwise. */
2507 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2508 struct CommandList *cp)
2509 {
2510 u8 tmf_status = cp->err_info->ScsiStatus;
2511
2512 switch (tmf_status) {
2513 case CISS_TMF_COMPLETE:
2514 /*
2515 * CISS_TMF_COMPLETE never happens; instead,
2516 * ei->CommandStatus == 0 for this case.
2517 */
2518 case CISS_TMF_SUCCESS:
2519 return 0;
2520 case CISS_TMF_INVALID_FRAME:
2521 case CISS_TMF_NOT_SUPPORTED:
2522 case CISS_TMF_FAILED:
2523 case CISS_TMF_WRONG_LUN:
2524 case CISS_TMF_OVERLAPPED_TAG:
2525 break;
2526 default:
2527 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2528 tmf_status);
2529 break;
2530 }
2531 return -tmf_status;
2532 }
2533
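/*
 * Per-command completion handler: translate the controller's error
 * information (or the ioaccel error data) into a SCSI midlayer result,
 * copying sense data where appropriate, and either complete the command
 * or resubmit it down the RAID path.
 */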
2534 static void complete_scsi_command(struct CommandList *cp)
2535 {
2536 struct scsi_cmnd *cmd;
2537 struct ctlr_info *h;
2538 struct ErrorInfo *ei;
2539 struct hpsa_scsi_dev_t *dev;
2540 struct io_accel2_cmd *c2;
2541
2542 u8 sense_key;
2543 u8 asc; /* additional sense code */
2544 u8 ascq; /* additional sense code qualifier */
2545 unsigned long sense_data_size;
2546
2547 ei = cp->err_info;
2548 cmd = cp->scsi_cmd;
2549 h = cp->h;
2550
2551 if (!cmd->device) {
2552 cmd->result = DID_NO_CONNECT << 16;
2553 return hpsa_cmd_free_and_done(h, cp, cmd);
2554 }
2555
2556 dev = cmd->device->hostdata;
2557 if (!dev) {
2558 cmd->result = DID_NO_CONNECT << 16;
2559 return hpsa_cmd_free_and_done(h, cp, cmd);
2560 }
2561 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2562
2563 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2564 if ((cp->cmd_type == CMD_SCSI) &&
2565 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2566 hpsa_unmap_sg_chain_block(h, cp);
2567
2568 if ((cp->cmd_type == CMD_IOACCEL2) &&
2569 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2570 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2571
2572 cmd->result = (DID_OK << 16); /* host byte */
2573 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2574
2575 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2576 if (dev->physical_device && dev->expose_device &&
2577 dev->removed) {
2578 cmd->result = DID_NO_CONNECT << 16;
2579 return hpsa_cmd_free_and_done(h, cp, cmd);
2580 }
2581 if (likely(cp->phys_disk != NULL))
2582 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2583 }
2584
2585 /*
2586 * We check for lockup status here as it may be set for
2587 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2588 * fail_all_outstanding_cmds()
2589 */
2590 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2591 /* DID_NO_CONNECT will prevent a retry */
2592 cmd->result = DID_NO_CONNECT << 16;
2593 return hpsa_cmd_free_and_done(h, cp, cmd);
2594 }
2595
2596 if (unlikely(hpsa_is_pending_event(cp)))
2597 if (cp->reset_pending)
2598 return hpsa_cmd_free_and_done(h, cp, cmd);
2599
2600 if (cp->cmd_type == CMD_IOACCEL2)
2601 return process_ioaccel2_completion(h, cp, cmd, dev);
2602
2603 scsi_set_resid(cmd, ei->ResidualCnt);
2604 if (ei->CommandStatus == 0)
2605 return hpsa_cmd_free_and_done(h, cp, cmd);
2606
2607 /* For I/O accelerator commands, copy over some fields to the normal
2608 * CISS header used below for error handling.
2609 */
2610 if (cp->cmd_type == CMD_IOACCEL1) {
2611 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2612 cp->Header.SGList = scsi_sg_count(cmd);
2613 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2614 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2615 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2616 cp->Header.tag = c->tag;
2617 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2618 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2619
2620 /* Any RAID offload error results in retry which will use
2621 * the normal I/O path so the controller can handle whatever's
2622 * wrong.
2623 */
2624 if (is_logical_device(dev)) {
2625 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2626 dev->offload_enabled = 0;
2627 return hpsa_retry_cmd(h, cp);
2628 }
2629 }
2630
2631 /* an error has occurred */
2632 switch (ei->CommandStatus) {
2633
2634 case CMD_TARGET_STATUS:
2635 cmd->result |= ei->ScsiStatus;
2636 /* copy the sense data */
2637 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2638 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2639 else
2640 sense_data_size = sizeof(ei->SenseInfo);
2641 if (ei->SenseLen < sense_data_size)
2642 sense_data_size = ei->SenseLen;
2643 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2644 if (ei->ScsiStatus)
2645 decode_sense_data(ei->SenseInfo, sense_data_size,
2646 &sense_key, &asc, &ascq);
2647 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2648 if (sense_key == ABORTED_COMMAND) {
2649 cmd->result |= DID_SOFT_ERROR << 16;
2650 break;
2651 }
2652 break;
2653 }
2654 /* Problem was not a check condition
2655 * Pass it up to the upper layers...
2656 */
2657 if (ei->ScsiStatus) {
2658 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2659 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2660 "Returning result: 0x%x\n",
2661 cp, ei->ScsiStatus,
2662 sense_key, asc, ascq,
2663 cmd->result);
2664 } else { /* scsi status is zero??? How??? */
2665 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2666 "Returning no connection.\n", cp),
2667
2668 /* Ordinarily, this case should never happen,
2669 * but there is a bug in some released firmware
2670 * revisions that allows it to happen if, for
2671 * example, a 4100 backplane loses power and
2672 * the tape drive is in it. We assume that
2673 * it's a fatal error of some kind because we
2674 * can't show that it wasn't. We will make it
2675 * look like selection timeout since that is
2676 * the most common reason for this to occur,
2677 * and it's severe enough.
2678 */
2679
2680 cmd->result = DID_NO_CONNECT << 16;
2681 }
2682 break;
2683
2684 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2685 break;
2686 case CMD_DATA_OVERRUN:
2687 dev_warn(&h->pdev->dev,
2688 "CDB %16phN data overrun\n", cp->Request.CDB);
2689 break;
2690 case CMD_INVALID: {
2691 /* print_bytes(cp, sizeof(*cp), 1, 0);
2692 print_cmd(cp); */
2693 /* We get CMD_INVALID if you address a non-existent device
2694 * instead of a selection timeout (no response). You will
2695 * see this if you yank out a drive, then try to access it.
2696 * This is kind of a shame because it means that any other
2697 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2698 * missing target. */
2699 cmd->result = DID_NO_CONNECT << 16;
2700 }
2701 break;
2702 case CMD_PROTOCOL_ERR:
2703 cmd->result = DID_ERROR << 16;
2704 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2705 cp->Request.CDB);
2706 break;
2707 case CMD_HARDWARE_ERR:
2708 cmd->result = DID_ERROR << 16;
2709 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2710 cp->Request.CDB);
2711 break;
2712 case CMD_CONNECTION_LOST:
2713 cmd->result = DID_ERROR << 16;
2714 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2715 cp->Request.CDB);
2716 break;
2717 case CMD_ABORTED:
2718 cmd->result = DID_ABORT << 16;
2719 break;
2720 case CMD_ABORT_FAILED:
2721 cmd->result = DID_ERROR << 16;
2722 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2723 cp->Request.CDB);
2724 break;
2725 case CMD_UNSOLICITED_ABORT:
2726 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2727 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2728 cp->Request.CDB);
2729 break;
2730 case CMD_TIMEOUT:
2731 cmd->result = DID_TIME_OUT << 16;
2732 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2733 cp->Request.CDB);
2734 break;
2735 case CMD_UNABORTABLE:
2736 cmd->result = DID_ERROR << 16;
2737 dev_warn(&h->pdev->dev, "Command unabortable\n");
2738 break;
2739 case CMD_TMF_STATUS:
2740 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2741 cmd->result = DID_ERROR << 16;
2742 break;
2743 case CMD_IOACCEL_DISABLED:
2744 /* This only handles the direct pass-through case since RAID
2745 * offload is handled above. Just attempt a retry.
2746 */
2747 cmd->result = DID_SOFT_ERROR << 16;
2748 dev_warn(&h->pdev->dev,
2749 "cp %p had HP SSD Smart Path error\n", cp);
2750 break;
2751 default:
2752 cmd->result = DID_ERROR << 16;
2753 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2754 cp, ei->CommandStatus);
2755 }
2756
2757 return hpsa_cmd_free_and_done(h, cp, cmd);
2758 }
2759
2760 static void hpsa_pci_unmap(struct pci_dev *pdev,
2761 struct CommandList *c, int sg_used, int data_direction)
2762 {
2763 int i;
2764
2765 for (i = 0; i < sg_used; i++)
2766 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2767 le32_to_cpu(c->SG[i].Len),
2768 data_direction);
2769 }
2770
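/*
 * DMA-map a single flat buffer and describe it in SG[0] of the command.
 * Used for driver-internal commands (e.g. inquiries and report LUNs) that
 * transfer at most one contiguous buffer.
 */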
2771 static int hpsa_map_one(struct pci_dev *pdev,
2772 struct CommandList *cp,
2773 unsigned char *buf,
2774 size_t buflen,
2775 int data_direction)
2776 {
2777 u64 addr64;
2778
2779 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2780 cp->Header.SGList = 0;
2781 cp->Header.SGTotal = cpu_to_le16(0);
2782 return 0;
2783 }
2784
2785 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2786 if (dma_mapping_error(&pdev->dev, addr64)) {
2787 /* Prevent subsequent unmap of something never mapped */
2788 cp->Header.SGList = 0;
2789 cp->Header.SGTotal = cpu_to_le16(0);
2790 return -1;
2791 }
2792 cp->SG[0].Addr = cpu_to_le64(addr64);
2793 cp->SG[0].Len = cpu_to_le32(buflen);
2794 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2795 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2796 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2797 return 0;
2798 }
2799
2800 #define NO_TIMEOUT ((unsigned long) -1)
2801 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2802 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2803 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2804 {
2805 DECLARE_COMPLETION_ONSTACK(wait);
2806
2807 c->waiting = &wait;
2808 __enqueue_cmd_and_start_io(h, c, reply_queue);
2809 if (timeout_msecs == NO_TIMEOUT) {
2810 /* TODO: get rid of this no-timeout thing */
2811 wait_for_completion_io(&wait);
2812 return IO_OK;
2813 }
2814 if (!wait_for_completion_io_timeout(&wait,
2815 msecs_to_jiffies(timeout_msecs))) {
2816 dev_warn(&h->pdev->dev, "Command timed out.\n");
2817 return -ETIMEDOUT;
2818 }
2819 return IO_OK;
2820 }
2821
2822 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2823 int reply_queue, unsigned long timeout_msecs)
2824 {
2825 if (unlikely(lockup_detected(h))) {
2826 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2827 return IO_OK;
2828 }
2829 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2830 }
2831
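/*
 * Read the per-cpu "controller locked up" flag.  hpsa_scsi_do_simple_cmd()
 * above uses this to complete driver-internal commands immediately with
 * CMD_CTLR_LOCKUP instead of sending them to dead hardware.
 */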
2832 static u32 lockup_detected(struct ctlr_info *h)
2833 {
2834 int cpu;
2835 u32 rc, *lockup_detected;
2836
2837 cpu = get_cpu();
2838 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2839 rc = *lockup_detected;
2840 put_cpu();
2841 return rc;
2842 }
2843
2844 #define MAX_DRIVER_CMD_RETRIES 25
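/*
 * Issue a driver-internal command and retry it while the target reports
 * UNIT ATTENTION or BUSY, up to MAX_DRIVER_CMD_RETRIES times.  After the
 * first few attempts an exponential backoff is applied between retries
 * (starting at 10 ms and doubling until it reaches roughly one second).
 */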
2845 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2846 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2847 {
2848 int backoff_time = 10, retry_count = 0;
2849 int rc;
2850
2851 do {
2852 memset(c->err_info, 0, sizeof(*c->err_info));
2853 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2854 timeout_msecs);
2855 if (rc)
2856 break;
2857 retry_count++;
2858 if (retry_count > 3) {
2859 msleep(backoff_time);
2860 if (backoff_time < 1000)
2861 backoff_time *= 2;
2862 }
2863 } while ((check_for_unit_attention(h, c) ||
2864 check_for_busy(h, c)) &&
2865 retry_count <= MAX_DRIVER_CMD_RETRIES);
2866 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2867 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2868 rc = -EIO;
2869 return rc;
2870 }
2871
2872 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2873 struct CommandList *c)
2874 {
2875 const u8 *cdb = c->Request.CDB;
2876 const u8 *lun = c->Header.LUN.LunAddrBytes;
2877
2878 dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
2879 txt, lun, cdb);
2880 }
2881
2882 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2883 struct CommandList *cp)
2884 {
2885 const struct ErrorInfo *ei = cp->err_info;
2886 struct device *d = &cp->h->pdev->dev;
2887 u8 sense_key, asc, ascq;
2888 int sense_len;
2889
2890 switch (ei->CommandStatus) {
2891 case CMD_TARGET_STATUS:
2892 if (ei->SenseLen > sizeof(ei->SenseInfo))
2893 sense_len = sizeof(ei->SenseInfo);
2894 else
2895 sense_len = ei->SenseLen;
2896 decode_sense_data(ei->SenseInfo, sense_len,
2897 &sense_key, &asc, &ascq);
2898 hpsa_print_cmd(h, "SCSI status", cp);
2899 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2900 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2901 sense_key, asc, ascq);
2902 else
2903 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2904 if (ei->ScsiStatus == 0)
2905 dev_warn(d, "SCSI status is abnormally zero. "
2906 "(probably indicates selection timeout "
2907 "reported incorrectly due to a known "
2908 "firmware bug, circa July, 2001.)\n");
2909 break;
2910 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2911 break;
2912 case CMD_DATA_OVERRUN:
2913 hpsa_print_cmd(h, "overrun condition", cp);
2914 break;
2915 case CMD_INVALID: {
2916 /* controller unfortunately reports SCSI passthru's
2917 * to non-existent targets as invalid commands.
2918 */
2919 hpsa_print_cmd(h, "invalid command", cp);
2920 dev_warn(d, "probably means device no longer present\n");
2921 }
2922 break;
2923 case CMD_PROTOCOL_ERR:
2924 hpsa_print_cmd(h, "protocol error", cp);
2925 break;
2926 case CMD_HARDWARE_ERR:
2927 hpsa_print_cmd(h, "hardware error", cp);
2928 break;
2929 case CMD_CONNECTION_LOST:
2930 hpsa_print_cmd(h, "connection lost", cp);
2931 break;
2932 case CMD_ABORTED:
2933 hpsa_print_cmd(h, "aborted", cp);
2934 break;