]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/hpsa.c
hpsa: Fallback to MSI rather than to INTx if MSI-X failed
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / hpsa.c
CommitLineData
edd16368
SC
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
51c35139 3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
edd16368
SC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2 of the License.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
e5a44df8 26#include <linux/pci-aspm.h>
edd16368
SC
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/fs.h>
31#include <linux/timer.h>
edd16368
SC
32#include <linux/init.h>
33#include <linux/spinlock.h>
edd16368
SC
34#include <linux/compat.h>
35#include <linux/blktrace_api.h>
36#include <linux/uaccess.h>
37#include <linux/io.h>
38#include <linux/dma-mapping.h>
39#include <linux/completion.h>
40#include <linux/moduleparam.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_host.h>
667e23d4 45#include <scsi/scsi_tcq.h>
edd16368
SC
46#include <linux/cciss_ioctl.h>
47#include <linux/string.h>
48#include <linux/bitmap.h>
60063497 49#include <linux/atomic.h>
a0c12413 50#include <linux/jiffies.h>
094963da 51#include <linux/percpu.h>
283b4a9b 52#include <asm/div64.h>
edd16368
SC
53#include "hpsa_cmd.h"
54#include "hpsa.h"
55
56/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
9a993302 57#define HPSA_DRIVER_VERSION "3.4.4-1"
edd16368 58#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
f79cfec6 59#define HPSA "hpsa"
edd16368
SC
60
61/* How long to wait (in milliseconds) for board to go into simple mode */
62#define MAX_CONFIG_WAIT 30000
63#define MAX_IOCTL_CONFIG_WAIT 1000
64
65/*define how many times we will try a command because of bus resets */
66#define MAX_CMD_RETRIES 3
67
68/* Embedded module documentation macros - see modules.h */
69MODULE_AUTHOR("Hewlett-Packard Company");
70MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
71 HPSA_DRIVER_VERSION);
72MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
73MODULE_VERSION(HPSA_DRIVER_VERSION);
74MODULE_LICENSE("GPL");
75
76static int hpsa_allow_any;
77module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
78MODULE_PARM_DESC(hpsa_allow_any,
79 "Allow hpsa driver to access unknown HP Smart Array hardware");
02ec19c8
SC
80static int hpsa_simple_mode;
81module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(hpsa_simple_mode,
83 "Use 'simple mode' rather than 'performant mode'");
edd16368
SC
84
85/* define the PCI info for the cards we can control */
86static const struct pci_device_id hpsa_pci_device_id[] = {
edd16368
SC
87 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
88 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
89 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
90 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
163dbcd8
MM
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
f8b01eb9 94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
9143a961 95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
fe0c9610
MM
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
97b9f53d
MM
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
3b7a45e5 119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
97b9f53d
MM
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
3b7a45e5
JH
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
8e616a5e
SC
128 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
129 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
130 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
131 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
132 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
7c03b870 133 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
6798cc0a 134 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
edd16368
SC
135 {0,}
136};
137
138MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
139
140/* board_id = Subsystem Device ID & Vendor ID
141 * product = Marketing Name for the board
142 * access = Address of the struct of function pointers
143 */
144static struct board_type products[] = {
edd16368
SC
145 {0x3241103C, "Smart Array P212", &SA5_access},
146 {0x3243103C, "Smart Array P410", &SA5_access},
147 {0x3245103C, "Smart Array P410i", &SA5_access},
148 {0x3247103C, "Smart Array P411", &SA5_access},
149 {0x3249103C, "Smart Array P812", &SA5_access},
163dbcd8
MM
150 {0x324A103C, "Smart Array P712m", &SA5_access},
151 {0x324B103C, "Smart Array P711m", &SA5_access},
fe0c9610
MM
152 {0x3350103C, "Smart Array P222", &SA5_access},
153 {0x3351103C, "Smart Array P420", &SA5_access},
154 {0x3352103C, "Smart Array P421", &SA5_access},
155 {0x3353103C, "Smart Array P822", &SA5_access},
156 {0x3354103C, "Smart Array P420i", &SA5_access},
157 {0x3355103C, "Smart Array P220i", &SA5_access},
158 {0x3356103C, "Smart Array P721m", &SA5_access},
1fd6c8e3
MM
159 {0x1921103C, "Smart Array P830i", &SA5_access},
160 {0x1922103C, "Smart Array P430", &SA5_access},
161 {0x1923103C, "Smart Array P431", &SA5_access},
162 {0x1924103C, "Smart Array P830", &SA5_access},
163 {0x1926103C, "Smart Array P731m", &SA5_access},
164 {0x1928103C, "Smart Array P230i", &SA5_access},
165 {0x1929103C, "Smart Array P530", &SA5_access},
97b9f53d
MM
166 {0x21BD103C, "Smart Array", &SA5_access},
167 {0x21BE103C, "Smart Array", &SA5_access},
168 {0x21BF103C, "Smart Array", &SA5_access},
169 {0x21C0103C, "Smart Array", &SA5_access},
170 {0x21C1103C, "Smart Array", &SA5_access},
171 {0x21C2103C, "Smart Array", &SA5_access},
172 {0x21C3103C, "Smart Array", &SA5_access},
173 {0x21C4103C, "Smart Array", &SA5_access},
174 {0x21C5103C, "Smart Array", &SA5_access},
3b7a45e5 175 {0x21C6103C, "Smart Array", &SA5_access},
97b9f53d
MM
176 {0x21C7103C, "Smart Array", &SA5_access},
177 {0x21C8103C, "Smart Array", &SA5_access},
178 {0x21C9103C, "Smart Array", &SA5_access},
3b7a45e5
JH
179 {0x21CA103C, "Smart Array", &SA5_access},
180 {0x21CB103C, "Smart Array", &SA5_access},
181 {0x21CC103C, "Smart Array", &SA5_access},
182 {0x21CD103C, "Smart Array", &SA5_access},
183 {0x21CE103C, "Smart Array", &SA5_access},
8e616a5e
SC
184 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
185 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
186 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
187 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
188 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
edd16368
SC
189 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
190};
191
192static int number_of_controllers;
193
10f66018
SC
194static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
195static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
edd16368 196static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
0b57075d
SC
197static void lock_and_start_io(struct ctlr_info *h);
198static void start_io(struct ctlr_info *h, unsigned long *flags);
edd16368
SC
199
200#ifdef CONFIG_COMPAT
201static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
202#endif
203
204static void cmd_free(struct ctlr_info *h, struct CommandList *c);
205static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
206static struct CommandList *cmd_alloc(struct ctlr_info *h);
207static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
a2dac136 208static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
b7bb24eb 209 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
edd16368 210 int cmd_type);
b7bb24eb 211#define VPD_PAGE (1 << 8)
edd16368 212
f281233d 213static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
a08a8471
SC
214static void hpsa_scan_start(struct Scsi_Host *);
215static int hpsa_scan_finished(struct Scsi_Host *sh,
216 unsigned long elapsed_time);
667e23d4
SC
217static int hpsa_change_queue_depth(struct scsi_device *sdev,
218 int qdepth, int reason);
edd16368
SC
219
220static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
75167d2c 221static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
edd16368
SC
222static int hpsa_slave_alloc(struct scsi_device *sdev);
223static void hpsa_slave_destroy(struct scsi_device *sdev);
224
edd16368 225static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
edd16368
SC
226static int check_for_unit_attention(struct ctlr_info *h,
227 struct CommandList *c);
228static void check_ioctl_unit_attention(struct ctlr_info *h,
229 struct CommandList *c);
303932fd
DB
230/* performant mode helper functions */
231static void calc_bucket_map(int *bucket, int num_buckets,
e1f7de0c 232 int nsgs, int min_blocks, int *bucket_map);
6f039790 233static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
254f796b 234static inline u32 next_command(struct ctlr_info *h, u8 q);
6f039790
GKH
235static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
236 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
237 u64 *cfg_offset);
238static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
239 unsigned long *memory_bar);
240static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
241static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
242 int wait_for_ready);
75167d2c 243static inline void finish_cmd(struct CommandList *c);
283b4a9b 244static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
fe5389c8
SC
245#define BOARD_NOT_READY 0
246#define BOARD_READY 1
23100dd9 247static void hpsa_drain_accel_commands(struct ctlr_info *h);
76438d08 248static void hpsa_flush_cache(struct ctlr_info *h);
c349775e
ST
249static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
250 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
251 u8 *scsi3addr);
edd16368 252
edd16368
SC
253static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
254{
255 unsigned long *priv = shost_priv(sdev->host);
256 return (struct ctlr_info *) *priv;
257}
258
a23513e8
SC
259static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
260{
261 unsigned long *priv = shost_priv(sh);
262 return (struct ctlr_info *) *priv;
263}
264
edd16368
SC
265static int check_for_unit_attention(struct ctlr_info *h,
266 struct CommandList *c)
267{
268 if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
269 return 0;
270
271 switch (c->err_info->SenseInfo[12]) {
272 case STATE_CHANGED:
f79cfec6 273 dev_warn(&h->pdev->dev, HPSA "%d: a state change "
edd16368
SC
274 "detected, command retried\n", h->ctlr);
275 break;
276 case LUN_FAILED:
f79cfec6 277 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
edd16368
SC
278 "detected, action required\n", h->ctlr);
279 break;
280 case REPORT_LUNS_CHANGED:
f79cfec6 281 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
31468401 282 "changed, action required\n", h->ctlr);
edd16368 283 /*
4f4eb9f1
ST
284 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
285 * target (array) devices.
edd16368
SC
286 */
287 break;
288 case POWER_OR_RESET:
f79cfec6 289 dev_warn(&h->pdev->dev, HPSA "%d: a power on "
edd16368
SC
290 "or device reset detected\n", h->ctlr);
291 break;
292 case UNIT_ATTENTION_CLEARED:
f79cfec6 293 dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
edd16368
SC
294 "cleared by another initiator\n", h->ctlr);
295 break;
296 default:
f79cfec6 297 dev_warn(&h->pdev->dev, HPSA "%d: unknown "
edd16368
SC
298 "unit attention detected\n", h->ctlr);
299 break;
300 }
301 return 1;
302}
303
852af20a
MB
304static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
305{
306 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
307 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
308 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
309 return 0;
310 dev_warn(&h->pdev->dev, HPSA "device busy");
311 return 1;
312}
313
da0697bd
ST
314static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
315 struct device_attribute *attr,
316 const char *buf, size_t count)
317{
318 int status, len;
319 struct ctlr_info *h;
320 struct Scsi_Host *shost = class_to_shost(dev);
321 char tmpbuf[10];
322
323 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
324 return -EACCES;
325 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
326 strncpy(tmpbuf, buf, len);
327 tmpbuf[len] = '\0';
328 if (sscanf(tmpbuf, "%d", &status) != 1)
329 return -EINVAL;
330 h = shost_to_hba(shost);
331 h->acciopath_status = !!status;
332 dev_warn(&h->pdev->dev,
333 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
334 h->acciopath_status ? "enabled" : "disabled");
335 return count;
336}
337
2ba8bfc8
SC
338static ssize_t host_store_raid_offload_debug(struct device *dev,
339 struct device_attribute *attr,
340 const char *buf, size_t count)
341{
342 int debug_level, len;
343 struct ctlr_info *h;
344 struct Scsi_Host *shost = class_to_shost(dev);
345 char tmpbuf[10];
346
347 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
348 return -EACCES;
349 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
350 strncpy(tmpbuf, buf, len);
351 tmpbuf[len] = '\0';
352 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
353 return -EINVAL;
354 if (debug_level < 0)
355 debug_level = 0;
356 h = shost_to_hba(shost);
357 h->raid_offload_debug = debug_level;
358 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
359 h->raid_offload_debug);
360 return count;
361}
362
edd16368
SC
363static ssize_t host_store_rescan(struct device *dev,
364 struct device_attribute *attr,
365 const char *buf, size_t count)
366{
367 struct ctlr_info *h;
368 struct Scsi_Host *shost = class_to_shost(dev);
a23513e8 369 h = shost_to_hba(shost);
31468401 370 hpsa_scan_start(h->scsi_host);
edd16368
SC
371 return count;
372}
373
d28ce020
SC
374static ssize_t host_show_firmware_revision(struct device *dev,
375 struct device_attribute *attr, char *buf)
376{
377 struct ctlr_info *h;
378 struct Scsi_Host *shost = class_to_shost(dev);
379 unsigned char *fwrev;
380
381 h = shost_to_hba(shost);
382 if (!h->hba_inquiry_data)
383 return 0;
384 fwrev = &h->hba_inquiry_data[32];
385 return snprintf(buf, 20, "%c%c%c%c\n",
386 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
387}
388
94a13649
SC
389static ssize_t host_show_commands_outstanding(struct device *dev,
390 struct device_attribute *attr, char *buf)
391{
392 struct Scsi_Host *shost = class_to_shost(dev);
393 struct ctlr_info *h = shost_to_hba(shost);
394
395 return snprintf(buf, 20, "%d\n", h->commands_outstanding);
396}
397
745a7a25
SC
398static ssize_t host_show_transport_mode(struct device *dev,
399 struct device_attribute *attr, char *buf)
400{
401 struct ctlr_info *h;
402 struct Scsi_Host *shost = class_to_shost(dev);
403
404 h = shost_to_hba(shost);
405 return snprintf(buf, 20, "%s\n",
960a30e7 406 h->transMethod & CFGTBL_Trans_Performant ?
745a7a25
SC
407 "performant" : "simple");
408}
409
da0697bd
ST
410static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
411 struct device_attribute *attr, char *buf)
412{
413 struct ctlr_info *h;
414 struct Scsi_Host *shost = class_to_shost(dev);
415
416 h = shost_to_hba(shost);
417 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
418 (h->acciopath_status == 1) ? "enabled" : "disabled");
419}
420
46380786 421/* List of controllers which cannot be hard reset on kexec with reset_devices */
941b1cda
SC
422static u32 unresettable_controller[] = {
423 0x324a103C, /* Smart Array P712m */
424 0x324b103C, /* SmartArray P711m */
425 0x3223103C, /* Smart Array P800 */
426 0x3234103C, /* Smart Array P400 */
427 0x3235103C, /* Smart Array P400i */
428 0x3211103C, /* Smart Array E200i */
429 0x3212103C, /* Smart Array E200 */
430 0x3213103C, /* Smart Array E200i */
431 0x3214103C, /* Smart Array E200i */
432 0x3215103C, /* Smart Array E200i */
433 0x3237103C, /* Smart Array E500 */
434 0x323D103C, /* Smart Array P700m */
7af0abbc 435 0x40800E11, /* Smart Array 5i */
941b1cda
SC
436 0x409C0E11, /* Smart Array 6400 */
437 0x409D0E11, /* Smart Array 6400 EM */
5a4f934e
TH
438 0x40700E11, /* Smart Array 5300 */
439 0x40820E11, /* Smart Array 532 */
440 0x40830E11, /* Smart Array 5312 */
441 0x409A0E11, /* Smart Array 641 */
442 0x409B0E11, /* Smart Array 642 */
443 0x40910E11, /* Smart Array 6i */
941b1cda
SC
444};
445
46380786
SC
446/* List of controllers which cannot even be soft reset */
447static u32 soft_unresettable_controller[] = {
7af0abbc 448 0x40800E11, /* Smart Array 5i */
5a4f934e
TH
449 0x40700E11, /* Smart Array 5300 */
450 0x40820E11, /* Smart Array 532 */
451 0x40830E11, /* Smart Array 5312 */
452 0x409A0E11, /* Smart Array 641 */
453 0x409B0E11, /* Smart Array 642 */
454 0x40910E11, /* Smart Array 6i */
46380786
SC
455 /* Exclude 640x boards. These are two pci devices in one slot
456 * which share a battery backed cache module. One controls the
457 * cache, the other accesses the cache through the one that controls
458 * it. If we reset the one controlling the cache, the other will
459 * likely not be happy. Just forbid resetting this conjoined mess.
460 * The 640x isn't really supported by hpsa anyway.
461 */
462 0x409C0E11, /* Smart Array 6400 */
463 0x409D0E11, /* Smart Array 6400 EM */
464};
465
466static int ctlr_is_hard_resettable(u32 board_id)
941b1cda
SC
467{
468 int i;
469
470 for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
46380786
SC
471 if (unresettable_controller[i] == board_id)
472 return 0;
473 return 1;
474}
475
476static int ctlr_is_soft_resettable(u32 board_id)
477{
478 int i;
479
480 for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
481 if (soft_unresettable_controller[i] == board_id)
941b1cda
SC
482 return 0;
483 return 1;
484}
485
46380786
SC
486static int ctlr_is_resettable(u32 board_id)
487{
488 return ctlr_is_hard_resettable(board_id) ||
489 ctlr_is_soft_resettable(board_id);
490}
491
941b1cda
SC
492static ssize_t host_show_resettable(struct device *dev,
493 struct device_attribute *attr, char *buf)
494{
495 struct ctlr_info *h;
496 struct Scsi_Host *shost = class_to_shost(dev);
497
498 h = shost_to_hba(shost);
46380786 499 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
941b1cda
SC
500}
501
edd16368
SC
502static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
503{
504 return (scsi3addr[3] & 0xC0) == 0x40;
505}
506
507static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
d82357ea 508 "1(ADM)", "UNKNOWN"
edd16368 509};
6b80b18f
ST
510#define HPSA_RAID_0 0
511#define HPSA_RAID_4 1
512#define HPSA_RAID_1 2 /* also used for RAID 10 */
513#define HPSA_RAID_5 3 /* also used for RAID 50 */
514#define HPSA_RAID_51 4
515#define HPSA_RAID_6 5 /* also used for RAID 60 */
516#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
edd16368
SC
517#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
518
519static ssize_t raid_level_show(struct device *dev,
520 struct device_attribute *attr, char *buf)
521{
522 ssize_t l = 0;
82a72c0a 523 unsigned char rlevel;
edd16368
SC
524 struct ctlr_info *h;
525 struct scsi_device *sdev;
526 struct hpsa_scsi_dev_t *hdev;
527 unsigned long flags;
528
529 sdev = to_scsi_device(dev);
530 h = sdev_to_hba(sdev);
531 spin_lock_irqsave(&h->lock, flags);
532 hdev = sdev->hostdata;
533 if (!hdev) {
534 spin_unlock_irqrestore(&h->lock, flags);
535 return -ENODEV;
536 }
537
538 /* Is this even a logical drive? */
539 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
540 spin_unlock_irqrestore(&h->lock, flags);
541 l = snprintf(buf, PAGE_SIZE, "N/A\n");
542 return l;
543 }
544
545 rlevel = hdev->raid_level;
546 spin_unlock_irqrestore(&h->lock, flags);
82a72c0a 547 if (rlevel > RAID_UNKNOWN)
edd16368
SC
548 rlevel = RAID_UNKNOWN;
549 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
550 return l;
551}
552
553static ssize_t lunid_show(struct device *dev,
554 struct device_attribute *attr, char *buf)
555{
556 struct ctlr_info *h;
557 struct scsi_device *sdev;
558 struct hpsa_scsi_dev_t *hdev;
559 unsigned long flags;
560 unsigned char lunid[8];
561
562 sdev = to_scsi_device(dev);
563 h = sdev_to_hba(sdev);
564 spin_lock_irqsave(&h->lock, flags);
565 hdev = sdev->hostdata;
566 if (!hdev) {
567 spin_unlock_irqrestore(&h->lock, flags);
568 return -ENODEV;
569 }
570 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
571 spin_unlock_irqrestore(&h->lock, flags);
572 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
573 lunid[0], lunid[1], lunid[2], lunid[3],
574 lunid[4], lunid[5], lunid[6], lunid[7]);
575}
576
577static ssize_t unique_id_show(struct device *dev,
578 struct device_attribute *attr, char *buf)
579{
580 struct ctlr_info *h;
581 struct scsi_device *sdev;
582 struct hpsa_scsi_dev_t *hdev;
583 unsigned long flags;
584 unsigned char sn[16];
585
586 sdev = to_scsi_device(dev);
587 h = sdev_to_hba(sdev);
588 spin_lock_irqsave(&h->lock, flags);
589 hdev = sdev->hostdata;
590 if (!hdev) {
591 spin_unlock_irqrestore(&h->lock, flags);
592 return -ENODEV;
593 }
594 memcpy(sn, hdev->device_id, sizeof(sn));
595 spin_unlock_irqrestore(&h->lock, flags);
596 return snprintf(buf, 16 * 2 + 2,
597 "%02X%02X%02X%02X%02X%02X%02X%02X"
598 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
599 sn[0], sn[1], sn[2], sn[3],
600 sn[4], sn[5], sn[6], sn[7],
601 sn[8], sn[9], sn[10], sn[11],
602 sn[12], sn[13], sn[14], sn[15]);
603}
604
c1988684
ST
605static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
606 struct device_attribute *attr, char *buf)
607{
608 struct ctlr_info *h;
609 struct scsi_device *sdev;
610 struct hpsa_scsi_dev_t *hdev;
611 unsigned long flags;
612 int offload_enabled;
613
614 sdev = to_scsi_device(dev);
615 h = sdev_to_hba(sdev);
616 spin_lock_irqsave(&h->lock, flags);
617 hdev = sdev->hostdata;
618 if (!hdev) {
619 spin_unlock_irqrestore(&h->lock, flags);
620 return -ENODEV;
621 }
622 offload_enabled = hdev->offload_enabled;
623 spin_unlock_irqrestore(&h->lock, flags);
624 return snprintf(buf, 20, "%d\n", offload_enabled);
625}
626
3f5eac3a
SC
627static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
628static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
629static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
630static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
c1988684
ST
631static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
632 host_show_hp_ssd_smart_path_enabled, NULL);
da0697bd
ST
633static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
634 host_show_hp_ssd_smart_path_status,
635 host_store_hp_ssd_smart_path_status);
2ba8bfc8
SC
636static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
637 host_store_raid_offload_debug);
3f5eac3a
SC
638static DEVICE_ATTR(firmware_revision, S_IRUGO,
639 host_show_firmware_revision, NULL);
640static DEVICE_ATTR(commands_outstanding, S_IRUGO,
641 host_show_commands_outstanding, NULL);
642static DEVICE_ATTR(transport_mode, S_IRUGO,
643 host_show_transport_mode, NULL);
941b1cda
SC
644static DEVICE_ATTR(resettable, S_IRUGO,
645 host_show_resettable, NULL);
3f5eac3a
SC
646
647static struct device_attribute *hpsa_sdev_attrs[] = {
648 &dev_attr_raid_level,
649 &dev_attr_lunid,
650 &dev_attr_unique_id,
c1988684 651 &dev_attr_hp_ssd_smart_path_enabled,
3f5eac3a
SC
652 NULL,
653};
654
655static struct device_attribute *hpsa_shost_attrs[] = {
656 &dev_attr_rescan,
657 &dev_attr_firmware_revision,
658 &dev_attr_commands_outstanding,
659 &dev_attr_transport_mode,
941b1cda 660 &dev_attr_resettable,
da0697bd 661 &dev_attr_hp_ssd_smart_path_status,
2ba8bfc8 662 &dev_attr_raid_offload_debug,
3f5eac3a
SC
663 NULL,
664};
665
666static struct scsi_host_template hpsa_driver_template = {
667 .module = THIS_MODULE,
f79cfec6
SC
668 .name = HPSA,
669 .proc_name = HPSA,
3f5eac3a
SC
670 .queuecommand = hpsa_scsi_queue_command,
671 .scan_start = hpsa_scan_start,
672 .scan_finished = hpsa_scan_finished,
673 .change_queue_depth = hpsa_change_queue_depth,
674 .this_id = -1,
675 .use_clustering = ENABLE_CLUSTERING,
75167d2c 676 .eh_abort_handler = hpsa_eh_abort_handler,
3f5eac3a
SC
677 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
678 .ioctl = hpsa_ioctl,
679 .slave_alloc = hpsa_slave_alloc,
680 .slave_destroy = hpsa_slave_destroy,
681#ifdef CONFIG_COMPAT
682 .compat_ioctl = hpsa_compat_ioctl,
683#endif
684 .sdev_attrs = hpsa_sdev_attrs,
685 .shost_attrs = hpsa_shost_attrs,
c0d6a4d1 686 .max_sectors = 8192,
54b2b50c 687 .no_write_same = 1,
3f5eac3a
SC
688};
689
690
691/* Enqueuing and dequeuing functions for cmdlists. */
692static inline void addQ(struct list_head *list, struct CommandList *c)
693{
694 list_add_tail(&c->list, list);
695}
696
254f796b 697static inline u32 next_command(struct ctlr_info *h, u8 q)
3f5eac3a
SC
698{
699 u32 a;
072b0518 700 struct reply_queue_buffer *rq = &h->reply_queue[q];
e16a33ad 701 unsigned long flags;
3f5eac3a 702
e1f7de0c
MG
703 if (h->transMethod & CFGTBL_Trans_io_accel1)
704 return h->access.command_completed(h, q);
705
3f5eac3a 706 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
254f796b 707 return h->access.command_completed(h, q);
3f5eac3a 708
254f796b
MG
709 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
710 a = rq->head[rq->current_entry];
711 rq->current_entry++;
e16a33ad 712 spin_lock_irqsave(&h->lock, flags);
3f5eac3a 713 h->commands_outstanding--;
e16a33ad 714 spin_unlock_irqrestore(&h->lock, flags);
3f5eac3a
SC
715 } else {
716 a = FIFO_EMPTY;
717 }
718 /* Check for wraparound */
254f796b
MG
719 if (rq->current_entry == h->max_commands) {
720 rq->current_entry = 0;
721 rq->wraparound ^= 1;
3f5eac3a
SC
722 }
723 return a;
724}
725
c349775e
ST
726/*
727 * There are some special bits in the bus address of the
728 * command that we have to set for the controller to know
729 * how to process the command:
730 *
731 * Normal performant mode:
732 * bit 0: 1 means performant mode, 0 means simple mode.
733 * bits 1-3 = block fetch table entry
734 * bits 4-6 = command type (== 0)
735 *
736 * ioaccel1 mode:
737 * bit 0 = "performant mode" bit.
738 * bits 1-3 = block fetch table entry
739 * bits 4-6 = command type (== 110)
740 * (command type is needed because ioaccel1 mode
741 * commands are submitted through the same register as normal
742 * mode commands, so this is how the controller knows whether
743 * the command is normal mode or ioaccel1 mode.)
744 *
745 * ioaccel2 mode:
746 * bit 0 = "performant mode" bit.
747 * bits 1-4 = block fetch table entry (note extra bit)
748 * bits 4-6 = not needed, because ioaccel2 mode has
749 * a separate special register for submitting commands.
750 */
751
3f5eac3a
SC
752/* set_performant_mode: Modify the tag for cciss performant
753 * set bit 0 for pull model, bits 3-1 for block fetch
754 * register number
755 */
756static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
757{
254f796b 758 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
3f5eac3a 759 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
eee0f03a 760 if (likely(h->msix_vector > 0))
254f796b 761 c->Header.ReplyQueue =
804a5cb5 762 raw_smp_processor_id() % h->nreply_queues;
254f796b 763 }
3f5eac3a
SC
764}
765
c349775e
ST
766static void set_ioaccel1_performant_mode(struct ctlr_info *h,
767 struct CommandList *c)
768{
769 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
770
771 /* Tell the controller to post the reply to the queue for this
772 * processor. This seems to give the best I/O throughput.
773 */
774 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
775 /* Set the bits in the address sent down to include:
776 * - performant mode bit (bit 0)
777 * - pull count (bits 1-3)
778 * - command type (bits 4-6)
779 */
780 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
781 IOACCEL1_BUSADDR_CMDTYPE;
782}
783
784static void set_ioaccel2_performant_mode(struct ctlr_info *h,
785 struct CommandList *c)
786{
787 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
788
789 /* Tell the controller to post the reply to the queue for this
790 * processor. This seems to give the best I/O throughput.
791 */
792 cp->reply_queue = smp_processor_id() % h->nreply_queues;
793 /* Set the bits in the address sent down to include:
794 * - performant mode bit not used in ioaccel mode 2
795 * - pull count (bits 0-3)
796 * - command type isn't needed for ioaccel2
797 */
798 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
799}
800
e85c5974
SC
801static int is_firmware_flash_cmd(u8 *cdb)
802{
803 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
804}
805
806/*
807 * During firmware flash, the heartbeat register may not update as frequently
808 * as it should. So we dial down lockup detection during firmware flash. and
809 * dial it back up when firmware flash completes.
810 */
811#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
812#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
813static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
814 struct CommandList *c)
815{
816 if (!is_firmware_flash_cmd(c->Request.CDB))
817 return;
818 atomic_inc(&h->firmware_flash_in_progress);
819 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
820}
821
822static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
823 struct CommandList *c)
824{
825 if (is_firmware_flash_cmd(c->Request.CDB) &&
826 atomic_dec_and_test(&h->firmware_flash_in_progress))
827 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
828}
829
3f5eac3a
SC
830static void enqueue_cmd_and_start_io(struct ctlr_info *h,
831 struct CommandList *c)
832{
833 unsigned long flags;
834
c349775e
ST
835 switch (c->cmd_type) {
836 case CMD_IOACCEL1:
837 set_ioaccel1_performant_mode(h, c);
838 break;
839 case CMD_IOACCEL2:
840 set_ioaccel2_performant_mode(h, c);
841 break;
842 default:
843 set_performant_mode(h, c);
844 }
e85c5974 845 dial_down_lockup_detection_during_fw_flash(h, c);
3f5eac3a
SC
846 spin_lock_irqsave(&h->lock, flags);
847 addQ(&h->reqQ, c);
848 h->Qdepth++;
0b57075d 849 start_io(h, &flags);
3f5eac3a
SC
850 spin_unlock_irqrestore(&h->lock, flags);
851}
852
853static inline void removeQ(struct CommandList *c)
854{
855 if (WARN_ON(list_empty(&c->list)))
856 return;
857 list_del_init(&c->list);
858}
859
860static inline int is_hba_lunid(unsigned char scsi3addr[])
861{
862 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
863}
864
865static inline int is_scsi_rev_5(struct ctlr_info *h)
866{
867 if (!h->hba_inquiry_data)
868 return 0;
869 if ((h->hba_inquiry_data[2] & 0x07) == 5)
870 return 1;
871 return 0;
872}
873
edd16368
SC
874static int hpsa_find_target_lun(struct ctlr_info *h,
875 unsigned char scsi3addr[], int bus, int *target, int *lun)
876{
877 /* finds an unused bus, target, lun for a new physical device
878 * assumes h->devlock is held
879 */
880 int i, found = 0;
cfe5badc 881 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
edd16368 882
263d9401 883 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
edd16368
SC
884
885 for (i = 0; i < h->ndevices; i++) {
886 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
263d9401 887 __set_bit(h->dev[i]->target, lun_taken);
edd16368
SC
888 }
889
263d9401
AM
890 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
891 if (i < HPSA_MAX_DEVICES) {
892 /* *bus = 1; */
893 *target = i;
894 *lun = 0;
895 found = 1;
edd16368
SC
896 }
897 return !found;
898}
899
900/* Add an entry into h->dev[] array. */
901static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
902 struct hpsa_scsi_dev_t *device,
903 struct hpsa_scsi_dev_t *added[], int *nadded)
904{
905 /* assumes h->devlock is held */
906 int n = h->ndevices;
907 int i;
908 unsigned char addr1[8], addr2[8];
909 struct hpsa_scsi_dev_t *sd;
910
cfe5badc 911 if (n >= HPSA_MAX_DEVICES) {
edd16368
SC
912 dev_err(&h->pdev->dev, "too many devices, some will be "
913 "inaccessible.\n");
914 return -1;
915 }
916
917 /* physical devices do not have lun or target assigned until now. */
918 if (device->lun != -1)
919 /* Logical device, lun is already assigned. */
920 goto lun_assigned;
921
922 /* If this device a non-zero lun of a multi-lun device
923 * byte 4 of the 8-byte LUN addr will contain the logical
924 * unit no, zero otherise.
925 */
926 if (device->scsi3addr[4] == 0) {
927 /* This is not a non-zero lun of a multi-lun device */
928 if (hpsa_find_target_lun(h, device->scsi3addr,
929 device->bus, &device->target, &device->lun) != 0)
930 return -1;
931 goto lun_assigned;
932 }
933
934 /* This is a non-zero lun of a multi-lun device.
935 * Search through our list and find the device which
936 * has the same 8 byte LUN address, excepting byte 4.
937 * Assign the same bus and target for this new LUN.
938 * Use the logical unit number from the firmware.
939 */
940 memcpy(addr1, device->scsi3addr, 8);
941 addr1[4] = 0;
942 for (i = 0; i < n; i++) {
943 sd = h->dev[i];
944 memcpy(addr2, sd->scsi3addr, 8);
945 addr2[4] = 0;
946 /* differ only in byte 4? */
947 if (memcmp(addr1, addr2, 8) == 0) {
948 device->bus = sd->bus;
949 device->target = sd->target;
950 device->lun = device->scsi3addr[4];
951 break;
952 }
953 }
954 if (device->lun == -1) {
955 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
956 " suspect firmware bug or unsupported hardware "
957 "configuration.\n");
958 return -1;
959 }
960
961lun_assigned:
962
963 h->dev[n] = device;
964 h->ndevices++;
965 added[*nadded] = device;
966 (*nadded)++;
967
968 /* initially, (before registering with scsi layer) we don't
969 * know our hostno and we don't want to print anything first
970 * time anyway (the scsi layer's inquiries will show that info)
971 */
972 /* if (hostno != -1) */
973 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
974 scsi_device_type(device->devtype), hostno,
975 device->bus, device->target, device->lun);
976 return 0;
977}
978
bd9244f7
ST
979/* Update an entry in h->dev[] array. */
980static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
981 int entry, struct hpsa_scsi_dev_t *new_entry)
982{
983 /* assumes h->devlock is held */
984 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
985
986 /* Raid level changed. */
987 h->dev[entry]->raid_level = new_entry->raid_level;
250fb125
SC
988
989 /* Raid offload parameters changed. */
990 h->dev[entry]->offload_config = new_entry->offload_config;
991 h->dev[entry]->offload_enabled = new_entry->offload_enabled;
9fb0de2d
SC
992 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
993 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
994 h->dev[entry]->raid_map = new_entry->raid_map;
250fb125 995
bd9244f7
ST
996 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
997 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
998 new_entry->target, new_entry->lun);
999}
1000
2a8ccf31
SC
1001/* Replace an entry from h->dev[] array. */
1002static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1003 int entry, struct hpsa_scsi_dev_t *new_entry,
1004 struct hpsa_scsi_dev_t *added[], int *nadded,
1005 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1006{
1007 /* assumes h->devlock is held */
cfe5badc 1008 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
2a8ccf31
SC
1009 removed[*nremoved] = h->dev[entry];
1010 (*nremoved)++;
01350d05
SC
1011
1012 /*
1013 * New physical devices won't have target/lun assigned yet
1014 * so we need to preserve the values in the slot we are replacing.
1015 */
1016 if (new_entry->target == -1) {
1017 new_entry->target = h->dev[entry]->target;
1018 new_entry->lun = h->dev[entry]->lun;
1019 }
1020
2a8ccf31
SC
1021 h->dev[entry] = new_entry;
1022 added[*nadded] = new_entry;
1023 (*nadded)++;
1024 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
1025 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
1026 new_entry->target, new_entry->lun);
1027}
1028
edd16368
SC
1029/* Remove an entry from h->dev[] array. */
1030static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1031 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1032{
1033 /* assumes h->devlock is held */
1034 int i;
1035 struct hpsa_scsi_dev_t *sd;
1036
cfe5badc 1037 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
edd16368
SC
1038
1039 sd = h->dev[entry];
1040 removed[*nremoved] = h->dev[entry];
1041 (*nremoved)++;
1042
1043 for (i = entry; i < h->ndevices-1; i++)
1044 h->dev[i] = h->dev[i+1];
1045 h->ndevices--;
1046 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
1047 scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
1048 sd->lun);
1049}
1050
1051#define SCSI3ADDR_EQ(a, b) ( \
1052 (a)[7] == (b)[7] && \
1053 (a)[6] == (b)[6] && \
1054 (a)[5] == (b)[5] && \
1055 (a)[4] == (b)[4] && \
1056 (a)[3] == (b)[3] && \
1057 (a)[2] == (b)[2] && \
1058 (a)[1] == (b)[1] && \
1059 (a)[0] == (b)[0])
1060
1061static void fixup_botched_add(struct ctlr_info *h,
1062 struct hpsa_scsi_dev_t *added)
1063{
1064 /* called when scsi_add_device fails in order to re-adjust
1065 * h->dev[] to match the mid layer's view.
1066 */
1067 unsigned long flags;
1068 int i, j;
1069
1070 spin_lock_irqsave(&h->lock, flags);
1071 for (i = 0; i < h->ndevices; i++) {
1072 if (h->dev[i] == added) {
1073 for (j = i; j < h->ndevices-1; j++)
1074 h->dev[j] = h->dev[j+1];
1075 h->ndevices--;
1076 break;
1077 }
1078 }
1079 spin_unlock_irqrestore(&h->lock, flags);
1080 kfree(added);
1081}
1082
1083static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1084 struct hpsa_scsi_dev_t *dev2)
1085{
edd16368
SC
1086 /* we compare everything except lun and target as these
1087 * are not yet assigned. Compare parts likely
1088 * to differ first
1089 */
1090 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1091 sizeof(dev1->scsi3addr)) != 0)
1092 return 0;
1093 if (memcmp(dev1->device_id, dev2->device_id,
1094 sizeof(dev1->device_id)) != 0)
1095 return 0;
1096 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1097 return 0;
1098 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1099 return 0;
edd16368
SC
1100 if (dev1->devtype != dev2->devtype)
1101 return 0;
edd16368
SC
1102 if (dev1->bus != dev2->bus)
1103 return 0;
1104 return 1;
1105}
1106
bd9244f7
ST
1107static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1108 struct hpsa_scsi_dev_t *dev2)
1109{
1110 /* Device attributes that can change, but don't mean
1111 * that the device is a different device, nor that the OS
1112 * needs to be told anything about the change.
1113 */
1114 if (dev1->raid_level != dev2->raid_level)
1115 return 1;
250fb125
SC
1116 if (dev1->offload_config != dev2->offload_config)
1117 return 1;
1118 if (dev1->offload_enabled != dev2->offload_enabled)
1119 return 1;
bd9244f7
ST
1120 return 0;
1121}
1122
edd16368
SC
1123/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1124 * and return needle location in *index. If scsi3addr matches, but not
1125 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
bd9244f7
ST
1126 * location in *index.
1127 * In the case of a minor device attribute change, such as RAID level, just
1128 * return DEVICE_UPDATED, along with the updated device's location in index.
1129 * If needle not found, return DEVICE_NOT_FOUND.
edd16368
SC
1130 */
1131static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1132 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1133 int *index)
1134{
1135 int i;
1136#define DEVICE_NOT_FOUND 0
1137#define DEVICE_CHANGED 1
1138#define DEVICE_SAME 2
bd9244f7 1139#define DEVICE_UPDATED 3
edd16368 1140 for (i = 0; i < haystack_size; i++) {
23231048
SC
1141 if (haystack[i] == NULL) /* previously removed. */
1142 continue;
edd16368
SC
1143 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1144 *index = i;
bd9244f7
ST
1145 if (device_is_the_same(needle, haystack[i])) {
1146 if (device_updated(needle, haystack[i]))
1147 return DEVICE_UPDATED;
edd16368 1148 return DEVICE_SAME;
bd9244f7 1149 } else {
9846590e
SC
1150 /* Keep offline devices offline */
1151 if (needle->volume_offline)
1152 return DEVICE_NOT_FOUND;
edd16368 1153 return DEVICE_CHANGED;
bd9244f7 1154 }
edd16368
SC
1155 }
1156 }
1157 *index = -1;
1158 return DEVICE_NOT_FOUND;
1159}
1160
9846590e
SC
1161static void hpsa_monitor_offline_device(struct ctlr_info *h,
1162 unsigned char scsi3addr[])
1163{
1164 struct offline_device_entry *device;
1165 unsigned long flags;
1166
1167 /* Check to see if device is already on the list */
1168 spin_lock_irqsave(&h->offline_device_lock, flags);
1169 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1170 if (memcmp(device->scsi3addr, scsi3addr,
1171 sizeof(device->scsi3addr)) == 0) {
1172 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1173 return;
1174 }
1175 }
1176 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1177
1178 /* Device is not on the list, add it. */
1179 device = kmalloc(sizeof(*device), GFP_KERNEL);
1180 if (!device) {
1181 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1182 return;
1183 }
1184 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1185 spin_lock_irqsave(&h->offline_device_lock, flags);
1186 list_add_tail(&device->offline_list, &h->offline_device_list);
1187 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1188}
1189
1190/* Print a message explaining various offline volume states */
1191static void hpsa_show_volume_status(struct ctlr_info *h,
1192 struct hpsa_scsi_dev_t *sd)
1193{
1194 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1195 dev_info(&h->pdev->dev,
1196 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1197 h->scsi_host->host_no,
1198 sd->bus, sd->target, sd->lun);
1199 switch (sd->volume_offline) {
1200 case HPSA_LV_OK:
1201 break;
1202 case HPSA_LV_UNDERGOING_ERASE:
1203 dev_info(&h->pdev->dev,
1204 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1205 h->scsi_host->host_no,
1206 sd->bus, sd->target, sd->lun);
1207 break;
1208 case HPSA_LV_UNDERGOING_RPI:
1209 dev_info(&h->pdev->dev,
1210 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1211 h->scsi_host->host_no,
1212 sd->bus, sd->target, sd->lun);
1213 break;
1214 case HPSA_LV_PENDING_RPI:
1215 dev_info(&h->pdev->dev,
1216 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1217 h->scsi_host->host_no,
1218 sd->bus, sd->target, sd->lun);
1219 break;
1220 case HPSA_LV_ENCRYPTED_NO_KEY:
1221 dev_info(&h->pdev->dev,
1222 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1223 h->scsi_host->host_no,
1224 sd->bus, sd->target, sd->lun);
1225 break;
1226 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1227 dev_info(&h->pdev->dev,
1228 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1229 h->scsi_host->host_no,
1230 sd->bus, sd->target, sd->lun);
1231 break;
1232 case HPSA_LV_UNDERGOING_ENCRYPTION:
1233 dev_info(&h->pdev->dev,
1234 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1235 h->scsi_host->host_no,
1236 sd->bus, sd->target, sd->lun);
1237 break;
1238 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1239 dev_info(&h->pdev->dev,
1240 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1241 h->scsi_host->host_no,
1242 sd->bus, sd->target, sd->lun);
1243 break;
1244 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1245 dev_info(&h->pdev->dev,
1246 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1247 h->scsi_host->host_no,
1248 sd->bus, sd->target, sd->lun);
1249 break;
1250 case HPSA_LV_PENDING_ENCRYPTION:
1251 dev_info(&h->pdev->dev,
1252 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1253 h->scsi_host->host_no,
1254 sd->bus, sd->target, sd->lun);
1255 break;
1256 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1257 dev_info(&h->pdev->dev,
1258 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1259 h->scsi_host->host_no,
1260 sd->bus, sd->target, sd->lun);
1261 break;
1262 }
1263}
1264
4967bd3e 1265static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
edd16368
SC
1266 struct hpsa_scsi_dev_t *sd[], int nsds)
1267{
1268 /* sd contains scsi3 addresses and devtypes, and inquiry
1269 * data. This function takes what's in sd to be the current
1270 * reality and updates h->dev[] to reflect that reality.
1271 */
1272 int i, entry, device_change, changes = 0;
1273 struct hpsa_scsi_dev_t *csd;
1274 unsigned long flags;
1275 struct hpsa_scsi_dev_t **added, **removed;
1276 int nadded, nremoved;
1277 struct Scsi_Host *sh = NULL;
1278
cfe5badc
ST
1279 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1280 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
edd16368
SC
1281
1282 if (!added || !removed) {
1283 dev_warn(&h->pdev->dev, "out of memory in "
1284 "adjust_hpsa_scsi_table\n");
1285 goto free_and_out;
1286 }
1287
1288 spin_lock_irqsave(&h->devlock, flags);
1289
1290 /* find any devices in h->dev[] that are not in
1291 * sd[] and remove them from h->dev[], and for any
1292 * devices which have changed, remove the old device
1293 * info and add the new device info.
bd9244f7
ST
1294 * If minor device attributes change, just update
1295 * the existing device structure.
edd16368
SC
1296 */
1297 i = 0;
1298 nremoved = 0;
1299 nadded = 0;
1300 while (i < h->ndevices) {
1301 csd = h->dev[i];
1302 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1303 if (device_change == DEVICE_NOT_FOUND) {
1304 changes++;
1305 hpsa_scsi_remove_entry(h, hostno, i,
1306 removed, &nremoved);
1307 continue; /* remove ^^^, hence i not incremented */
1308 } else if (device_change == DEVICE_CHANGED) {
1309 changes++;
2a8ccf31
SC
1310 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1311 added, &nadded, removed, &nremoved);
c7f172dc
SC
1312 /* Set it to NULL to prevent it from being freed
1313 * at the bottom of hpsa_update_scsi_devices()
1314 */
1315 sd[entry] = NULL;
bd9244f7
ST
1316 } else if (device_change == DEVICE_UPDATED) {
1317 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
edd16368
SC
1318 }
1319 i++;
1320 }
1321
1322 /* Now, make sure every device listed in sd[] is also
1323 * listed in h->dev[], adding them if they aren't found
1324 */
1325
1326 for (i = 0; i < nsds; i++) {
1327 if (!sd[i]) /* if already added above. */
1328 continue;
9846590e
SC
1329
1330 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1331 * as the SCSI mid-layer does not handle such devices well.
1332 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1333 * at 160Hz, and prevents the system from coming up.
1334 */
1335 if (sd[i]->volume_offline) {
1336 hpsa_show_volume_status(h, sd[i]);
1337 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
1338 h->scsi_host->host_no,
1339 sd[i]->bus, sd[i]->target, sd[i]->lun);
1340 continue;
1341 }
1342
edd16368
SC
1343 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1344 h->ndevices, &entry);
1345 if (device_change == DEVICE_NOT_FOUND) {
1346 changes++;
1347 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1348 added, &nadded) != 0)
1349 break;
1350 sd[i] = NULL; /* prevent from being freed later. */
1351 } else if (device_change == DEVICE_CHANGED) {
1352 /* should never happen... */
1353 changes++;
1354 dev_warn(&h->pdev->dev,
1355 "device unexpectedly changed.\n");
1356 /* but if it does happen, we just ignore that device */
1357 }
1358 }
1359 spin_unlock_irqrestore(&h->devlock, flags);
1360
9846590e
SC
1361 /* Monitor devices which are in one of several NOT READY states to be
1362 * brought online later. This must be done without holding h->devlock,
1363 * so don't touch h->dev[]
1364 */
1365 for (i = 0; i < nsds; i++) {
1366 if (!sd[i]) /* if already added above. */
1367 continue;
1368 if (sd[i]->volume_offline)
1369 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1370 }
1371
edd16368
SC
1372 /* Don't notify scsi mid layer of any changes the first time through
1373 * (or if there are no changes) scsi_scan_host will do it later the
1374 * first time through.
1375 */
1376 if (hostno == -1 || !changes)
1377 goto free_and_out;
1378
1379 sh = h->scsi_host;
1380 /* Notify scsi mid layer of any removed devices */
1381 for (i = 0; i < nremoved; i++) {
1382 struct scsi_device *sdev =
1383 scsi_device_lookup(sh, removed[i]->bus,
1384 removed[i]->target, removed[i]->lun);
1385 if (sdev != NULL) {
1386 scsi_remove_device(sdev);
1387 scsi_device_put(sdev);
1388 } else {
1389 /* We don't expect to get here.
1390 * future cmds to this device will get selection
1391 * timeout as if the device was gone.
1392 */
1393 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
1394 " for removal.", hostno, removed[i]->bus,
1395 removed[i]->target, removed[i]->lun);
1396 }
1397 kfree(removed[i]);
1398 removed[i] = NULL;
1399 }
1400
1401 /* Notify scsi mid layer of any added devices */
1402 for (i = 0; i < nadded; i++) {
1403 if (scsi_add_device(sh, added[i]->bus,
1404 added[i]->target, added[i]->lun) == 0)
1405 continue;
1406 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1407 "device not added.\n", hostno, added[i]->bus,
1408 added[i]->target, added[i]->lun);
1409 /* now we have to remove it from h->dev,
1410 * since it didn't get added to scsi mid layer
1411 */
1412 fixup_botched_add(h, added[i]);
1413 }
1414
1415free_and_out:
1416 kfree(added);
1417 kfree(removed);
edd16368
SC
1418}
1419
1420/*
9e03aa2f 1421 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
edd16368
SC
1422 * Assume's h->devlock is held.
1423 */
1424static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1425 int bus, int target, int lun)
1426{
1427 int i;
1428 struct hpsa_scsi_dev_t *sd;
1429
1430 for (i = 0; i < h->ndevices; i++) {
1431 sd = h->dev[i];
1432 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1433 return sd;
1434 }
1435 return NULL;
1436}
1437
1438/* link sdev->hostdata to our per-device structure. */
1439static int hpsa_slave_alloc(struct scsi_device *sdev)
1440{
1441 struct hpsa_scsi_dev_t *sd;
1442 unsigned long flags;
1443 struct ctlr_info *h;
1444
1445 h = sdev_to_hba(sdev);
1446 spin_lock_irqsave(&h->devlock, flags);
1447 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1448 sdev_id(sdev), sdev->lun);
1449 if (sd != NULL)
1450 sdev->hostdata = sd;
1451 spin_unlock_irqrestore(&h->devlock, flags);
1452 return 0;
1453}
1454
1455static void hpsa_slave_destroy(struct scsi_device *sdev)
1456{
bcc44255 1457 /* nothing to do. */
edd16368
SC
1458}
1459
33a2ffce
SC
1460static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1461{
1462 int i;
1463
1464 if (!h->cmd_sg_list)
1465 return;
1466 for (i = 0; i < h->nr_cmds; i++) {
1467 kfree(h->cmd_sg_list[i]);
1468 h->cmd_sg_list[i] = NULL;
1469 }
1470 kfree(h->cmd_sg_list);
1471 h->cmd_sg_list = NULL;
1472}
1473
1474static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1475{
1476 int i;
1477
1478 if (h->chainsize <= 0)
1479 return 0;
1480
1481 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1482 GFP_KERNEL);
1483 if (!h->cmd_sg_list)
1484 return -ENOMEM;
1485 for (i = 0; i < h->nr_cmds; i++) {
1486 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1487 h->chainsize, GFP_KERNEL);
1488 if (!h->cmd_sg_list[i])
1489 goto clean;
1490 }
1491 return 0;
1492
1493clean:
1494 hpsa_free_sg_chain_blocks(h);
1495 return -ENOMEM;
1496}
1497
e2bea6df 1498static int hpsa_map_sg_chain_block(struct ctlr_info *h,
33a2ffce
SC
1499 struct CommandList *c)
1500{
1501 struct SGDescriptor *chain_sg, *chain_block;
1502 u64 temp64;
1503
1504 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1505 chain_block = h->cmd_sg_list[c->cmdindex];
1506 chain_sg->Ext = HPSA_SG_CHAIN;
1507 chain_sg->Len = sizeof(*chain_sg) *
1508 (c->Header.SGTotal - h->max_cmd_sg_entries);
1509 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
1510 PCI_DMA_TODEVICE);
e2bea6df
SC
1511 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1512 /* prevent subsequent unmapping */
1513 chain_sg->Addr.lower = 0;
1514 chain_sg->Addr.upper = 0;
1515 return -1;
1516 }
33a2ffce
SC
1517 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
1518 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
e2bea6df 1519 return 0;
33a2ffce
SC
1520}
1521
1522static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1523 struct CommandList *c)
1524{
1525 struct SGDescriptor *chain_sg;
1526 union u64bit temp64;
1527
1528 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
1529 return;
1530
1531 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1532 temp64.val32.lower = chain_sg->Addr.lower;
1533 temp64.val32.upper = chain_sg->Addr.upper;
1534 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1535}
1536
a09c1441
ST
1537
1538/* Decode the various types of errors on ioaccel2 path.
1539 * Return 1 for any error that should generate a RAID path retry.
1540 * Return 0 for errors that don't require a RAID path retry.
1541 */
1542static int handle_ioaccel_mode2_error(struct ctlr_info *h,
c349775e
ST
1543 struct CommandList *c,
1544 struct scsi_cmnd *cmd,
1545 struct io_accel2_cmd *c2)
1546{
1547 int data_len;
a09c1441 1548 int retry = 0;
c349775e
ST
1549
1550 switch (c2->error_data.serv_response) {
1551 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1552 switch (c2->error_data.status) {
1553 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1554 break;
1555 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1556 dev_warn(&h->pdev->dev,
1557 "%s: task complete with check condition.\n",
1558 "HP SSD Smart Path");
ee6b1889 1559 cmd->result |= SAM_STAT_CHECK_CONDITION;
c349775e 1560 if (c2->error_data.data_present !=
ee6b1889
SC
1561 IOACCEL2_SENSE_DATA_PRESENT) {
1562 memset(cmd->sense_buffer, 0,
1563 SCSI_SENSE_BUFFERSIZE);
c349775e 1564 break;
ee6b1889 1565 }
c349775e
ST
1566 /* copy the sense data */
1567 data_len = c2->error_data.sense_data_len;
1568 if (data_len > SCSI_SENSE_BUFFERSIZE)
1569 data_len = SCSI_SENSE_BUFFERSIZE;
1570 if (data_len > sizeof(c2->error_data.sense_data_buff))
1571 data_len =
1572 sizeof(c2->error_data.sense_data_buff);
1573 memcpy(cmd->sense_buffer,
1574 c2->error_data.sense_data_buff, data_len);
a09c1441 1575 retry = 1;
c349775e
ST
1576 break;
1577 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1578 dev_warn(&h->pdev->dev,
1579 "%s: task complete with BUSY status.\n",
1580 "HP SSD Smart Path");
a09c1441 1581 retry = 1;
c349775e
ST
1582 break;
1583 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1584 dev_warn(&h->pdev->dev,
1585 "%s: task complete with reservation conflict.\n",
1586 "HP SSD Smart Path");
a09c1441 1587 retry = 1;
c349775e
ST
1588 break;
1589 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1590 /* Make scsi midlayer do unlimited retries */
1591 cmd->result = DID_IMM_RETRY << 16;
1592 break;
1593 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1594 dev_warn(&h->pdev->dev,
1595 "%s: task complete with aborted status.\n",
1596 "HP SSD Smart Path");
a09c1441 1597 retry = 1;
c349775e
ST
1598 break;
1599 default:
1600 dev_warn(&h->pdev->dev,
1601 "%s: task complete with unrecognized status: 0x%02x\n",
1602 "HP SSD Smart Path", c2->error_data.status);
a09c1441 1603 retry = 1;
c349775e
ST
1604 break;
1605 }
1606 break;
1607 case IOACCEL2_SERV_RESPONSE_FAILURE:
1608 /* don't expect to get here. */
1609 dev_warn(&h->pdev->dev,
1610 "unexpected delivery or target failure, status = 0x%02x\n",
1611 c2->error_data.status);
a09c1441 1612 retry = 1;
c349775e
ST
1613 break;
1614 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1615 break;
1616 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1617 break;
1618 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1619 dev_warn(&h->pdev->dev, "task management function rejected.\n");
a09c1441 1620 retry = 1;
c349775e
ST
1621 break;
1622 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1623 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1624 break;
1625 default:
1626 dev_warn(&h->pdev->dev,
1627 "%s: Unrecognized server response: 0x%02x\n",
a09c1441
ST
1628 "HP SSD Smart Path",
1629 c2->error_data.serv_response);
1630 retry = 1;
c349775e
ST
1631 break;
1632 }
a09c1441
ST
1633
1634 return retry; /* retry on raid path? */
c349775e
ST
1635}
1636
1637static void process_ioaccel2_completion(struct ctlr_info *h,
1638 struct CommandList *c, struct scsi_cmnd *cmd,
1639 struct hpsa_scsi_dev_t *dev)
1640{
1641 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
a09c1441 1642 int raid_retry = 0;
c349775e
ST
1643
1644 /* check for good status */
1645 if (likely(c2->error_data.serv_response == 0 &&
1646 c2->error_data.status == 0)) {
1647 cmd_free(h, c);
1648 cmd->scsi_done(cmd);
1649 return;
1650 }
1651
1652 /* Any RAID offload error results in retry which will use
1653 * the normal I/O path so the controller can handle whatever's
1654 * wrong.
1655 */
1656 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1657 c2->error_data.serv_response ==
1658 IOACCEL2_SERV_RESPONSE_FAILURE) {
c349775e 1659 dev->offload_enabled = 0;
e863d68e 1660 h->drv_req_rescan = 1; /* schedule controller for a rescan */
c349775e
ST
1661 cmd->result = DID_SOFT_ERROR << 16;
1662 cmd_free(h, c);
1663 cmd->scsi_done(cmd);
1664 return;
1665 }
a09c1441
ST
1666 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1667 /* If error found, disable Smart Path, schedule a rescan,
1668 * and force a retry on the standard path.
1669 */
1670 if (raid_retry) {
1671 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1672 "HP SSD Smart Path");
1673 dev->offload_enabled = 0; /* Disable Smart Path */
1674 h->drv_req_rescan = 1; /* schedule controller rescan */
1675 cmd->result = DID_SOFT_ERROR << 16;
1676 }
c349775e
ST
1677 cmd_free(h, c);
1678 cmd->scsi_done(cmd);
1679}
1680
1fb011fb 1681static void complete_scsi_command(struct CommandList *cp)
edd16368
SC
1682{
1683 struct scsi_cmnd *cmd;
1684 struct ctlr_info *h;
1685 struct ErrorInfo *ei;
283b4a9b 1686 struct hpsa_scsi_dev_t *dev;
edd16368
SC
1687
1688 unsigned char sense_key;
1689 unsigned char asc; /* additional sense code */
1690 unsigned char ascq; /* additional sense code qualifier */
db111e18 1691 unsigned long sense_data_size;
edd16368
SC
1692
1693 ei = cp->err_info;
1694 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1695 h = cp->h;
283b4a9b 1696 dev = cmd->device->hostdata;
edd16368
SC
1697
1698 scsi_dma_unmap(cmd); /* undo the DMA mappings */
e1f7de0c
MG
1699 if ((cp->cmd_type == CMD_SCSI) &&
1700 (cp->Header.SGTotal > h->max_cmd_sg_entries))
33a2ffce 1701 hpsa_unmap_sg_chain_block(h, cp);
edd16368
SC
1702
1703 cmd->result = (DID_OK << 16); /* host byte */
1704 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
c349775e
ST
1705
1706 if (cp->cmd_type == CMD_IOACCEL2)
1707 return process_ioaccel2_completion(h, cp, cmd, dev);
1708
5512672f 1709 cmd->result |= ei->ScsiStatus;
edd16368 1710
6aa4c361
RE
1711 scsi_set_resid(cmd, ei->ResidualCnt);
1712 if (ei->CommandStatus == 0) {
1713 cmd_free(h, cp);
1714 cmd->scsi_done(cmd);
1715 return;
1716 }
1717
1718 /* copy the sense data */
db111e18
SC
1719 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1720 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1721 else
1722 sense_data_size = sizeof(ei->SenseInfo);
1723 if (ei->SenseLen < sense_data_size)
1724 sense_data_size = ei->SenseLen;
1725
1726 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
edd16368 1727
e1f7de0c
MG
1728 /* For I/O accelerator commands, copy over some fields to the normal
1729 * CISS header used below for error handling.
1730 */
1731 if (cp->cmd_type == CMD_IOACCEL1) {
1732 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1733 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1734 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1735 cp->Header.Tag.lower = c->Tag.lower;
1736 cp->Header.Tag.upper = c->Tag.upper;
1737 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1738 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
283b4a9b
SC
1739
1740 /* Any RAID offload error results in retry which will use
1741 * the normal I/O path so the controller can handle whatever's
1742 * wrong.
1743 */
1744 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1745 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1746 dev->offload_enabled = 0;
1747 cmd->result = DID_SOFT_ERROR << 16;
1748 cmd_free(h, cp);
1749 cmd->scsi_done(cmd);
1750 return;
1751 }
e1f7de0c
MG
1752 }
1753
edd16368
SC
1754 /* an error has occurred */
1755 switch (ei->CommandStatus) {
1756
1757 case CMD_TARGET_STATUS:
1758 if (ei->ScsiStatus) {
1759 /* Get sense key */
1760 sense_key = 0xf & ei->SenseInfo[2];
1761 /* Get additional sense code */
1762 asc = ei->SenseInfo[12];
1763 /* Get addition sense code qualifier */
1764 ascq = ei->SenseInfo[13];
1765 }
1766
1767 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
3ce438df 1768 if (check_for_unit_attention(h, cp))
edd16368 1769 break;
edd16368
SC
1770 if (sense_key == ILLEGAL_REQUEST) {
1771 /*
1772 * SCSI REPORT_LUNS is commonly unsupported on
1773 * Smart Array. Suppress noisy complaint.
1774 */
1775 if (cp->Request.CDB[0] == REPORT_LUNS)
1776 break;
1777
1778 /* If ASC/ASCQ indicate Logical Unit
1779 * Not Supported condition,
1780 */
1781 if ((asc == 0x25) && (ascq == 0x0)) {
1782 dev_warn(&h->pdev->dev, "cp %p "
1783 "has check condition\n", cp);
1784 break;
1785 }
1786 }
1787
1788 if (sense_key == NOT_READY) {
1789 /* If Sense is Not Ready, Logical Unit
1790 * Not ready, Manual Intervention
1791 * required
1792 */
1793 if ((asc == 0x04) && (ascq == 0x03)) {
edd16368
SC
1794 dev_warn(&h->pdev->dev, "cp %p "
1795 "has check condition: unit "
1796 "not ready, manual "
1797 "intervention required\n", cp);
1798 break;
1799 }
1800 }
1d3b3609
MG
1801 if (sense_key == ABORTED_COMMAND) {
1802 /* Aborted command is retryable */
1803 dev_warn(&h->pdev->dev, "cp %p "
1804 "has check condition: aborted command: "
1805 "ASC: 0x%x, ASCQ: 0x%x\n",
1806 cp, asc, ascq);
2e311fba 1807 cmd->result |= DID_SOFT_ERROR << 16;
1d3b3609
MG
1808 break;
1809 }
edd16368 1810 /* Must be some other type of check condition */
21b8e4ef 1811 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
edd16368
SC
1812 "unknown type: "
1813 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1814 "Returning result: 0x%x, "
1815 "cmd=[%02x %02x %02x %02x %02x "
807be732 1816 "%02x %02x %02x %02x %02x %02x "
edd16368
SC
1817 "%02x %02x %02x %02x %02x]\n",
1818 cp, sense_key, asc, ascq,
1819 cmd->result,
1820 cmd->cmnd[0], cmd->cmnd[1],
1821 cmd->cmnd[2], cmd->cmnd[3],
1822 cmd->cmnd[4], cmd->cmnd[5],
1823 cmd->cmnd[6], cmd->cmnd[7],
807be732
MM
1824 cmd->cmnd[8], cmd->cmnd[9],
1825 cmd->cmnd[10], cmd->cmnd[11],
1826 cmd->cmnd[12], cmd->cmnd[13],
1827 cmd->cmnd[14], cmd->cmnd[15]);
edd16368
SC
1828 break;
1829 }
1830
1831
1832 /* Problem was not a check condition
1833 * Pass it up to the upper layers...
1834 */
1835 if (ei->ScsiStatus) {
1836 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1837 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1838 "Returning result: 0x%x\n",
1839 cp, ei->ScsiStatus,
1840 sense_key, asc, ascq,
1841 cmd->result);
1842 } else { /* scsi status is zero??? How??? */
1843 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1844 "Returning no connection.\n", cp),
1845
1846 /* Ordinarily, this case should never happen,
1847 * but there is a bug in some released firmware
1848 * revisions that allows it to happen if, for
1849 * example, a 4100 backplane loses power and
1850 * the tape drive is in it. We assume that
1851 * it's a fatal error of some kind because we
1852 * can't show that it wasn't. We will make it
1853 * look like selection timeout since that is
1854 * the most common reason for this to occur,
1855 * and it's severe enough.
1856 */
1857
1858 cmd->result = DID_NO_CONNECT << 16;
1859 }
1860 break;
1861
1862 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1863 break;
1864 case CMD_DATA_OVERRUN:
1865 dev_warn(&h->pdev->dev, "cp %p has"
1866 " completed with data overrun "
1867 "reported\n", cp);
1868 break;
1869 case CMD_INVALID: {
1870 /* print_bytes(cp, sizeof(*cp), 1, 0);
1871 print_cmd(cp); */
1872 /* We get CMD_INVALID if you address a non-existent device
1873 * instead of a selection timeout (no response). You will
1874 * see this if you yank out a drive, then try to access it.
1875 * This is kind of a shame because it means that any other
1876 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1877 * missing target. */
1878 cmd->result = DID_NO_CONNECT << 16;
1879 }
1880 break;
1881 case CMD_PROTOCOL_ERR:
256d0eaa 1882 cmd->result = DID_ERROR << 16;
edd16368 1883 dev_warn(&h->pdev->dev, "cp %p has "
256d0eaa 1884 "protocol error\n", cp);
edd16368
SC
1885 break;
1886 case CMD_HARDWARE_ERR:
1887 cmd->result = DID_ERROR << 16;
1888 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1889 break;
1890 case CMD_CONNECTION_LOST:
1891 cmd->result = DID_ERROR << 16;
1892 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1893 break;
1894 case CMD_ABORTED:
1895 cmd->result = DID_ABORT << 16;
1896 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1897 cp, ei->ScsiStatus);
1898 break;
1899 case CMD_ABORT_FAILED:
1900 cmd->result = DID_ERROR << 16;
1901 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1902 break;
1903 case CMD_UNSOLICITED_ABORT:
f6e76055
SC
1904 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1905 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
edd16368
SC
1906 "abort\n", cp);
1907 break;
1908 case CMD_TIMEOUT:
1909 cmd->result = DID_TIME_OUT << 16;
1910 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
1911 break;
1d5e2ed0
SC
1912 case CMD_UNABORTABLE:
1913 cmd->result = DID_ERROR << 16;
1914 dev_warn(&h->pdev->dev, "Command unabortable\n");
1915 break;
283b4a9b
SC
1916 case CMD_IOACCEL_DISABLED:
1917 /* This only handles the direct pass-through case since RAID
1918 * offload is handled above. Just attempt a retry.
1919 */
1920 cmd->result = DID_SOFT_ERROR << 16;
1921 dev_warn(&h->pdev->dev,
1922 "cp %p had HP SSD Smart Path error\n", cp);
1923 break;
edd16368
SC
1924 default:
1925 cmd->result = DID_ERROR << 16;
1926 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1927 cp, ei->CommandStatus);
1928 }
edd16368 1929 cmd_free(h, cp);
2cc5bfaf 1930 cmd->scsi_done(cmd);
edd16368
SC
1931}
1932
edd16368
SC
1933static void hpsa_pci_unmap(struct pci_dev *pdev,
1934 struct CommandList *c, int sg_used, int data_direction)
1935{
1936 int i;
1937 union u64bit addr64;
1938
1939 for (i = 0; i < sg_used; i++) {
1940 addr64.val32.lower = c->SG[i].Addr.lower;
1941 addr64.val32.upper = c->SG[i].Addr.upper;
1942 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1943 data_direction);
1944 }
1945}
1946
a2dac136 1947static int hpsa_map_one(struct pci_dev *pdev,
edd16368
SC
1948 struct CommandList *cp,
1949 unsigned char *buf,
1950 size_t buflen,
1951 int data_direction)
1952{
01a02ffc 1953 u64 addr64;
edd16368
SC
1954
1955 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1956 cp->Header.SGList = 0;
1957 cp->Header.SGTotal = 0;
a2dac136 1958 return 0;
edd16368
SC
1959 }
1960
01a02ffc 1961 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
eceaae18 1962 if (dma_mapping_error(&pdev->dev, addr64)) {
a2dac136 1963 /* Prevent subsequent unmap of something never mapped */
eceaae18
SK
1964 cp->Header.SGList = 0;
1965 cp->Header.SGTotal = 0;
a2dac136 1966 return -1;
eceaae18 1967 }
edd16368 1968 cp->SG[0].Addr.lower =
01a02ffc 1969 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
edd16368 1970 cp->SG[0].Addr.upper =
01a02ffc 1971 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
edd16368 1972 cp->SG[0].Len = buflen;
e1d9cbfa 1973 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
01a02ffc
SC
1974 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1975 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
a2dac136 1976 return 0;
edd16368
SC
1977}
1978
1979static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1980 struct CommandList *c)
1981{
1982 DECLARE_COMPLETION_ONSTACK(wait);
1983
1984 c->waiting = &wait;
1985 enqueue_cmd_and_start_io(h, c);
1986 wait_for_completion(&wait);
1987}
1988
094963da
SC
1989static u32 lockup_detected(struct ctlr_info *h)
1990{
1991 int cpu;
1992 u32 rc, *lockup_detected;
1993
1994 cpu = get_cpu();
1995 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
1996 rc = *lockup_detected;
1997 put_cpu();
1998 return rc;
1999}
2000
a0c12413
SC
2001static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2002 struct CommandList *c)
2003{
a0c12413 2004 /* If controller lockup detected, fake a hardware error. */
094963da 2005 if (unlikely(lockup_detected(h)))
a0c12413 2006 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
094963da 2007 else
a0c12413 2008 hpsa_scsi_do_simple_cmd_core(h, c);
a0c12413
SC
2009}
2010
9c2fc160 2011#define MAX_DRIVER_CMD_RETRIES 25
edd16368
SC
2012static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2013 struct CommandList *c, int data_direction)
2014{
9c2fc160 2015 int backoff_time = 10, retry_count = 0;
edd16368
SC
2016
2017 do {
7630abd0 2018 memset(c->err_info, 0, sizeof(*c->err_info));
edd16368
SC
2019 hpsa_scsi_do_simple_cmd_core(h, c);
2020 retry_count++;
9c2fc160
SC
2021 if (retry_count > 3) {
2022 msleep(backoff_time);
2023 if (backoff_time < 1000)
2024 backoff_time *= 2;
2025 }
852af20a 2026 } while ((check_for_unit_attention(h, c) ||
9c2fc160
SC
2027 check_for_busy(h, c)) &&
2028 retry_count <= MAX_DRIVER_CMD_RETRIES);
edd16368
SC
2029 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2030}
2031
d1e8beac
SC
2032static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2033 struct CommandList *c)
edd16368 2034{
d1e8beac
SC
2035 const u8 *cdb = c->Request.CDB;
2036 const u8 *lun = c->Header.LUN.LunAddrBytes;
2037
2038 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2039 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2040 txt, lun[0], lun[1], lun[2], lun[3],
2041 lun[4], lun[5], lun[6], lun[7],
2042 cdb[0], cdb[1], cdb[2], cdb[3],
2043 cdb[4], cdb[5], cdb[6], cdb[7],
2044 cdb[8], cdb[9], cdb[10], cdb[11],
2045 cdb[12], cdb[13], cdb[14], cdb[15]);
2046}
2047
2048static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2049 struct CommandList *cp)
2050{
2051 const struct ErrorInfo *ei = cp->err_info;
edd16368 2052 struct device *d = &cp->h->pdev->dev;
d1e8beac 2053 const u8 *sd = ei->SenseInfo;
edd16368 2054
edd16368
SC
2055 switch (ei->CommandStatus) {
2056 case CMD_TARGET_STATUS:
d1e8beac
SC
2057 hpsa_print_cmd(h, "SCSI status", cp);
2058 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2059 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2060 sd[2] & 0x0f, sd[12], sd[13]);
2061 else
2062 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
edd16368
SC
2063 if (ei->ScsiStatus == 0)
2064 dev_warn(d, "SCSI status is abnormally zero. "
2065 "(probably indicates selection timeout "
2066 "reported incorrectly due to a known "
2067 "firmware bug, circa July, 2001.)\n");
2068 break;
2069 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
edd16368
SC
2070 break;
2071 case CMD_DATA_OVERRUN:
d1e8beac 2072 hpsa_print_cmd(h, "overrun condition", cp);
edd16368
SC
2073 break;
2074 case CMD_INVALID: {
2075 /* controller unfortunately reports SCSI passthru's
2076 * to non-existent targets as invalid commands.
2077 */
d1e8beac
SC
2078 hpsa_print_cmd(h, "invalid command", cp);
2079 dev_warn(d, "probably means device no longer present\n");
edd16368
SC
2080 }
2081 break;
2082 case CMD_PROTOCOL_ERR:
d1e8beac 2083 hpsa_print_cmd(h, "protocol error", cp);
edd16368
SC
2084 break;
2085 case CMD_HARDWARE_ERR:
d1e8beac 2086 hpsa_print_cmd(h, "hardware error", cp);
edd16368
SC
2087 break;
2088 case CMD_CONNECTION_LOST:
d1e8beac 2089 hpsa_print_cmd(h, "connection lost", cp);
edd16368
SC
2090 break;
2091 case CMD_ABORTED:
d1e8beac 2092 hpsa_print_cmd(h, "aborted", cp);
edd16368
SC
2093 break;
2094 case CMD_ABORT_FAILED:
d1e8beac 2095 hpsa_print_cmd(h, "abort failed", cp);
edd16368
SC
2096 break;
2097 case CMD_UNSOLICITED_ABORT:
d1e8beac 2098 hpsa_print_cmd(h, "unsolicited abort", cp);
edd16368
SC
2099 break;
2100 case CMD_TIMEOUT:
d1e8beac 2101 hpsa_print_cmd(h, "timed out", cp);
edd16368 2102 break;
1d5e2ed0 2103 case CMD_UNABORTABLE:
d1e8beac 2104 hpsa_print_cmd(h, "unabortable", cp);
1d5e2ed0 2105 break;
edd16368 2106 default:
d1e8beac
SC
2107 hpsa_print_cmd(h, "unknown status", cp);
2108 dev_warn(d, "Unknown command status %x\n",
edd16368
SC
2109 ei->CommandStatus);
2110 }
2111}
2112
2113static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
b7bb24eb 2114 u16 page, unsigned char *buf,
edd16368
SC
2115 unsigned char bufsize)
2116{
2117 int rc = IO_OK;
2118 struct CommandList *c;
2119 struct ErrorInfo *ei;
2120
2121 c = cmd_special_alloc(h);
2122
2123 if (c == NULL) { /* trouble... */
2124 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
ecd9aad4 2125 return -ENOMEM;
edd16368
SC
2126 }
2127
a2dac136
SC
2128 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2129 page, scsi3addr, TYPE_CMD)) {
2130 rc = -1;
2131 goto out;
2132 }
edd16368
SC
2133 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2134 ei = c->err_info;
2135 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 2136 hpsa_scsi_interpret_error(h, c);
edd16368
SC
2137 rc = -1;
2138 }
a2dac136 2139out:
edd16368
SC
2140 cmd_special_free(h, c);
2141 return rc;
2142}
2143
316b221a
SC
2144static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2145 unsigned char *scsi3addr, unsigned char page,
2146 struct bmic_controller_parameters *buf, size_t bufsize)
2147{
2148 int rc = IO_OK;
2149 struct CommandList *c;
2150 struct ErrorInfo *ei;
2151
2152 c = cmd_special_alloc(h);
2153
2154 if (c == NULL) { /* trouble... */
2155 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2156 return -ENOMEM;
2157 }
2158
2159 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2160 page, scsi3addr, TYPE_CMD)) {
2161 rc = -1;
2162 goto out;
2163 }
2164 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2165 ei = c->err_info;
2166 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2167 hpsa_scsi_interpret_error(h, c);
2168 rc = -1;
2169 }
2170out:
2171 cmd_special_free(h, c);
2172 return rc;
2173 }
2174
bf711ac6
ST
2175static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2176 u8 reset_type)
edd16368
SC
2177{
2178 int rc = IO_OK;
2179 struct CommandList *c;
2180 struct ErrorInfo *ei;
2181
2182 c = cmd_special_alloc(h);
2183
2184 if (c == NULL) { /* trouble... */
2185 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
e9ea04a6 2186 return -ENOMEM;
edd16368
SC
2187 }
2188
a2dac136 2189 /* fill_cmd can't fail here, no data buffer to map. */
bf711ac6
ST
2190 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2191 scsi3addr, TYPE_MSG);
2192 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
edd16368
SC
2193 hpsa_scsi_do_simple_cmd_core(h, c);
2194 /* no unmap needed here because no data xfer. */
2195
2196 ei = c->err_info;
2197 if (ei->CommandStatus != 0) {
d1e8beac 2198 hpsa_scsi_interpret_error(h, c);
edd16368
SC
2199 rc = -1;
2200 }
2201 cmd_special_free(h, c);
2202 return rc;
2203}
2204
2205static void hpsa_get_raid_level(struct ctlr_info *h,
2206 unsigned char *scsi3addr, unsigned char *raid_level)
2207{
2208 int rc;
2209 unsigned char *buf;
2210
2211 *raid_level = RAID_UNKNOWN;
2212 buf = kzalloc(64, GFP_KERNEL);
2213 if (!buf)
2214 return;
b7bb24eb 2215 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
edd16368
SC
2216 if (rc == 0)
2217 *raid_level = buf[8];
2218 if (*raid_level > RAID_UNKNOWN)
2219 *raid_level = RAID_UNKNOWN;
2220 kfree(buf);
2221 return;
2222}
2223
283b4a9b
SC
2224#define HPSA_MAP_DEBUG
2225#ifdef HPSA_MAP_DEBUG
2226static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2227 struct raid_map_data *map_buff)
2228{
2229 struct raid_map_disk_data *dd = &map_buff->data[0];
2230 int map, row, col;
2231 u16 map_cnt, row_cnt, disks_per_row;
2232
2233 if (rc != 0)
2234 return;
2235
2ba8bfc8
SC
2236 /* Show details only if debugging has been activated. */
2237 if (h->raid_offload_debug < 2)
2238 return;
2239
283b4a9b
SC
2240 dev_info(&h->pdev->dev, "structure_size = %u\n",
2241 le32_to_cpu(map_buff->structure_size));
2242 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2243 le32_to_cpu(map_buff->volume_blk_size));
2244 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2245 le64_to_cpu(map_buff->volume_blk_cnt));
2246 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2247 map_buff->phys_blk_shift);
2248 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2249 map_buff->parity_rotation_shift);
2250 dev_info(&h->pdev->dev, "strip_size = %u\n",
2251 le16_to_cpu(map_buff->strip_size));
2252 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2253 le64_to_cpu(map_buff->disk_starting_blk));
2254 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2255 le64_to_cpu(map_buff->disk_blk_cnt));
2256 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2257 le16_to_cpu(map_buff->data_disks_per_row));
2258 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2259 le16_to_cpu(map_buff->metadata_disks_per_row));
2260 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2261 le16_to_cpu(map_buff->row_cnt));
2262 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2263 le16_to_cpu(map_buff->layout_map_count));
dd0e19f3
ST
2264 dev_info(&h->pdev->dev, "flags = %u\n",
2265 le16_to_cpu(map_buff->flags));
2266 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2267 dev_info(&h->pdev->dev, "encrypytion = ON\n");
2268 else
2269 dev_info(&h->pdev->dev, "encrypytion = OFF\n");
2270 dev_info(&h->pdev->dev, "dekindex = %u\n",
2271 le16_to_cpu(map_buff->dekindex));
283b4a9b
SC
2272
2273 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2274 for (map = 0; map < map_cnt; map++) {
2275 dev_info(&h->pdev->dev, "Map%u:\n", map);
2276 row_cnt = le16_to_cpu(map_buff->row_cnt);
2277 for (row = 0; row < row_cnt; row++) {
2278 dev_info(&h->pdev->dev, " Row%u:\n", row);
2279 disks_per_row =
2280 le16_to_cpu(map_buff->data_disks_per_row);
2281 for (col = 0; col < disks_per_row; col++, dd++)
2282 dev_info(&h->pdev->dev,
2283 " D%02u: h=0x%04x xor=%u,%u\n",
2284 col, dd->ioaccel_handle,
2285 dd->xor_mult[0], dd->xor_mult[1]);
2286 disks_per_row =
2287 le16_to_cpu(map_buff->metadata_disks_per_row);
2288 for (col = 0; col < disks_per_row; col++, dd++)
2289 dev_info(&h->pdev->dev,
2290 " M%02u: h=0x%04x xor=%u,%u\n",
2291 col, dd->ioaccel_handle,
2292 dd->xor_mult[0], dd->xor_mult[1]);
2293 }
2294 }
2295}
2296#else
2297static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2298 __attribute__((unused)) int rc,
2299 __attribute__((unused)) struct raid_map_data *map_buff)
2300{
2301}
2302#endif
2303
2304static int hpsa_get_raid_map(struct ctlr_info *h,
2305 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2306{
2307 int rc = 0;
2308 struct CommandList *c;
2309 struct ErrorInfo *ei;
2310
2311 c = cmd_special_alloc(h);
2312 if (c == NULL) {
2313 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2314 return -ENOMEM;
2315 }
2316 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2317 sizeof(this_device->raid_map), 0,
2318 scsi3addr, TYPE_CMD)) {
2319 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2320 cmd_special_free(h, c);
2321 return -ENOMEM;
2322 }
2323 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2324 ei = c->err_info;
2325 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 2326 hpsa_scsi_interpret_error(h, c);
283b4a9b
SC
2327 cmd_special_free(h, c);
2328 return -1;
2329 }
2330 cmd_special_free(h, c);
2331
2332 /* @todo in the future, dynamically allocate RAID map memory */
2333 if (le32_to_cpu(this_device->raid_map.structure_size) >
2334 sizeof(this_device->raid_map)) {
2335 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2336 rc = -1;
2337 }
2338 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2339 return rc;
2340}
2341
1b70150a
SC
2342static int hpsa_vpd_page_supported(struct ctlr_info *h,
2343 unsigned char scsi3addr[], u8 page)
2344{
2345 int rc;
2346 int i;
2347 int pages;
2348 unsigned char *buf, bufsize;
2349
2350 buf = kzalloc(256, GFP_KERNEL);
2351 if (!buf)
2352 return 0;
2353
2354 /* Get the size of the page list first */
2355 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2356 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2357 buf, HPSA_VPD_HEADER_SZ);
2358 if (rc != 0)
2359 goto exit_unsupported;
2360 pages = buf[3];
2361 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2362 bufsize = pages + HPSA_VPD_HEADER_SZ;
2363 else
2364 bufsize = 255;
2365
2366 /* Get the whole VPD page list */
2367 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2368 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2369 buf, bufsize);
2370 if (rc != 0)
2371 goto exit_unsupported;
2372
2373 pages = buf[3];
2374 for (i = 1; i <= pages; i++)
2375 if (buf[3 + i] == page)
2376 goto exit_supported;
2377exit_unsupported:
2378 kfree(buf);
2379 return 0;
2380exit_supported:
2381 kfree(buf);
2382 return 1;
2383}
2384
283b4a9b
SC
2385static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2386 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2387{
2388 int rc;
2389 unsigned char *buf;
2390 u8 ioaccel_status;
2391
2392 this_device->offload_config = 0;
2393 this_device->offload_enabled = 0;
2394
2395 buf = kzalloc(64, GFP_KERNEL);
2396 if (!buf)
2397 return;
1b70150a
SC
2398 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2399 goto out;
283b4a9b 2400 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
b7bb24eb 2401 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
283b4a9b
SC
2402 if (rc != 0)
2403 goto out;
2404
2405#define IOACCEL_STATUS_BYTE 4
2406#define OFFLOAD_CONFIGURED_BIT 0x01
2407#define OFFLOAD_ENABLED_BIT 0x02
2408 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2409 this_device->offload_config =
2410 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2411 if (this_device->offload_config) {
2412 this_device->offload_enabled =
2413 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2414 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2415 this_device->offload_enabled = 0;
2416 }
2417out:
2418 kfree(buf);
2419 return;
2420}
2421
edd16368
SC
2422/* Get the device id from inquiry page 0x83 */
2423static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2424 unsigned char *device_id, int buflen)
2425{
2426 int rc;
2427 unsigned char *buf;
2428
2429 if (buflen > 16)
2430 buflen = 16;
2431 buf = kzalloc(64, GFP_KERNEL);
2432 if (!buf)
a84d794d 2433 return -ENOMEM;
b7bb24eb 2434 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
edd16368
SC
2435 if (rc == 0)
2436 memcpy(device_id, &buf[8], buflen);
2437 kfree(buf);
2438 return rc != 0;
2439}
2440
2441static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2442 struct ReportLUNdata *buf, int bufsize,
2443 int extended_response)
2444{
2445 int rc = IO_OK;
2446 struct CommandList *c;
2447 unsigned char scsi3addr[8];
2448 struct ErrorInfo *ei;
2449
2450 c = cmd_special_alloc(h);
2451 if (c == NULL) { /* trouble... */
2452 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2453 return -1;
2454 }
e89c0ae7
SC
2455 /* address the controller */
2456 memset(scsi3addr, 0, sizeof(scsi3addr));
a2dac136
SC
2457 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2458 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2459 rc = -1;
2460 goto out;
2461 }
edd16368
SC
2462 if (extended_response)
2463 c->Request.CDB[1] = extended_response;
2464 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2465 ei = c->err_info;
2466 if (ei->CommandStatus != 0 &&
2467 ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 2468 hpsa_scsi_interpret_error(h, c);
edd16368 2469 rc = -1;
283b4a9b
SC
2470 } else {
2471 if (buf->extended_response_flag != extended_response) {
2472 dev_err(&h->pdev->dev,
2473 "report luns requested format %u, got %u\n",
2474 extended_response,
2475 buf->extended_response_flag);
2476 rc = -1;
2477 }
edd16368 2478 }
a2dac136 2479out:
edd16368
SC
2480 cmd_special_free(h, c);
2481 return rc;
2482}
2483
2484static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2485 struct ReportLUNdata *buf,
2486 int bufsize, int extended_response)
2487{
2488 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2489}
2490
2491static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2492 struct ReportLUNdata *buf, int bufsize)
2493{
2494 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2495}
2496
2497static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2498 int bus, int target, int lun)
2499{
2500 device->bus = bus;
2501 device->target = target;
2502 device->lun = lun;
2503}
2504
9846590e
SC
2505/* Use VPD inquiry to get details of volume status */
2506static int hpsa_get_volume_status(struct ctlr_info *h,
2507 unsigned char scsi3addr[])
2508{
2509 int rc;
2510 int status;
2511 int size;
2512 unsigned char *buf;
2513
2514 buf = kzalloc(64, GFP_KERNEL);
2515 if (!buf)
2516 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2517
2518 /* Does controller have VPD for logical volume status? */
24a4b078 2519 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
9846590e 2520 goto exit_failed;
9846590e
SC
2521
2522 /* Get the size of the VPD return buffer */
2523 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2524 buf, HPSA_VPD_HEADER_SZ);
24a4b078 2525 if (rc != 0)
9846590e 2526 goto exit_failed;
9846590e
SC
2527 size = buf[3];
2528
2529 /* Now get the whole VPD buffer */
2530 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2531 buf, size + HPSA_VPD_HEADER_SZ);
24a4b078 2532 if (rc != 0)
9846590e 2533 goto exit_failed;
9846590e
SC
2534 status = buf[4]; /* status byte */
2535
2536 kfree(buf);
2537 return status;
2538exit_failed:
2539 kfree(buf);
2540 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2541}
2542
2543/* Determine offline status of a volume.
2544 * Return either:
2545 * 0 (not offline)
67955ba3 2546 * 0xff (offline for unknown reasons)
9846590e
SC
2547 * # (integer code indicating one of several NOT READY states
2548 * describing why a volume is to be kept offline)
2549 */
67955ba3 2550static int hpsa_volume_offline(struct ctlr_info *h,
9846590e
SC
2551 unsigned char scsi3addr[])
2552{
2553 struct CommandList *c;
2554 unsigned char *sense, sense_key, asc, ascq;
2555 int ldstat = 0;
2556 u16 cmd_status;
2557 u8 scsi_status;
2558#define ASC_LUN_NOT_READY 0x04
2559#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2560#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2561
2562 c = cmd_alloc(h);
2563 if (!c)
2564 return 0;
2565 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2566 hpsa_scsi_do_simple_cmd_core(h, c);
2567 sense = c->err_info->SenseInfo;
2568 sense_key = sense[2];
2569 asc = sense[12];
2570 ascq = sense[13];
2571 cmd_status = c->err_info->CommandStatus;
2572 scsi_status = c->err_info->ScsiStatus;
2573 cmd_free(h, c);
2574 /* Is the volume 'not ready'? */
2575 if (cmd_status != CMD_TARGET_STATUS ||
2576 scsi_status != SAM_STAT_CHECK_CONDITION ||
2577 sense_key != NOT_READY ||
2578 asc != ASC_LUN_NOT_READY) {
2579 return 0;
2580 }
2581
2582 /* Determine the reason for not ready state */
2583 ldstat = hpsa_get_volume_status(h, scsi3addr);
2584
2585 /* Keep volume offline in certain cases: */
2586 switch (ldstat) {
2587 case HPSA_LV_UNDERGOING_ERASE:
2588 case HPSA_LV_UNDERGOING_RPI:
2589 case HPSA_LV_PENDING_RPI:
2590 case HPSA_LV_ENCRYPTED_NO_KEY:
2591 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2592 case HPSA_LV_UNDERGOING_ENCRYPTION:
2593 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2594 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2595 return ldstat;
2596 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2597 /* If VPD status page isn't available,
2598 * use ASC/ASCQ to determine state
2599 */
2600 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2601 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2602 return ldstat;
2603 break;
2604 default:
2605 break;
2606 }
2607 return 0;
2608}
2609
edd16368 2610static int hpsa_update_device_info(struct ctlr_info *h,
0b0e1d6c
SC
2611 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2612 unsigned char *is_OBDR_device)
edd16368 2613{
0b0e1d6c
SC
2614
2615#define OBDR_SIG_OFFSET 43
2616#define OBDR_TAPE_SIG "$DR-10"
2617#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2618#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2619
ea6d3bc3 2620 unsigned char *inq_buff;
0b0e1d6c 2621 unsigned char *obdr_sig;
edd16368 2622
ea6d3bc3 2623 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
edd16368
SC
2624 if (!inq_buff)
2625 goto bail_out;
2626
edd16368
SC
2627 /* Do an inquiry to the device to see what it is. */
2628 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2629 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2630 /* Inquiry failed (msg printed already) */
2631 dev_err(&h->pdev->dev,
2632 "hpsa_update_device_info: inquiry failed\n");
2633 goto bail_out;
2634 }
2635
edd16368
SC
2636 this_device->devtype = (inq_buff[0] & 0x1f);
2637 memcpy(this_device->scsi3addr, scsi3addr, 8);
2638 memcpy(this_device->vendor, &inq_buff[8],
2639 sizeof(this_device->vendor));
2640 memcpy(this_device->model, &inq_buff[16],
2641 sizeof(this_device->model));
edd16368
SC
2642 memset(this_device->device_id, 0,
2643 sizeof(this_device->device_id));
2644 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2645 sizeof(this_device->device_id));
2646
2647 if (this_device->devtype == TYPE_DISK &&
283b4a9b 2648 is_logical_dev_addr_mode(scsi3addr)) {
67955ba3
SC
2649 int volume_offline;
2650
edd16368 2651 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
283b4a9b
SC
2652 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2653 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
67955ba3
SC
2654 volume_offline = hpsa_volume_offline(h, scsi3addr);
2655 if (volume_offline < 0 || volume_offline > 0xff)
2656 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2657 this_device->volume_offline = volume_offline & 0xff;
283b4a9b 2658 } else {
edd16368 2659 this_device->raid_level = RAID_UNKNOWN;
283b4a9b
SC
2660 this_device->offload_config = 0;
2661 this_device->offload_enabled = 0;
9846590e 2662 this_device->volume_offline = 0;
283b4a9b 2663 }
edd16368 2664
0b0e1d6c
SC
2665 if (is_OBDR_device) {
2666 /* See if this is a One-Button-Disaster-Recovery device
2667 * by looking for "$DR-10" at offset 43 in inquiry data.
2668 */
2669 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2670 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2671 strncmp(obdr_sig, OBDR_TAPE_SIG,
2672 OBDR_SIG_LEN) == 0);
2673 }
2674
edd16368
SC
2675 kfree(inq_buff);
2676 return 0;
2677
2678bail_out:
2679 kfree(inq_buff);
2680 return 1;
2681}
2682
4f4eb9f1 2683static unsigned char *ext_target_model[] = {
edd16368
SC
2684 "MSA2012",
2685 "MSA2024",
2686 "MSA2312",
2687 "MSA2324",
fda38518 2688 "P2000 G3 SAS",
e06c8e5c 2689 "MSA 2040 SAS",
edd16368
SC
2690 NULL,
2691};
2692
4f4eb9f1 2693static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
edd16368
SC
2694{
2695 int i;
2696
4f4eb9f1
ST
2697 for (i = 0; ext_target_model[i]; i++)
2698 if (strncmp(device->model, ext_target_model[i],
2699 strlen(ext_target_model[i])) == 0)
edd16368
SC
2700 return 1;
2701 return 0;
2702}
2703
2704/* Helper function to assign bus, target, lun mapping of devices.
4f4eb9f1 2705 * Puts non-external target logical volumes on bus 0, external target logical
edd16368
SC
2706 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
2707 * Logical drive target and lun are assigned at this time, but
2708 * physical device lun and target assignment are deferred (assigned
2709 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2710 */
2711static void figure_bus_target_lun(struct ctlr_info *h,
1f310bde 2712 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
edd16368 2713{
1f310bde
SC
2714 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2715
2716 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2717 /* physical device, target and lun filled in later */
edd16368 2718 if (is_hba_lunid(lunaddrbytes))
1f310bde 2719 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
edd16368 2720 else
1f310bde
SC
2721 /* defer target, lun assignment for physical devices */
2722 hpsa_set_bus_target_lun(device, 2, -1, -1);
2723 return;
2724 }
2725 /* It's a logical device */
4f4eb9f1
ST
2726 if (is_ext_target(h, device)) {
2727 /* external target way, put logicals on bus 1
1f310bde
SC
2728 * and match target/lun numbers box
2729 * reports, other smart array, bus 0, target 0, match lunid
2730 */
2731 hpsa_set_bus_target_lun(device,
2732 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2733 return;
edd16368 2734 }
1f310bde 2735 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
edd16368
SC
2736}
2737
2738/*
2739 * If there is no lun 0 on a target, linux won't find any devices.
4f4eb9f1 2740 * For the external targets (arrays), we have to manually detect the enclosure
edd16368
SC
2741 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2742 * it for some reason. *tmpdevice is the target we're adding,
2743 * this_device is a pointer into the current element of currentsd[]
2744 * that we're building up in update_scsi_devices(), below.
2745 * lunzerobits is a bitmap that tracks which targets already have a
2746 * lun 0 assigned.
2747 * Returns 1 if an enclosure was added, 0 if not.
2748 */
4f4eb9f1 2749static int add_ext_target_dev(struct ctlr_info *h,
edd16368 2750 struct hpsa_scsi_dev_t *tmpdevice,
01a02ffc 2751 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
4f4eb9f1 2752 unsigned long lunzerobits[], int *n_ext_target_devs)
edd16368
SC
2753{
2754 unsigned char scsi3addr[8];
2755
1f310bde 2756 if (test_bit(tmpdevice->target, lunzerobits))
edd16368
SC
2757 return 0; /* There is already a lun 0 on this target. */
2758
2759 if (!is_logical_dev_addr_mode(lunaddrbytes))
2760 return 0; /* It's the logical targets that may lack lun 0. */
2761
4f4eb9f1
ST
2762 if (!is_ext_target(h, tmpdevice))
2763 return 0; /* Only external target devices have this problem. */
edd16368 2764
1f310bde 2765 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
edd16368
SC
2766 return 0;
2767
c4f8a299 2768 memset(scsi3addr, 0, 8);
1f310bde 2769 scsi3addr[3] = tmpdevice->target;
edd16368
SC
2770 if (is_hba_lunid(scsi3addr))
2771 return 0; /* Don't add the RAID controller here. */
2772
339b2b14
SC
2773 if (is_scsi_rev_5(h))
2774 return 0; /* p1210m doesn't need to do this. */
2775
4f4eb9f1 2776 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
aca4a520
ST
2777 dev_warn(&h->pdev->dev, "Maximum number of external "
2778 "target devices exceeded. Check your hardware "
edd16368
SC
2779 "configuration.");
2780 return 0;
2781 }
2782
0b0e1d6c 2783 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
edd16368 2784 return 0;
4f4eb9f1 2785 (*n_ext_target_devs)++;
1f310bde
SC
2786 hpsa_set_bus_target_lun(this_device,
2787 tmpdevice->bus, tmpdevice->target, 0);
2788 set_bit(tmpdevice->target, lunzerobits);
edd16368
SC
2789 return 1;
2790}
2791
54b6e9e9
ST
2792/*
2793 * Get address of physical disk used for an ioaccel2 mode command:
2794 * 1. Extract ioaccel2 handle from the command.
2795 * 2. Find a matching ioaccel2 handle from list of physical disks.
2796 * 3. Return:
2797 * 1 and set scsi3addr to address of matching physical
2798 * 0 if no matching physical disk was found.
2799 */
2800static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2801 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2802{
2803 struct ReportExtendedLUNdata *physicals = NULL;
2804 int responsesize = 24; /* size of physical extended response */
2805 int extended = 2; /* flag forces reporting 'other dev info'. */
2806 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2807 u32 nphysicals = 0; /* number of reported physical devs */
2808 int found = 0; /* found match (1) or not (0) */
2809 u32 find; /* handle we need to match */
2810 int i;
2811 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2812 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2813 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2814 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2815 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2816
2817 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2818 return 0; /* no match */
2819
2820 /* point to the ioaccel2 device handle */
2821 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2822 if (c2a == NULL)
2823 return 0; /* no match */
2824
2825 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2826 if (scmd == NULL)
2827 return 0; /* no match */
2828
2829 d = scmd->device->hostdata;
2830 if (d == NULL)
2831 return 0; /* no match */
2832
2833 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2834 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2835 find = c2a->scsi_nexus;
2836
2ba8bfc8
SC
2837 if (h->raid_offload_debug > 0)
2838 dev_info(&h->pdev->dev,
2839 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2840 __func__, scsi_nexus,
2841 d->device_id[0], d->device_id[1], d->device_id[2],
2842 d->device_id[3], d->device_id[4], d->device_id[5],
2843 d->device_id[6], d->device_id[7], d->device_id[8],
2844 d->device_id[9], d->device_id[10], d->device_id[11],
2845 d->device_id[12], d->device_id[13], d->device_id[14],
2846 d->device_id[15]);
2847
54b6e9e9
ST
2848 /* Get the list of physical devices */
2849 physicals = kzalloc(reportsize, GFP_KERNEL);
3b51a7a3
JH
2850 if (physicals == NULL)
2851 return 0;
54b6e9e9
ST
2852 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2853 reportsize, extended)) {
2854 dev_err(&h->pdev->dev,
2855 "Can't lookup %s device handle: report physical LUNs failed.\n",
2856 "HP SSD Smart Path");
2857 kfree(physicals);
2858 return 0;
2859 }
2860 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2861 responsesize;
2862
54b6e9e9
ST
2863 /* find ioaccel2 handle in list of physicals: */
2864 for (i = 0; i < nphysicals; i++) {
d5b5d964
SC
2865 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2866
54b6e9e9 2867 /* handle is in bytes 28-31 of each lun */
d5b5d964 2868 if (entry->ioaccel_handle != find)
54b6e9e9 2869 continue; /* didn't match */
54b6e9e9 2870 found = 1;
d5b5d964 2871 memcpy(scsi3addr, entry->lunid, 8);
2ba8bfc8
SC
2872 if (h->raid_offload_debug > 0)
2873 dev_info(&h->pdev->dev,
d5b5d964 2874 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2ba8bfc8 2875 __func__, find,
d5b5d964 2876 entry->ioaccel_handle, scsi3addr);
54b6e9e9
ST
2877 break; /* found it */
2878 }
2879
2880 kfree(physicals);
2881 if (found)
2882 return 1;
2883 else
2884 return 0;
2885
2886}
edd16368
SC
2887/*
2888 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2889 * logdev. The number of luns in physdev and logdev are returned in
2890 * *nphysicals and *nlogicals, respectively.
2891 * Returns 0 on success, -1 otherwise.
2892 */
2893static int hpsa_gather_lun_info(struct ctlr_info *h,
2894 int reportlunsize,
283b4a9b 2895 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
01a02ffc 2896 struct ReportLUNdata *logdev, u32 *nlogicals)
edd16368 2897{
283b4a9b
SC
2898 int physical_entry_size = 8;
2899
2900 *physical_mode = 0;
2901
2902 /* For I/O accelerator mode we need to read physical device handles */
317d4adf
MM
2903 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2904 h->transMethod & CFGTBL_Trans_io_accel2) {
283b4a9b
SC
2905 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2906 physical_entry_size = 24;
2907 }
a93aa1fe 2908 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
283b4a9b 2909 *physical_mode)) {
edd16368
SC
2910 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2911 return -1;
2912 }
283b4a9b
SC
2913 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2914 physical_entry_size;
edd16368
SC
2915 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2916 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2917 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2918 *nphysicals - HPSA_MAX_PHYS_LUN);
2919 *nphysicals = HPSA_MAX_PHYS_LUN;
2920 }
2921 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2922 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2923 return -1;
2924 }
6df1e954 2925 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
edd16368
SC
2926 /* Reject Logicals in excess of our max capability. */
2927 if (*nlogicals > HPSA_MAX_LUN) {
2928 dev_warn(&h->pdev->dev,
2929 "maximum logical LUNs (%d) exceeded. "
2930 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2931 *nlogicals - HPSA_MAX_LUN);
2932 *nlogicals = HPSA_MAX_LUN;
2933 }
2934 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2935 dev_warn(&h->pdev->dev,
2936 "maximum logical + physical LUNs (%d) exceeded. "
2937 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2938 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2939 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2940 }
2941 return 0;
2942}
2943
339b2b14 2944u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
a93aa1fe
MG
2945 int nphysicals, int nlogicals,
2946 struct ReportExtendedLUNdata *physdev_list,
339b2b14
SC
2947 struct ReportLUNdata *logdev_list)
2948{
2949 /* Helper function, figure out where the LUN ID info is coming from
2950 * given index i, lists of physical and logical devices, where in
2951 * the list the raid controller is supposed to appear (first or last)
2952 */
2953
2954 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2955 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2956
2957 if (i == raid_ctlr_position)
2958 return RAID_CTLR_LUNID;
2959
2960 if (i < logicals_start)
d5b5d964
SC
2961 return &physdev_list->LUN[i -
2962 (raid_ctlr_position == 0)].lunid[0];
339b2b14
SC
2963
2964 if (i < last_device)
2965 return &logdev_list->LUN[i - nphysicals -
2966 (raid_ctlr_position == 0)][0];
2967 BUG();
2968 return NULL;
2969}
2970
316b221a
SC
2971static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2972{
2973 int rc;
6e8e8088 2974 int hba_mode_enabled;
316b221a
SC
2975 struct bmic_controller_parameters *ctlr_params;
2976 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
2977 GFP_KERNEL);
2978
2979 if (!ctlr_params)
96444fbb 2980 return -ENOMEM;
316b221a
SC
2981 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
2982 sizeof(struct bmic_controller_parameters));
96444fbb 2983 if (rc) {
316b221a 2984 kfree(ctlr_params);
96444fbb 2985 return rc;
316b221a 2986 }
6e8e8088
JH
2987
2988 hba_mode_enabled =
2989 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
2990 kfree(ctlr_params);
2991 return hba_mode_enabled;
316b221a
SC
2992}
2993
edd16368
SC
2994static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2995{
2996 /* the idea here is we could get notified
2997 * that some devices have changed, so we do a report
2998 * physical luns and report logical luns cmd, and adjust
2999 * our list of devices accordingly.
3000 *
3001 * The scsi3addr's of devices won't change so long as the
3002 * adapter is not reset. That means we can rescan and
3003 * tell which devices we already know about, vs. new
3004 * devices, vs. disappearing devices.
3005 */
a93aa1fe 3006 struct ReportExtendedLUNdata *physdev_list = NULL;
edd16368 3007 struct ReportLUNdata *logdev_list = NULL;
01a02ffc
SC
3008 u32 nphysicals = 0;
3009 u32 nlogicals = 0;
283b4a9b 3010 int physical_mode = 0;
01a02ffc 3011 u32 ndev_allocated = 0;
edd16368
SC
3012 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3013 int ncurrent = 0;
283b4a9b 3014 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
4f4eb9f1 3015 int i, n_ext_target_devs, ndevs_to_allocate;
339b2b14 3016 int raid_ctlr_position;
2bbf5c7f 3017 int rescan_hba_mode;
aca4a520 3018 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
edd16368 3019
cfe5badc 3020 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
edd16368
SC
3021 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
3022 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
edd16368
SC
3023 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3024
0b0e1d6c 3025 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
edd16368
SC
3026 dev_err(&h->pdev->dev, "out of memory\n");
3027 goto out;
3028 }
3029 memset(lunzerobits, 0, sizeof(lunzerobits));
3030
316b221a 3031 rescan_hba_mode = hpsa_hba_mode_enabled(h);
96444fbb
JH
3032 if (rescan_hba_mode < 0)
3033 goto out;
316b221a
SC
3034
3035 if (!h->hba_mode_enabled && rescan_hba_mode)
3036 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3037 else if (h->hba_mode_enabled && !rescan_hba_mode)
3038 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3039
3040 h->hba_mode_enabled = rescan_hba_mode;
3041
a93aa1fe
MG
3042 if (hpsa_gather_lun_info(h, reportlunsize,
3043 (struct ReportLUNdata *) physdev_list, &nphysicals,
283b4a9b 3044 &physical_mode, logdev_list, &nlogicals))
edd16368
SC
3045 goto out;
3046
aca4a520
ST
3047 /* We might see up to the maximum number of logical and physical disks
3048 * plus external target devices, and a device for the local RAID
3049 * controller.
edd16368 3050 */
aca4a520 3051 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
edd16368
SC
3052
3053 /* Allocate the per device structures */
3054 for (i = 0; i < ndevs_to_allocate; i++) {
b7ec021f
ST
3055 if (i >= HPSA_MAX_DEVICES) {
3056 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3057 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3058 ndevs_to_allocate - HPSA_MAX_DEVICES);
3059 break;
3060 }
3061
edd16368
SC
3062 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3063 if (!currentsd[i]) {
3064 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3065 __FILE__, __LINE__);
3066 goto out;
3067 }
3068 ndev_allocated++;
3069 }
3070
8645291b 3071 if (is_scsi_rev_5(h))
339b2b14
SC
3072 raid_ctlr_position = 0;
3073 else
3074 raid_ctlr_position = nphysicals + nlogicals;
3075
edd16368 3076 /* adjust our table of devices */
4f4eb9f1 3077 n_ext_target_devs = 0;
edd16368 3078 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
0b0e1d6c 3079 u8 *lunaddrbytes, is_OBDR = 0;
edd16368
SC
3080
3081 /* Figure out where the LUN ID info is coming from */
339b2b14
SC
3082 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3083 i, nphysicals, nlogicals, physdev_list, logdev_list);
edd16368 3084 /* skip masked physical devices. */
339b2b14
SC
3085 if (lunaddrbytes[3] & 0xC0 &&
3086 i < nphysicals + (raid_ctlr_position == 0))
edd16368
SC
3087 continue;
3088
3089 /* Get device type, vendor, model, device id */
0b0e1d6c
SC
3090 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3091 &is_OBDR))
edd16368 3092 continue; /* skip it if we can't talk to it. */
1f310bde 3093 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
edd16368
SC
3094 this_device = currentsd[ncurrent];
3095
3096 /*
4f4eb9f1 3097 * For external target devices, we have to insert a LUN 0 which
edd16368
SC
3098 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3099 * is nonetheless an enclosure device there. We have to
3100 * present that otherwise linux won't find anything if
3101 * there is no lun 0.
3102 */
4f4eb9f1 3103 if (add_ext_target_dev(h, tmpdevice, this_device,
1f310bde 3104 lunaddrbytes, lunzerobits,
4f4eb9f1 3105 &n_ext_target_devs)) {
edd16368
SC
3106 ncurrent++;
3107 this_device = currentsd[ncurrent];
3108 }
3109
3110 *this_device = *tmpdevice;
edd16368
SC
3111
3112 switch (this_device->devtype) {
0b0e1d6c 3113 case TYPE_ROM:
edd16368
SC
3114 /* We don't *really* support actual CD-ROM devices,
3115 * just "One Button Disaster Recovery" tape drive
3116 * which temporarily pretends to be a CD-ROM drive.
3117 * So we check that the device is really an OBDR tape
3118 * device by checking for "$DR-10" in bytes 43-48 of
3119 * the inquiry data.
3120 */
0b0e1d6c
SC
3121 if (is_OBDR)
3122 ncurrent++;
edd16368
SC
3123 break;
3124 case TYPE_DISK:
316b221a
SC
3125 if (h->hba_mode_enabled) {
3126 /* never use raid mapper in HBA mode */
3127 this_device->offload_enabled = 0;
3128 ncurrent++;
3129 break;
3130 } else if (h->acciopath_status) {
3131 if (i >= nphysicals) {
3132 ncurrent++;
3133 break;
3134 }
3135 } else {
3136 if (i < nphysicals)
3137 break;
283b4a9b 3138 ncurrent++;
edd16368 3139 break;
283b4a9b
SC
3140 }
3141 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3142 memcpy(&this_device->ioaccel_handle,
3143 &lunaddrbytes[20],
3144 sizeof(this_device->ioaccel_handle));
3145 ncurrent++;
3146 }
edd16368
SC
3147 break;
3148 case TYPE_TAPE:
3149 case TYPE_MEDIUM_CHANGER:
3150 ncurrent++;
3151 break;
3152 case TYPE_RAID:
3153 /* Only present the Smartarray HBA as a RAID controller.
3154 * If it's a RAID controller other than the HBA itself
3155 * (an external RAID controller, MSA500 or similar)
3156 * don't present it.
3157 */
3158 if (!is_hba_lunid(lunaddrbytes))
3159 break;
3160 ncurrent++;
3161 break;
3162 default:
3163 break;
3164 }
cfe5badc 3165 if (ncurrent >= HPSA_MAX_DEVICES)
edd16368
SC
3166 break;
3167 }
3168 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3169out:
3170 kfree(tmpdevice);
3171 for (i = 0; i < ndev_allocated; i++)
3172 kfree(currentsd[i]);
3173 kfree(currentsd);
edd16368
SC
3174 kfree(physdev_list);
3175 kfree(logdev_list);
edd16368
SC
3176}
3177
3178/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3179 * dma mapping and fills in the scatter gather entries of the
3180 * hpsa command, cp.
3181 */
33a2ffce 3182static int hpsa_scatter_gather(struct ctlr_info *h,
edd16368
SC
3183 struct CommandList *cp,
3184 struct scsi_cmnd *cmd)
3185{
3186 unsigned int len;
3187 struct scatterlist *sg;
01a02ffc 3188 u64 addr64;
33a2ffce
SC
3189 int use_sg, i, sg_index, chained;
3190 struct SGDescriptor *curr_sg;
edd16368 3191
33a2ffce 3192 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
edd16368
SC
3193
3194 use_sg = scsi_dma_map(cmd);
3195 if (use_sg < 0)
3196 return use_sg;
3197
3198 if (!use_sg)
3199 goto sglist_finished;
3200
33a2ffce
SC
3201 curr_sg = cp->SG;
3202 chained = 0;
3203 sg_index = 0;
edd16368 3204 scsi_for_each_sg(cmd, sg, use_sg, i) {
33a2ffce
SC
3205 if (i == h->max_cmd_sg_entries - 1 &&
3206 use_sg > h->max_cmd_sg_entries) {
3207 chained = 1;
3208 curr_sg = h->cmd_sg_list[cp->cmdindex];
3209 sg_index = 0;
3210 }
01a02ffc 3211 addr64 = (u64) sg_dma_address(sg);
edd16368 3212 len = sg_dma_len(sg);
33a2ffce
SC
3213 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3214 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3215 curr_sg->Len = len;
e1d9cbfa 3216 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
33a2ffce
SC
3217 curr_sg++;
3218 }
3219
3220 if (use_sg + chained > h->maxSG)
3221 h->maxSG = use_sg + chained;
3222
3223 if (chained) {
3224 cp->Header.SGList = h->max_cmd_sg_entries;
3225 cp->Header.SGTotal = (u16) (use_sg + 1);
e2bea6df
SC
3226 if (hpsa_map_sg_chain_block(h, cp)) {
3227 scsi_dma_unmap(cmd);
3228 return -1;
3229 }
33a2ffce 3230 return 0;
edd16368
SC
3231 }
3232
3233sglist_finished:
3234
01a02ffc
SC
3235 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3236 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
edd16368
SC
3237 return 0;
3238}
3239
283b4a9b
SC
3240#define IO_ACCEL_INELIGIBLE (1)
3241static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3242{
3243 int is_write = 0;
3244 u32 block;
3245 u32 block_cnt;
3246
3247 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3248 switch (cdb[0]) {
3249 case WRITE_6:
3250 case WRITE_12:
3251 is_write = 1;
3252 case READ_6:
3253 case READ_12:
3254 if (*cdb_len == 6) {
3255 block = (((u32) cdb[2]) << 8) | cdb[3];
3256 block_cnt = cdb[4];
3257 } else {
3258 BUG_ON(*cdb_len != 12);
3259 block = (((u32) cdb[2]) << 24) |
3260 (((u32) cdb[3]) << 16) |
3261 (((u32) cdb[4]) << 8) |
3262 cdb[5];
3263 block_cnt =
3264 (((u32) cdb[6]) << 24) |
3265 (((u32) cdb[7]) << 16) |
3266 (((u32) cdb[8]) << 8) |
3267 cdb[9];
3268 }
3269 if (block_cnt > 0xffff)
3270 return IO_ACCEL_INELIGIBLE;
3271
3272 cdb[0] = is_write ? WRITE_10 : READ_10;
3273 cdb[1] = 0;
3274 cdb[2] = (u8) (block >> 24);
3275 cdb[3] = (u8) (block >> 16);
3276 cdb[4] = (u8) (block >> 8);
3277 cdb[5] = (u8) (block);
3278 cdb[6] = 0;
3279 cdb[7] = (u8) (block_cnt >> 8);
3280 cdb[8] = (u8) (block_cnt);
3281 cdb[9] = 0;
3282 *cdb_len = 10;
3283 break;
3284 }
3285 return 0;
3286}
3287
c349775e 3288static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
283b4a9b
SC
3289 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3290 u8 *scsi3addr)
e1f7de0c
MG
3291{
3292 struct scsi_cmnd *cmd = c->scsi_cmd;
e1f7de0c
MG
3293 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3294 unsigned int len;
3295 unsigned int total_len = 0;
3296 struct scatterlist *sg;
3297 u64 addr64;
3298 int use_sg, i;
3299 struct SGDescriptor *curr_sg;
3300 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3301
283b4a9b
SC
3302 /* TODO: implement chaining support */
3303 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3304 return IO_ACCEL_INELIGIBLE;
3305
e1f7de0c
MG
3306 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3307
283b4a9b
SC
3308 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3309 return IO_ACCEL_INELIGIBLE;
3310
e1f7de0c
MG
3311 c->cmd_type = CMD_IOACCEL1;
3312
3313 /* Adjust the DMA address to point to the accelerated command buffer */
3314 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3315 (c->cmdindex * sizeof(*cp));
3316 BUG_ON(c->busaddr & 0x0000007F);
3317
3318 use_sg = scsi_dma_map(cmd);
3319 if (use_sg < 0)
3320 return use_sg;
3321
3322 if (use_sg) {
3323 curr_sg = cp->SG;
3324 scsi_for_each_sg(cmd, sg, use_sg, i) {
3325 addr64 = (u64) sg_dma_address(sg);
3326 len = sg_dma_len(sg);
3327 total_len += len;
3328 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3329 curr_sg->Addr.upper =
3330 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3331 curr_sg->Len = len;
3332
3333 if (i == (scsi_sg_count(cmd) - 1))
3334 curr_sg->Ext = HPSA_SG_LAST;
3335 else
3336 curr_sg->Ext = 0; /* we are not chaining */
3337 curr_sg++;
3338 }
3339
3340 switch (cmd->sc_data_direction) {
3341 case DMA_TO_DEVICE:
3342 control |= IOACCEL1_CONTROL_DATA_OUT;
3343 break;
3344 case DMA_FROM_DEVICE:
3345 control |= IOACCEL1_CONTROL_DATA_IN;
3346 break;
3347 case DMA_NONE:
3348 control |= IOACCEL1_CONTROL_NODATAXFER;
3349 break;
3350 default:
3351 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3352 cmd->sc_data_direction);
3353 BUG();
3354 break;
3355 }
3356 } else {
3357 control |= IOACCEL1_CONTROL_NODATAXFER;
3358 }
3359
c349775e 3360 c->Header.SGList = use_sg;
e1f7de0c 3361 /* Fill out the command structure to submit */
283b4a9b 3362 cp->dev_handle = ioaccel_handle & 0xFFFF;
e1f7de0c
MG
3363 cp->transfer_len = total_len;
3364 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
283b4a9b 3365 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
e1f7de0c 3366 cp->control = control;
283b4a9b
SC
3367 memcpy(cp->CDB, cdb, cdb_len);
3368 memcpy(cp->CISS_LUN, scsi3addr, 8);
c349775e 3369 /* Tag was already set at init time. */
283b4a9b 3370 enqueue_cmd_and_start_io(h, c);
e1f7de0c
MG
3371 return 0;
3372}
edd16368 3373
283b4a9b
SC
3374/*
3375 * Queue a command directly to a device behind the controller using the
3376 * I/O accelerator path.
3377 */
3378static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3379 struct CommandList *c)
3380{
3381 struct scsi_cmnd *cmd = c->scsi_cmd;
3382 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3383
3384 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3385 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3386}
3387
dd0e19f3
ST
3388/*
3389 * Set encryption parameters for the ioaccel2 request
3390 */
3391static void set_encrypt_ioaccel2(struct ctlr_info *h,
3392 struct CommandList *c, struct io_accel2_cmd *cp)
3393{
3394 struct scsi_cmnd *cmd = c->scsi_cmd;
3395 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3396 struct raid_map_data *map = &dev->raid_map;
3397 u64 first_block;
3398
3399 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3400
3401 /* Are we doing encryption on this device */
3402 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3403 return;
3404 /* Set the data encryption key index. */
3405 cp->dekindex = map->dekindex;
3406
3407 /* Set the encryption enable flag, encoded into direction field. */
3408 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3409
3410 /* Set encryption tweak values based on logical block address
3411 * If block size is 512, tweak value is LBA.
3412 * For other block sizes, tweak is (LBA * block size)/ 512)
3413 */
3414 switch (cmd->cmnd[0]) {
3415 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3416 case WRITE_6:
3417 case READ_6:
3418 if (map->volume_blk_size == 512) {
3419 cp->tweak_lower =
3420 (((u32) cmd->cmnd[2]) << 8) |
3421 cmd->cmnd[3];
3422 cp->tweak_upper = 0;
3423 } else {
3424 first_block =
3425 (((u64) cmd->cmnd[2]) << 8) |
3426 cmd->cmnd[3];
3427 first_block = (first_block * map->volume_blk_size)/512;
3428 cp->tweak_lower = (u32)first_block;
3429 cp->tweak_upper = (u32)(first_block >> 32);
3430 }
3431 break;
3432 case WRITE_10:
3433 case READ_10:
3434 if (map->volume_blk_size == 512) {
3435 cp->tweak_lower =
3436 (((u32) cmd->cmnd[2]) << 24) |
3437 (((u32) cmd->cmnd[3]) << 16) |
3438 (((u32) cmd->cmnd[4]) << 8) |
3439 cmd->cmnd[5];
3440 cp->tweak_upper = 0;
3441 } else {
3442 first_block =
3443 (((u64) cmd->cmnd[2]) << 24) |
3444 (((u64) cmd->cmnd[3]) << 16) |
3445 (((u64) cmd->cmnd[4]) << 8) |
3446 cmd->cmnd[5];
3447 first_block = (first_block * map->volume_blk_size)/512;
3448 cp->tweak_lower = (u32)first_block;
3449 cp->tweak_upper = (u32)(first_block >> 32);
3450 }
3451 break;
3452 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3453 case WRITE_12:
3454 case READ_12:
3455 if (map->volume_blk_size == 512) {
3456 cp->tweak_lower =
3457 (((u32) cmd->cmnd[2]) << 24) |
3458 (((u32) cmd->cmnd[3]) << 16) |
3459 (((u32) cmd->cmnd[4]) << 8) |
3460 cmd->cmnd[5];
3461 cp->tweak_upper = 0;
3462 } else {
3463 first_block =
3464 (((u64) cmd->cmnd[2]) << 24) |
3465 (((u64) cmd->cmnd[3]) << 16) |
3466 (((u64) cmd->cmnd[4]) << 8) |
3467 cmd->cmnd[5];
3468 first_block = (first_block * map->volume_blk_size)/512;
3469 cp->tweak_lower = (u32)first_block;
3470 cp->tweak_upper = (u32)(first_block >> 32);
3471 }
3472 break;
3473 case WRITE_16:
3474 case READ_16:
3475 if (map->volume_blk_size == 512) {
3476 cp->tweak_lower =
3477 (((u32) cmd->cmnd[6]) << 24) |
3478 (((u32) cmd->cmnd[7]) << 16) |
3479 (((u32) cmd->cmnd[8]) << 8) |
3480 cmd->cmnd[9];
3481 cp->tweak_upper =
3482 (((u32) cmd->cmnd[2]) << 24) |
3483 (((u32) cmd->cmnd[3]) << 16) |
3484 (((u32) cmd->cmnd[4]) << 8) |
3485 cmd->cmnd[5];
3486 } else {
3487 first_block =
3488 (((u64) cmd->cmnd[2]) << 56) |
3489 (((u64) cmd->cmnd[3]) << 48) |
3490 (((u64) cmd->cmnd[4]) << 40) |
3491 (((u64) cmd->cmnd[5]) << 32) |
3492 (((u64) cmd->cmnd[6]) << 24) |
3493 (((u64) cmd->cmnd[7]) << 16) |
3494 (((u64) cmd->cmnd[8]) << 8) |
3495 cmd->cmnd[9];
3496 first_block = (first_block * map->volume_blk_size)/512;
3497 cp->tweak_lower = (u32)first_block;
3498 cp->tweak_upper = (u32)(first_block >> 32);
3499 }
3500 break;
3501 default:
3502 dev_err(&h->pdev->dev,
3503 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3504 __func__);
3505 BUG();
3506 break;
3507 }
3508}
3509
c349775e
ST
3510static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3511 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3512 u8 *scsi3addr)
3513{
3514 struct scsi_cmnd *cmd = c->scsi_cmd;
3515 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3516 struct ioaccel2_sg_element *curr_sg;
3517 int use_sg, i;
3518 struct scatterlist *sg;
3519 u64 addr64;
3520 u32 len;
3521 u32 total_len = 0;
3522
3523 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3524 return IO_ACCEL_INELIGIBLE;
3525
3526 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3527 return IO_ACCEL_INELIGIBLE;
3528 c->cmd_type = CMD_IOACCEL2;
3529 /* Adjust the DMA address to point to the accelerated command buffer */
3530 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3531 (c->cmdindex * sizeof(*cp));
3532 BUG_ON(c->busaddr & 0x0000007F);
3533
3534 memset(cp, 0, sizeof(*cp));
3535 cp->IU_type = IOACCEL2_IU_TYPE;
3536
3537 use_sg = scsi_dma_map(cmd);
3538 if (use_sg < 0)
3539 return use_sg;
3540
3541 if (use_sg) {
3542 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3543 curr_sg = cp->sg;
3544 scsi_for_each_sg(cmd, sg, use_sg, i) {
3545 addr64 = (u64) sg_dma_address(sg);
3546 len = sg_dma_len(sg);
3547 total_len += len;
3548 curr_sg->address = cpu_to_le64(addr64);
3549 curr_sg->length = cpu_to_le32(len);
3550 curr_sg->reserved[0] = 0;
3551 curr_sg->reserved[1] = 0;
3552 curr_sg->reserved[2] = 0;
3553 curr_sg->chain_indicator = 0;
3554 curr_sg++;
3555 }
3556
3557 switch (cmd->sc_data_direction) {
3558 case DMA_TO_DEVICE:
dd0e19f3
ST
3559 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3560 cp->direction |= IOACCEL2_DIR_DATA_OUT;
c349775e
ST
3561 break;
3562 case DMA_FROM_DEVICE:
dd0e19f3
ST
3563 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3564 cp->direction |= IOACCEL2_DIR_DATA_IN;
c349775e
ST
3565 break;
3566 case DMA_NONE:
dd0e19f3
ST
3567 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3568 cp->direction |= IOACCEL2_DIR_NO_DATA;
c349775e
ST
3569 break;
3570 default:
3571 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3572 cmd->sc_data_direction);
3573 BUG();
3574 break;
3575 }
3576 } else {
dd0e19f3
ST
3577 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3578 cp->direction |= IOACCEL2_DIR_NO_DATA;
c349775e 3579 }
dd0e19f3
ST
3580
3581 /* Set encryption parameters, if necessary */
3582 set_encrypt_ioaccel2(h, c, cp);
3583
c349775e 3584 cp->scsi_nexus = ioaccel_handle;
dd0e19f3 3585 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
c349775e
ST
3586 DIRECT_LOOKUP_BIT;
3587 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
c349775e
ST
3588
3589 /* fill in sg elements */
3590 cp->sg_count = (u8) use_sg;
3591
3592 cp->data_len = cpu_to_le32(total_len);
3593 cp->err_ptr = cpu_to_le64(c->busaddr +
3594 offsetof(struct io_accel2_cmd, error_data));
3595 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3596
3597 enqueue_cmd_and_start_io(h, c);
3598 return 0;
3599}
3600
3601/*
3602 * Queue a command to the correct I/O accelerator path.
3603 */
3604static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3605 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3606 u8 *scsi3addr)
3607{
3608 if (h->transMethod & CFGTBL_Trans_io_accel1)
3609 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3610 cdb, cdb_len, scsi3addr);
3611 else
3612 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3613 cdb, cdb_len, scsi3addr);
3614}
3615
6b80b18f
ST
3616static void raid_map_helper(struct raid_map_data *map,
3617 int offload_to_mirror, u32 *map_index, u32 *current_group)
3618{
3619 if (offload_to_mirror == 0) {
3620 /* use physical disk in the first mirrored group. */
3621 *map_index %= map->data_disks_per_row;
3622 return;
3623 }
3624 do {
3625 /* determine mirror group that *map_index indicates */
3626 *current_group = *map_index / map->data_disks_per_row;
3627 if (offload_to_mirror == *current_group)
3628 continue;
3629 if (*current_group < (map->layout_map_count - 1)) {
3630 /* select map index from next group */
3631 *map_index += map->data_disks_per_row;
3632 (*current_group)++;
3633 } else {
3634 /* select map index from first group */
3635 *map_index %= map->data_disks_per_row;
3636 *current_group = 0;
3637 }
3638 } while (offload_to_mirror != *current_group);
3639}
3640
283b4a9b
SC
3641/*
3642 * Attempt to perform offload RAID mapping for a logical volume I/O.
3643 */
3644static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3645 struct CommandList *c)
3646{
3647 struct scsi_cmnd *cmd = c->scsi_cmd;
3648 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3649 struct raid_map_data *map = &dev->raid_map;
3650 struct raid_map_disk_data *dd = &map->data[0];
3651 int is_write = 0;
3652 u32 map_index;
3653 u64 first_block, last_block;
3654 u32 block_cnt;
3655 u32 blocks_per_row;
3656 u64 first_row, last_row;
3657 u32 first_row_offset, last_row_offset;
3658 u32 first_column, last_column;
6b80b18f
ST
3659 u64 r0_first_row, r0_last_row;
3660 u32 r5or6_blocks_per_row;
3661 u64 r5or6_first_row, r5or6_last_row;
3662 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3663 u32 r5or6_first_column, r5or6_last_column;
3664 u32 total_disks_per_row;
3665 u32 stripesize;
3666 u32 first_group, last_group, current_group;
283b4a9b
SC
3667 u32 map_row;
3668 u32 disk_handle;
3669 u64 disk_block;
3670 u32 disk_block_cnt;
3671 u8 cdb[16];
3672 u8 cdb_len;
3673#if BITS_PER_LONG == 32
3674 u64 tmpdiv;
3675#endif
6b80b18f 3676 int offload_to_mirror;
283b4a9b
SC
3677
3678 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3679
3680 /* check for valid opcode, get LBA and block count */
3681 switch (cmd->cmnd[0]) {
3682 case WRITE_6:
3683 is_write = 1;
3684 case READ_6:
3685 first_block =
3686 (((u64) cmd->cmnd[2]) << 8) |
3687 cmd->cmnd[3];
3688 block_cnt = cmd->cmnd[4];
3fa89a04
SC
3689 if (block_cnt == 0)
3690 block_cnt = 256;
283b4a9b
SC
3691 break;
3692 case WRITE_10:
3693 is_write = 1;
3694 case READ_10:
3695 first_block =
3696 (((u64) cmd->cmnd[2]) << 24) |
3697 (((u64) cmd->cmnd[3]) << 16) |
3698 (((u64) cmd->cmnd[4]) << 8) |
3699 cmd->cmnd[5];
3700 block_cnt =
3701 (((u32) cmd->cmnd[7]) << 8) |
3702 cmd->cmnd[8];
3703 break;
3704 case WRITE_12:
3705 is_write = 1;
3706 case READ_12:
3707 first_block =
3708 (((u64) cmd->cmnd[2]) << 24) |
3709 (((u64) cmd->cmnd[3]) << 16) |
3710 (((u64) cmd->cmnd[4]) << 8) |
3711 cmd->cmnd[5];
3712 block_cnt =
3713 (((u32) cmd->cmnd[6]) << 24) |
3714 (((u32) cmd->cmnd[7]) << 16) |
3715 (((u32) cmd->cmnd[8]) << 8) |
3716 cmd->cmnd[9];
3717 break;
3718 case WRITE_16:
3719 is_write = 1;
3720 case READ_16:
3721 first_block =
3722 (((u64) cmd->cmnd[2]) << 56) |
3723 (((u64) cmd->cmnd[3]) << 48) |
3724 (((u64) cmd->cmnd[4]) << 40) |
3725 (((u64) cmd->cmnd[5]) << 32) |
3726 (((u64) cmd->cmnd[6]) << 24) |
3727 (((u64) cmd->cmnd[7]) << 16) |
3728 (((u64) cmd->cmnd[8]) << 8) |
3729 cmd->cmnd[9];
3730 block_cnt =
3731 (((u32) cmd->cmnd[10]) << 24) |
3732 (((u32) cmd->cmnd[11]) << 16) |
3733 (((u32) cmd->cmnd[12]) << 8) |
3734 cmd->cmnd[13];
3735 break;
3736 default:
3737 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3738 }
283b4a9b
SC
3739 last_block = first_block + block_cnt - 1;
3740
3741 /* check for write to non-RAID-0 */
3742 if (is_write && dev->raid_level != 0)
3743 return IO_ACCEL_INELIGIBLE;
3744
3745 /* check for invalid block or wraparound */
3746 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3747 return IO_ACCEL_INELIGIBLE;
3748
3749 /* calculate stripe information for the request */
3750 blocks_per_row = map->data_disks_per_row * map->strip_size;
3751#if BITS_PER_LONG == 32
3752 tmpdiv = first_block;
3753 (void) do_div(tmpdiv, blocks_per_row);
3754 first_row = tmpdiv;
3755 tmpdiv = last_block;
3756 (void) do_div(tmpdiv, blocks_per_row);
3757 last_row = tmpdiv;
3758 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3759 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3760 tmpdiv = first_row_offset;
3761 (void) do_div(tmpdiv, map->strip_size);
3762 first_column = tmpdiv;
3763 tmpdiv = last_row_offset;
3764 (void) do_div(tmpdiv, map->strip_size);
3765 last_column = tmpdiv;
3766#else
3767 first_row = first_block / blocks_per_row;
3768 last_row = last_block / blocks_per_row;
3769 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3770 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3771 first_column = first_row_offset / map->strip_size;
3772 last_column = last_row_offset / map->strip_size;
3773#endif
3774
3775 /* if this isn't a single row/column then give to the controller */
3776 if ((first_row != last_row) || (first_column != last_column))
3777 return IO_ACCEL_INELIGIBLE;
3778
3779 /* proceeding with driver mapping */
6b80b18f
ST
3780 total_disks_per_row = map->data_disks_per_row +
3781 map->metadata_disks_per_row;
283b4a9b
SC
3782 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3783 map->row_cnt;
6b80b18f
ST
3784 map_index = (map_row * total_disks_per_row) + first_column;
3785
3786 switch (dev->raid_level) {
3787 case HPSA_RAID_0:
3788 break; /* nothing special to do */
3789 case HPSA_RAID_1:
3790 /* Handles load balance across RAID 1 members.
3791 * (2-drive R1 and R10 with even # of drives.)
3792 * Appropriate for SSDs, not optimal for HDDs
283b4a9b 3793 */
6b80b18f 3794 BUG_ON(map->layout_map_count != 2);
283b4a9b
SC
3795 if (dev->offload_to_mirror)
3796 map_index += map->data_disks_per_row;
3797 dev->offload_to_mirror = !dev->offload_to_mirror;
6b80b18f
ST
3798 break;
3799 case HPSA_RAID_ADM:
3800 /* Handles N-way mirrors (R1-ADM)
3801 * and R10 with # of drives divisible by 3.)
3802 */
3803 BUG_ON(map->layout_map_count != 3);
3804
3805 offload_to_mirror = dev->offload_to_mirror;
3806 raid_map_helper(map, offload_to_mirror,
3807 &map_index, &current_group);
3808 /* set mirror group to use next time */
3809 offload_to_mirror =
3810 (offload_to_mirror >= map->layout_map_count - 1)
3811 ? 0 : offload_to_mirror + 1;
3812 /* FIXME: remove after debug/dev */
3813 BUG_ON(offload_to_mirror >= map->layout_map_count);
3814 dev_warn(&h->pdev->dev,
3815 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3816 map_index, offload_to_mirror);
3817 dev->offload_to_mirror = offload_to_mirror;
3818 /* Avoid direct use of dev->offload_to_mirror within this
3819 * function since multiple threads might simultaneously
3820 * increment it beyond the range of dev->layout_map_count -1.
3821 */
3822 break;
3823 case HPSA_RAID_5:
3824 case HPSA_RAID_6:
3825 if (map->layout_map_count <= 1)
3826 break;
3827
3828 /* Verify first and last block are in same RAID group */
3829 r5or6_blocks_per_row =
3830 map->strip_size * map->data_disks_per_row;
3831 BUG_ON(r5or6_blocks_per_row == 0);
3832 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3833#if BITS_PER_LONG == 32
3834 tmpdiv = first_block;
3835 first_group = do_div(tmpdiv, stripesize);
3836 tmpdiv = first_group;
3837 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3838 first_group = tmpdiv;
3839 tmpdiv = last_block;
3840 last_group = do_div(tmpdiv, stripesize);
3841 tmpdiv = last_group;
3842 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3843 last_group = tmpdiv;
3844#else
3845 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3846 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
6b80b18f 3847#endif
000ff7c2 3848 if (first_group != last_group)
6b80b18f
ST
3849 return IO_ACCEL_INELIGIBLE;
3850
3851 /* Verify request is in a single row of RAID 5/6 */
3852#if BITS_PER_LONG == 32
3853 tmpdiv = first_block;
3854 (void) do_div(tmpdiv, stripesize);
3855 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3856 tmpdiv = last_block;
3857 (void) do_div(tmpdiv, stripesize);
3858 r5or6_last_row = r0_last_row = tmpdiv;
3859#else
3860 first_row = r5or6_first_row = r0_first_row =
3861 first_block / stripesize;
3862 r5or6_last_row = r0_last_row = last_block / stripesize;
3863#endif
3864 if (r5or6_first_row != r5or6_last_row)
3865 return IO_ACCEL_INELIGIBLE;
3866
3867
3868 /* Verify request is in a single column */
3869#if BITS_PER_LONG == 32
3870 tmpdiv = first_block;
3871 first_row_offset = do_div(tmpdiv, stripesize);
3872 tmpdiv = first_row_offset;
3873 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3874 r5or6_first_row_offset = first_row_offset;
3875 tmpdiv = last_block;
3876 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3877 tmpdiv = r5or6_last_row_offset;
3878 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3879 tmpdiv = r5or6_first_row_offset;
3880 (void) do_div(tmpdiv, map->strip_size);
3881 first_column = r5or6_first_column = tmpdiv;
3882 tmpdiv = r5or6_last_row_offset;
3883 (void) do_div(tmpdiv, map->strip_size);
3884 r5or6_last_column = tmpdiv;
3885#else
3886 first_row_offset = r5or6_first_row_offset =
3887 (u32)((first_block % stripesize) %
3888 r5or6_blocks_per_row);
3889
3890 r5or6_last_row_offset =
3891 (u32)((last_block % stripesize) %
3892 r5or6_blocks_per_row);
3893
3894 first_column = r5or6_first_column =
3895 r5or6_first_row_offset / map->strip_size;
3896 r5or6_last_column =
3897 r5or6_last_row_offset / map->strip_size;
3898#endif
3899 if (r5or6_first_column != r5or6_last_column)
3900 return IO_ACCEL_INELIGIBLE;
3901
3902 /* Request is eligible */
3903 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3904 map->row_cnt;
3905
3906 map_index = (first_group *
3907 (map->row_cnt * total_disks_per_row)) +
3908 (map_row * total_disks_per_row) + first_column;
3909 break;
3910 default:
3911 return IO_ACCEL_INELIGIBLE;
283b4a9b 3912 }
6b80b18f 3913
283b4a9b
SC
3914 disk_handle = dd[map_index].ioaccel_handle;
3915 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3916 (first_row_offset - (first_column * map->strip_size));
3917 disk_block_cnt = block_cnt;
3918
3919 /* handle differing logical/physical block sizes */
3920 if (map->phys_blk_shift) {
3921 disk_block <<= map->phys_blk_shift;
3922 disk_block_cnt <<= map->phys_blk_shift;
3923 }
3924 BUG_ON(disk_block_cnt > 0xffff);
3925
3926 /* build the new CDB for the physical disk I/O */
3927 if (disk_block > 0xffffffff) {
3928 cdb[0] = is_write ? WRITE_16 : READ_16;
3929 cdb[1] = 0;
3930 cdb[2] = (u8) (disk_block >> 56);
3931 cdb[3] = (u8) (disk_block >> 48);
3932 cdb[4] = (u8) (disk_block >> 40);
3933 cdb[5] = (u8) (disk_block >> 32);
3934 cdb[6] = (u8) (disk_block >> 24);
3935 cdb[7] = (u8) (disk_block >> 16);
3936 cdb[8] = (u8) (disk_block >> 8);
3937 cdb[9] = (u8) (disk_block);
3938 cdb[10] = (u8) (disk_block_cnt >> 24);
3939 cdb[11] = (u8) (disk_block_cnt >> 16);
3940 cdb[12] = (u8) (disk_block_cnt >> 8);
3941 cdb[13] = (u8) (disk_block_cnt);
3942 cdb[14] = 0;
3943 cdb[15] = 0;
3944 cdb_len = 16;
3945 } else {
3946 cdb[0] = is_write ? WRITE_10 : READ_10;
3947 cdb[1] = 0;
3948 cdb[2] = (u8) (disk_block >> 24);
3949 cdb[3] = (u8) (disk_block >> 16);
3950 cdb[4] = (u8) (disk_block >> 8);
3951 cdb[5] = (u8) (disk_block);
3952 cdb[6] = 0;
3953 cdb[7] = (u8) (disk_block_cnt >> 8);
3954 cdb[8] = (u8) (disk_block_cnt);
3955 cdb[9] = 0;
3956 cdb_len = 10;
3957 }
3958 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3959 dev->scsi3addr);
3960}
3961
f281233d 3962static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
edd16368
SC
3963 void (*done)(struct scsi_cmnd *))
3964{
3965 struct ctlr_info *h;
3966 struct hpsa_scsi_dev_t *dev;
3967 unsigned char scsi3addr[8];
3968 struct CommandList *c;
283b4a9b 3969 int rc = 0;
edd16368
SC
3970
3971 /* Get the ptr to our adapter structure out of cmd->host. */
3972 h = sdev_to_hba(cmd->device);
3973 dev = cmd->device->hostdata;
3974 if (!dev) {
3975 cmd->result = DID_NO_CONNECT << 16;
3976 done(cmd);
3977 return 0;
3978 }
3979 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3980
094963da 3981 if (unlikely(lockup_detected(h))) {
a0c12413
SC
3982 cmd->result = DID_ERROR << 16;
3983 done(cmd);
3984 return 0;
3985 }
e16a33ad 3986 c = cmd_alloc(h);
edd16368
SC
3987 if (c == NULL) { /* trouble... */
3988 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3989 return SCSI_MLQUEUE_HOST_BUSY;
3990 }
3991
3992 /* Fill in the command list header */
3993
3994 cmd->scsi_done = done; /* save this for use by completion code */
3995
3996 /* save c in case we have to abort it */
3997 cmd->host_scribble = (unsigned char *) c;
3998
3999 c->cmd_type = CMD_SCSI;
4000 c->scsi_cmd = cmd;
e1f7de0c 4001
283b4a9b
SC
4002 /* Call alternate submit routine for I/O accelerated commands.
4003 * Retries always go down the normal I/O path.
4004 */
4005 if (likely(cmd->retries == 0 &&
da0697bd
ST
4006 cmd->request->cmd_type == REQ_TYPE_FS &&
4007 h->acciopath_status)) {
283b4a9b
SC
4008 if (dev->offload_enabled) {
4009 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4010 if (rc == 0)
4011 return 0; /* Sent on ioaccel path */
4012 if (rc < 0) { /* scsi_dma_map failed. */
4013 cmd_free(h, c);
4014 return SCSI_MLQUEUE_HOST_BUSY;
4015 }
4016 } else if (dev->ioaccel_handle) {
4017 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4018 if (rc == 0)
4019 return 0; /* Sent on direct map path */
4020 if (rc < 0) { /* scsi_dma_map failed. */
4021 cmd_free(h, c);
4022 return SCSI_MLQUEUE_HOST_BUSY;
4023 }
4024 }
4025 }
e1f7de0c 4026
edd16368
SC
4027 c->Header.ReplyQueue = 0; /* unused in simple mode */
4028 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
303932fd
DB
4029 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
4030 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
edd16368
SC
4031
4032 /* Fill in the request block... */
4033
4034 c->Request.Timeout = 0;
4035 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4036 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4037 c->Request.CDBLen = cmd->cmd_len;
4038 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4039 c->Request.Type.Type = TYPE_CMD;
4040 c->Request.Type.Attribute = ATTR_SIMPLE;
4041 switch (cmd->sc_data_direction) {
4042 case DMA_TO_DEVICE:
4043 c->Request.Type.Direction = XFER_WRITE;
4044 break;
4045 case DMA_FROM_DEVICE:
4046 c->Request.Type.Direction = XFER_READ;
4047 break;
4048 case DMA_NONE:
4049 c->Request.Type.Direction = XFER_NONE;
4050 break;
4051 case DMA_BIDIRECTIONAL:
4052 /* This can happen if a buggy application does a scsi passthru
4053 * and sets both inlen and outlen to non-zero. ( see
4054 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4055 */
4056
4057 c->Request.Type.Direction = XFER_RSVD;
4058 /* This is technically wrong, and hpsa controllers should
4059 * reject it with CMD_INVALID, which is the most correct
4060 * response, but non-fibre backends appear to let it
4061 * slide by, and give the same results as if this field
4062 * were set correctly. Either way is acceptable for
4063 * our purposes here.
4064 */
4065
4066 break;
4067
4068 default:
4069 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4070 cmd->sc_data_direction);
4071 BUG();
4072 break;
4073 }
4074
33a2ffce 4075 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
edd16368
SC
4076 cmd_free(h, c);
4077 return SCSI_MLQUEUE_HOST_BUSY;
4078 }
4079 enqueue_cmd_and_start_io(h, c);
4080 /* the cmd'll come back via intr handler in complete_scsi_command() */
4081 return 0;
4082}
4083
f281233d
JG
4084static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4085
5f389360
SC
4086static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4087{
4088 unsigned long flags;
4089
4090 /*
4091 * Don't let rescans be initiated on a controller known
4092 * to be locked up. If the controller locks up *during*
4093 * a rescan, that thread is probably hosed, but at least
4094 * we can prevent new rescan threads from piling up on a
4095 * locked up controller.
4096 */
094963da 4097 if (unlikely(lockup_detected(h))) {
5f389360
SC
4098 spin_lock_irqsave(&h->scan_lock, flags);
4099 h->scan_finished = 1;
4100 wake_up_all(&h->scan_wait_queue);
4101 spin_unlock_irqrestore(&h->scan_lock, flags);
4102 return 1;
4103 }
5f389360
SC
4104 return 0;
4105}
4106
a08a8471
SC
4107static void hpsa_scan_start(struct Scsi_Host *sh)
4108{
4109 struct ctlr_info *h = shost_to_hba(sh);
4110 unsigned long flags;
4111
5f389360
SC
4112 if (do_not_scan_if_controller_locked_up(h))
4113 return;
4114
a08a8471
SC
4115 /* wait until any scan already in progress is finished. */
4116 while (1) {
4117 spin_lock_irqsave(&h->scan_lock, flags);
4118 if (h->scan_finished)
4119 break;
4120 spin_unlock_irqrestore(&h->scan_lock, flags);
4121 wait_event(h->scan_wait_queue, h->scan_finished);
4122 /* Note: We don't need to worry about a race between this
4123 * thread and driver unload because the midlayer will
4124 * have incremented the reference count, so unload won't
4125 * happen if we're in here.
4126 */
4127 }
4128 h->scan_finished = 0; /* mark scan as in progress */
4129 spin_unlock_irqrestore(&h->scan_lock, flags);
4130
5f389360
SC
4131 if (do_not_scan_if_controller_locked_up(h))
4132 return;
4133
a08a8471
SC
4134 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4135
4136 spin_lock_irqsave(&h->scan_lock, flags);
4137 h->scan_finished = 1; /* mark scan as finished. */
4138 wake_up_all(&h->scan_wait_queue);
4139 spin_unlock_irqrestore(&h->scan_lock, flags);
4140}
4141
4142static int hpsa_scan_finished(struct Scsi_Host *sh,
4143 unsigned long elapsed_time)
4144{
4145 struct ctlr_info *h = shost_to_hba(sh);
4146 unsigned long flags;
4147 int finished;
4148
4149 spin_lock_irqsave(&h->scan_lock, flags);
4150 finished = h->scan_finished;
4151 spin_unlock_irqrestore(&h->scan_lock, flags);
4152 return finished;
4153}
4154
667e23d4
SC
4155static int hpsa_change_queue_depth(struct scsi_device *sdev,
4156 int qdepth, int reason)
4157{
4158 struct ctlr_info *h = sdev_to_hba(sdev);
4159
4160 if (reason != SCSI_QDEPTH_DEFAULT)
4161 return -ENOTSUPP;
4162
4163 if (qdepth < 1)
4164 qdepth = 1;
4165 else
4166 if (qdepth > h->nr_cmds)
4167 qdepth = h->nr_cmds;
4168 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4169 return sdev->queue_depth;
4170}
4171
edd16368
SC
4172static void hpsa_unregister_scsi(struct ctlr_info *h)
4173{
4174 /* we are being forcibly unloaded, and may not refuse. */
4175 scsi_remove_host(h->scsi_host);
4176 scsi_host_put(h->scsi_host);
4177 h->scsi_host = NULL;
4178}
4179
4180static int hpsa_register_scsi(struct ctlr_info *h)
4181{
b705690d
SC
4182 struct Scsi_Host *sh;
4183 int error;
edd16368 4184
b705690d
SC
4185 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4186 if (sh == NULL)
4187 goto fail;
4188
4189 sh->io_port = 0;
4190 sh->n_io_port = 0;
4191 sh->this_id = -1;
4192 sh->max_channel = 3;
4193 sh->max_cmd_len = MAX_COMMAND_SIZE;
4194 sh->max_lun = HPSA_MAX_LUN;
4195 sh->max_id = HPSA_MAX_LUN;
4196 sh->can_queue = h->nr_cmds;
316b221a
SC
4197 if (h->hba_mode_enabled)
4198 sh->cmd_per_lun = 7;
4199 else
4200 sh->cmd_per_lun = h->nr_cmds;
b705690d
SC
4201 sh->sg_tablesize = h->maxsgentries;
4202 h->scsi_host = sh;
4203 sh->hostdata[0] = (unsigned long) h;
4204 sh->irq = h->intr[h->intr_mode];
4205 sh->unique_id = sh->irq;
4206 error = scsi_add_host(sh, &h->pdev->dev);
4207 if (error)
4208 goto fail_host_put;
4209 scsi_scan_host(sh);
4210 return 0;
4211
4212 fail_host_put:
4213 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4214 " failed for controller %d\n", __func__, h->ctlr);
4215 scsi_host_put(sh);
4216 return error;
4217 fail:
4218 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4219 " failed for controller %d\n", __func__, h->ctlr);
4220 return -ENOMEM;
edd16368
SC
4221}
4222
4223static int wait_for_device_to_become_ready(struct ctlr_info *h,
4224 unsigned char lunaddr[])
4225{
8919358e 4226 int rc;
edd16368
SC
4227 int count = 0;
4228 int waittime = 1; /* seconds */
4229 struct CommandList *c;
4230
4231 c = cmd_special_alloc(h);
4232 if (!c) {
4233 dev_warn(&h->pdev->dev, "out of memory in "
4234 "wait_for_device_to_become_ready.\n");
4235 return IO_ERROR;
4236 }
4237
4238 /* Send test unit ready until device ready, or give up. */
4239 while (count < HPSA_TUR_RETRY_LIMIT) {
4240
4241 /* Wait for a bit. do this first, because if we send
4242 * the TUR right away, the reset will just abort it.
4243 */
4244 msleep(1000 * waittime);
4245 count++;
8919358e 4246 rc = 0; /* Device ready. */
edd16368
SC
4247
4248 /* Increase wait time with each try, up to a point. */
4249 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4250 waittime = waittime * 2;
4251
a2dac136
SC
4252 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4253 (void) fill_cmd(c, TEST_UNIT_READY, h,
4254 NULL, 0, 0, lunaddr, TYPE_CMD);
edd16368
SC
4255 hpsa_scsi_do_simple_cmd_core(h, c);
4256 /* no unmap needed here because no data xfer. */
4257
4258 if (c->err_info->CommandStatus == CMD_SUCCESS)
4259 break;
4260
4261 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4262 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4263 (c->err_info->SenseInfo[2] == NO_SENSE ||
4264 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4265 break;
4266
4267 dev_warn(&h->pdev->dev, "waiting %d secs "
4268 "for device to become ready.\n", waittime);
4269 rc = 1; /* device not ready. */
4270 }
4271
4272 if (rc)
4273 dev_warn(&h->pdev->dev, "giving up on device.\n");
4274 else
4275 dev_warn(&h->pdev->dev, "device is ready.\n");
4276
4277 cmd_special_free(h, c);
4278 return rc;
4279}
4280
4281/* Need at least one of these error handlers to keep ../scsi/hosts.c from
4282 * complaining. Doing a host- or bus-reset can't do anything good here.
4283 */
4284static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4285{
4286 int rc;
4287 struct ctlr_info *h;
4288 struct hpsa_scsi_dev_t *dev;
4289
4290 /* find the controller to which the command to be aborted was sent */
4291 h = sdev_to_hba(scsicmd->device);
4292 if (h == NULL) /* paranoia */
4293 return FAILED;
edd16368
SC
4294 dev = scsicmd->device->hostdata;
4295 if (!dev) {
4296 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4297 "device lookup failed.\n");
4298 return FAILED;
4299 }
d416b0c7
SC
4300 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4301 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
edd16368 4302 /* send a reset to the SCSI LUN which the command was sent to */
bf711ac6 4303 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
edd16368
SC
4304 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4305 return SUCCESS;
4306
4307 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4308 return FAILED;
4309}
4310
6cba3f19
SC
4311static void swizzle_abort_tag(u8 *tag)
4312{
4313 u8 original_tag[8];
4314
4315 memcpy(original_tag, tag, 8);
4316 tag[0] = original_tag[3];
4317 tag[1] = original_tag[2];
4318 tag[2] = original_tag[1];
4319 tag[3] = original_tag[0];
4320 tag[4] = original_tag[7];
4321 tag[5] = original_tag[6];
4322 tag[6] = original_tag[5];
4323 tag[7] = original_tag[4];
4324}
4325
17eb87d2
ST
4326static void hpsa_get_tag(struct ctlr_info *h,
4327 struct CommandList *c, u32 *taglower, u32 *tagupper)
4328{
4329 if (c->cmd_type == CMD_IOACCEL1) {
4330 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4331 &h->ioaccel_cmd_pool[c->cmdindex];
4332 *tagupper = cm1->Tag.upper;
4333 *taglower = cm1->Tag.lower;
54b6e9e9
ST
4334 return;
4335 }
4336 if (c->cmd_type == CMD_IOACCEL2) {
4337 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4338 &h->ioaccel2_cmd_pool[c->cmdindex];
dd0e19f3
ST
4339 /* upper tag not used in ioaccel2 mode */
4340 memset(tagupper, 0, sizeof(*tagupper));
4341 *taglower = cm2->Tag;
54b6e9e9 4342 return;
17eb87d2 4343 }
54b6e9e9
ST
4344 *tagupper = c->Header.Tag.upper;
4345 *taglower = c->Header.Tag.lower;
17eb87d2
ST
4346}
4347
54b6e9e9 4348
75167d2c 4349static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
6cba3f19 4350 struct CommandList *abort, int swizzle)
75167d2c
SC
4351{
4352 int rc = IO_OK;
4353 struct CommandList *c;
4354 struct ErrorInfo *ei;
17eb87d2 4355 u32 tagupper, taglower;
75167d2c
SC
4356
4357 c = cmd_special_alloc(h);
4358 if (c == NULL) { /* trouble... */
4359 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4360 return -ENOMEM;
4361 }
4362
a2dac136
SC
4363 /* fill_cmd can't fail here, no buffer to map */
4364 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4365 0, 0, scsi3addr, TYPE_MSG);
6cba3f19
SC
4366 if (swizzle)
4367 swizzle_abort_tag(&c->Request.CDB[4]);
75167d2c 4368 hpsa_scsi_do_simple_cmd_core(h, c);
17eb87d2 4369 hpsa_get_tag(h, abort, &taglower, &tagupper);
75167d2c 4370 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
17eb87d2 4371 __func__, tagupper, taglower);
75167d2c
SC
4372 /* no unmap needed here because no data xfer. */
4373
4374 ei = c->err_info;
4375 switch (ei->CommandStatus) {
4376 case CMD_SUCCESS:
4377 break;
4378 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4379 rc = -1;
4380 break;
4381 default:
4382 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
17eb87d2 4383 __func__, tagupper, taglower);
d1e8beac 4384 hpsa_scsi_interpret_error(h, c);
75167d2c
SC
4385 rc = -1;
4386 break;
4387 }
4388 cmd_special_free(h, c);
dd0e19f3
ST
4389 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4390 __func__, tagupper, taglower);
75167d2c
SC
4391 return rc;
4392}
4393
4394/*
4395 * hpsa_find_cmd_in_queue
4396 *
4397 * Used to determine whether a command (find) is still present
4398 * in queue_head. Optionally excludes the last element of queue_head.
4399 *
4400 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
4401 * not yet been submitted, and so can be aborted by the driver without
4402 * sending an abort to the hardware.
4403 *
4404 * Returns pointer to command if found in queue, NULL otherwise.
4405 */
4406static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4407 struct scsi_cmnd *find, struct list_head *queue_head)
4408{
4409 unsigned long flags;
4410 struct CommandList *c = NULL; /* ptr into cmpQ */
4411
4412 if (!find)
4413 return 0;
4414 spin_lock_irqsave(&h->lock, flags);
4415 list_for_each_entry(c, queue_head, list) {
4416 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4417 continue;
4418 if (c->scsi_cmd == find) {
4419 spin_unlock_irqrestore(&h->lock, flags);
4420 return c;
4421 }
4422 }
4423 spin_unlock_irqrestore(&h->lock, flags);
4424 return NULL;
4425}
4426
6cba3f19
SC
4427static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4428 u8 *tag, struct list_head *queue_head)
4429{
4430 unsigned long flags;
4431 struct CommandList *c;
4432
4433 spin_lock_irqsave(&h->lock, flags);
4434 list_for_each_entry(c, queue_head, list) {
4435 if (memcmp(&c->Header.Tag, tag, 8) != 0)
4436 continue;
4437 spin_unlock_irqrestore(&h->lock, flags);
4438 return c;
4439 }
4440 spin_unlock_irqrestore(&h->lock, flags);
4441 return NULL;
4442}
4443
54b6e9e9
ST
4444/* ioaccel2 path firmware cannot handle abort task requests.
4445 * Change abort requests to physical target reset, and send to the
4446 * address of the physical disk used for the ioaccel 2 command.
4447 * Return 0 on success (IO_OK)
4448 * -1 on failure
4449 */
4450
4451static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4452 unsigned char *scsi3addr, struct CommandList *abort)
4453{
4454 int rc = IO_OK;
4455 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4456 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4457 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4458 unsigned char *psa = &phys_scsi3addr[0];
4459
4460 /* Get a pointer to the hpsa logical device. */
4461 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4462 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4463 if (dev == NULL) {
4464 dev_warn(&h->pdev->dev,
4465 "Cannot abort: no device pointer for command.\n");
4466 return -1; /* not abortable */
4467 }
4468
2ba8bfc8
SC
4469 if (h->raid_offload_debug > 0)
4470 dev_info(&h->pdev->dev,
4471 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4472 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4473 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4474 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4475
54b6e9e9
ST
4476 if (!dev->offload_enabled) {
4477 dev_warn(&h->pdev->dev,
4478 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4479 return -1; /* not abortable */
4480 }
4481
4482 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4483 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4484 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4485 return -1; /* not abortable */
4486 }
4487
4488 /* send the reset */
2ba8bfc8
SC
4489 if (h->raid_offload_debug > 0)
4490 dev_info(&h->pdev->dev,
4491 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4492 psa[0], psa[1], psa[2], psa[3],
4493 psa[4], psa[5], psa[6], psa[7]);
54b6e9e9
ST
4494 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4495 if (rc != 0) {
4496 dev_warn(&h->pdev->dev,
4497 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4498 psa[0], psa[1], psa[2], psa[3],
4499 psa[4], psa[5], psa[6], psa[7]);
4500 return rc; /* failed to reset */
4501 }
4502
4503 /* wait for device to recover */
4504 if (wait_for_device_to_become_ready(h, psa) != 0) {
4505 dev_warn(&h->pdev->dev,
4506 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4507 psa[0], psa[1], psa[2], psa[3],
4508 psa[4], psa[5], psa[6], psa[7]);
4509 return -1; /* failed to recover */
4510 }
4511
4512 /* device recovered */
4513 dev_info(&h->pdev->dev,
4514 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4515 psa[0], psa[1], psa[2], psa[3],
4516 psa[4], psa[5], psa[6], psa[7]);
4517
4518 return rc; /* success */
4519}
4520
6cba3f19
SC
4521/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4522 * tell which kind we're dealing with, so we send the abort both ways. There
4523 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4524 * way we construct our tags but we check anyway in case the assumptions which
4525 * make this true someday become false.
4526 */
4527static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4528 unsigned char *scsi3addr, struct CommandList *abort)
4529{
4530 u8 swizzled_tag[8];
4531 struct CommandList *c;
4532 int rc = 0, rc2 = 0;
4533
54b6e9e9
ST
4534 /* ioccelerator mode 2 commands should be aborted via the
4535 * accelerated path, since RAID path is unaware of these commands,
4536 * but underlying firmware can't handle abort TMF.
4537 * Change abort to physical device reset.
4538 */
4539 if (abort->cmd_type == CMD_IOACCEL2)
4540 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4541
6cba3f19
SC
4542 /* we do not expect to find the swizzled tag in our queue, but
4543 * check anyway just to be sure the assumptions which make this
4544 * the case haven't become wrong.
4545 */
4546 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4547 swizzle_abort_tag(swizzled_tag);
4548 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4549 if (c != NULL) {
4550 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4551 return hpsa_send_abort(h, scsi3addr, abort, 0);
4552 }
4553 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4554
4555 /* if the command is still in our queue, we can't conclude that it was
4556 * aborted (it might have just completed normally) but in any case
4557 * we don't need to try to abort it another way.
4558 */
4559 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4560 if (c)
4561 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
4562 return rc && rc2;
4563}
4564
75167d2c
SC
4565/* Send an abort for the specified command.
4566 * If the device and controller support it,
4567 * send a task abort request.
4568 */
4569static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4570{
4571
4572 int i, rc;
4573 struct ctlr_info *h;
4574 struct hpsa_scsi_dev_t *dev;
4575 struct CommandList *abort; /* pointer to command to be aborted */
4576 struct CommandList *found;
4577 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4578 char msg[256]; /* For debug messaging. */
4579 int ml = 0;
17eb87d2 4580 u32 tagupper, taglower;
75167d2c
SC
4581
4582 /* Find the controller of the command to be aborted */
4583 h = sdev_to_hba(sc->device);
4584 if (WARN(h == NULL,
4585 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4586 return FAILED;
4587
4588 /* Check that controller supports some kind of task abort */
4589 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4590 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4591 return FAILED;
4592
4593 memset(msg, 0, sizeof(msg));
9cb78c16 4594 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
75167d2c
SC
4595 h->scsi_host->host_no, sc->device->channel,
4596 sc->device->id, sc->device->lun);
4597
4598 /* Find the device of the command to be aborted */
4599 dev = sc->device->hostdata;
4600 if (!dev) {
4601 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4602 msg);
4603 return FAILED;
4604 }
4605
4606 /* Get SCSI command to be aborted */
4607 abort = (struct CommandList *) sc->host_scribble;
4608 if (abort == NULL) {
4609 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4610 msg);
4611 return FAILED;
4612 }
17eb87d2
ST
4613 hpsa_get_tag(h, abort, &taglower, &tagupper);
4614 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
75167d2c
SC
4615 as = (struct scsi_cmnd *) abort->scsi_cmd;
4616 if (as != NULL)
4617 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4618 as->cmnd[0], as->serial_number);
4619 dev_dbg(&h->pdev->dev, "%s\n", msg);
4620 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4621 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4622
4623 /* Search reqQ to See if command is queued but not submitted,
4624 * if so, complete the command with aborted status and remove
4625 * it from the reqQ.
4626 */
4627 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4628 if (found) {
4629 found->err_info->CommandStatus = CMD_ABORTED;
4630 finish_cmd(found);
4631 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4632 msg);
4633 return SUCCESS;
4634 }
4635
4636 /* not in reqQ, if also not in cmpQ, must have already completed */
4637 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4638 if (!found) {
d6ebd0f7 4639 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
75167d2c
SC
4640 msg);
4641 return SUCCESS;
4642 }
4643
4644 /*
4645 * Command is in flight, or possibly already completed
4646 * by the firmware (but not to the scsi mid layer) but we can't
4647 * distinguish which. Send the abort down.
4648 */
6cba3f19 4649 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
75167d2c
SC
4650 if (rc != 0) {
4651 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4652 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4653 h->scsi_host->host_no,
4654 dev->bus, dev->target, dev->lun);
4655 return FAILED;
4656 }
4657 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4658
4659 /* If the abort(s) above completed and actually aborted the
4660 * command, then the command to be aborted should already be
4661 * completed. If not, wait around a bit more to see if they
4662 * manage to complete normally.
4663 */
4664#define ABORT_COMPLETE_WAIT_SECS 30
4665 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4666 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4667 if (!found)
4668 return SUCCESS;
4669 msleep(100);
4670 }
4671 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4672 msg, ABORT_COMPLETE_WAIT_SECS);
4673 return FAILED;
4674}
4675
4676
edd16368
SC
4677/*
4678 * For operations that cannot sleep, a command block is allocated at init,
4679 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4680 * which ones are free or in use. Lock must be held when calling this.
4681 * cmd_free() is the complement.
4682 */
4683static struct CommandList *cmd_alloc(struct ctlr_info *h)
4684{
4685 struct CommandList *c;
4686 int i;
4687 union u64bit temp64;
4688 dma_addr_t cmd_dma_handle, err_dma_handle;
e16a33ad 4689 unsigned long flags;
edd16368 4690
e16a33ad 4691 spin_lock_irqsave(&h->lock, flags);
edd16368
SC
4692 do {
4693 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
e16a33ad
MG
4694 if (i == h->nr_cmds) {
4695 spin_unlock_irqrestore(&h->lock, flags);
edd16368 4696 return NULL;
e16a33ad 4697 }
edd16368
SC
4698 } while (test_and_set_bit
4699 (i & (BITS_PER_LONG - 1),
4700 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
e16a33ad
MG
4701 spin_unlock_irqrestore(&h->lock, flags);
4702
edd16368
SC
4703 c = h->cmd_pool + i;
4704 memset(c, 0, sizeof(*c));
4705 cmd_dma_handle = h->cmd_pool_dhandle
4706 + i * sizeof(*c);
4707 c->err_info = h->errinfo_pool + i;
4708 memset(c->err_info, 0, sizeof(*c->err_info));
4709 err_dma_handle = h->errinfo_pool_dhandle
4710 + i * sizeof(*c->err_info);
edd16368
SC
4711
4712 c->cmdindex = i;
4713
9e0fc764 4714 INIT_LIST_HEAD(&c->list);
01a02ffc
SC
4715 c->busaddr = (u32) cmd_dma_handle;
4716 temp64.val = (u64) err_dma_handle;
edd16368
SC
4717 c->ErrDesc.Addr.lower = temp64.val32.lower;
4718 c->ErrDesc.Addr.upper = temp64.val32.upper;
4719 c->ErrDesc.Len = sizeof(*c->err_info);
4720
4721 c->h = h;
4722 return c;
4723}
4724
4725/* For operations that can wait for kmalloc to possibly sleep,
4726 * this routine can be called. Lock need not be held to call
4727 * cmd_special_alloc. cmd_special_free() is the complement.
4728 */
4729static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4730{
4731 struct CommandList *c;
4732 union u64bit temp64;
4733 dma_addr_t cmd_dma_handle, err_dma_handle;
4734
7c845eb5 4735 c = pci_zalloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
edd16368
SC
4736 if (c == NULL)
4737 return NULL;
edd16368 4738
e1f7de0c 4739 c->cmd_type = CMD_SCSI;
edd16368
SC
4740 c->cmdindex = -1;
4741
7c845eb5
JP
4742 c->err_info = pci_zalloc_consistent(h->pdev, sizeof(*c->err_info),
4743 &err_dma_handle);
edd16368
SC
4744
4745 if (c->err_info == NULL) {
4746 pci_free_consistent(h->pdev,
4747 sizeof(*c), c, cmd_dma_handle);
4748 return NULL;
4749 }
edd16368 4750
9e0fc764 4751 INIT_LIST_HEAD(&c->list);
01a02ffc
SC
4752 c->busaddr = (u32) cmd_dma_handle;
4753 temp64.val = (u64) err_dma_handle;
edd16368
SC
4754 c->ErrDesc.Addr.lower = temp64.val32.lower;
4755 c->ErrDesc.Addr.upper = temp64.val32.upper;
4756 c->ErrDesc.Len = sizeof(*c->err_info);
4757
4758 c->h = h;
4759 return c;
4760}
4761
4762static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4763{
4764 int i;
e16a33ad 4765 unsigned long flags;
edd16368
SC
4766
4767 i = c - h->cmd_pool;
e16a33ad 4768 spin_lock_irqsave(&h->lock, flags);
edd16368
SC
4769 clear_bit(i & (BITS_PER_LONG - 1),
4770 h->cmd_pool_bits + (i / BITS_PER_LONG));
e16a33ad 4771 spin_unlock_irqrestore(&h->lock, flags);
edd16368
SC
4772}
4773
4774static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4775{
4776 union u64bit temp64;
4777
4778 temp64.val32.lower = c->ErrDesc.Addr.lower;
4779 temp64.val32.upper = c->ErrDesc.Addr.upper;
4780 pci_free_consistent(h->pdev, sizeof(*c->err_info),
4781 c->err_info, (dma_addr_t) temp64.val);
4782 pci_free_consistent(h->pdev, sizeof(*c),
d896f3f3 4783 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
edd16368
SC
4784}
4785
4786#ifdef CONFIG_COMPAT
4787
edd16368
SC
4788static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4789{
4790 IOCTL32_Command_struct __user *arg32 =
4791 (IOCTL32_Command_struct __user *) arg;
4792 IOCTL_Command_struct arg64;
4793 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4794 int err;
4795 u32 cp;
4796
938abd84 4797 memset(&arg64, 0, sizeof(arg64));
edd16368
SC
4798 err = 0;
4799 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4800 sizeof(arg64.LUN_info));
4801 err |= copy_from_user(&arg64.Request, &arg32->Request,
4802 sizeof(arg64.Request));
4803 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4804 sizeof(arg64.error_info));
4805 err |= get_user(arg64.buf_size, &arg32->buf_size);
4806 err |= get_user(cp, &arg32->buf);
4807 arg64.buf = compat_ptr(cp);
4808 err |= copy_to_user(p, &arg64, sizeof(arg64));
4809
4810 if (err)
4811 return -EFAULT;
4812
e39eeaed 4813 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
edd16368
SC
4814 if (err)
4815 return err;
4816 err |= copy_in_user(&arg32->error_info, &p->error_info,
4817 sizeof(arg32->error_info));
4818 if (err)
4819 return -EFAULT;
4820 return err;
4821}
4822
4823static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4824 int cmd, void *arg)
4825{
4826 BIG_IOCTL32_Command_struct __user *arg32 =
4827 (BIG_IOCTL32_Command_struct __user *) arg;
4828 BIG_IOCTL_Command_struct arg64;
4829 BIG_IOCTL_Command_struct __user *p =
4830 compat_alloc_user_space(sizeof(arg64));
4831 int err;
4832 u32 cp;
4833
938abd84 4834 memset(&arg64, 0, sizeof(arg64));
edd16368
SC
4835 err = 0;
4836 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4837 sizeof(arg64.LUN_info));
4838 err |= copy_from_user(&arg64.Request, &arg32->Request,
4839 sizeof(arg64.Request));
4840 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4841 sizeof(arg64.error_info));
4842 err |= get_user(arg64.buf_size, &arg32->buf_size);
4843 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4844 err |= get_user(cp, &arg32->buf);
4845 arg64.buf = compat_ptr(cp);
4846 err |= copy_to_user(p, &arg64, sizeof(arg64));
4847
4848 if (err)
4849 return -EFAULT;
4850
e39eeaed 4851 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
edd16368
SC
4852 if (err)
4853 return err;
4854 err |= copy_in_user(&arg32->error_info, &p->error_info,
4855 sizeof(arg32->error_info));
4856 if (err)
4857 return -EFAULT;
4858 return err;
4859}
71fe75a7
SC
4860
4861static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
4862{
4863 switch (cmd) {
4864 case CCISS_GETPCIINFO:
4865 case CCISS_GETINTINFO:
4866 case CCISS_SETINTINFO:
4867 case CCISS_GETNODENAME:
4868 case CCISS_SETNODENAME:
4869 case CCISS_GETHEARTBEAT:
4870 case CCISS_GETBUSTYPES:
4871 case CCISS_GETFIRMVER:
4872 case CCISS_GETDRIVVER:
4873 case CCISS_REVALIDVOLS:
4874 case CCISS_DEREGDISK:
4875 case CCISS_REGNEWDISK:
4876 case CCISS_REGNEWD:
4877 case CCISS_RESCANDISK:
4878 case CCISS_GETLUNINFO:
4879 return hpsa_ioctl(dev, cmd, arg);
4880
4881 case CCISS_PASSTHRU32:
4882 return hpsa_ioctl32_passthru(dev, cmd, arg);
4883 case CCISS_BIG_PASSTHRU32:
4884 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4885
4886 default:
4887 return -ENOIOCTLCMD;
4888 }
4889}
edd16368
SC
4890#endif
4891
4892static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4893{
4894 struct hpsa_pci_info pciinfo;
4895
4896 if (!argp)
4897 return -EINVAL;
4898 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4899 pciinfo.bus = h->pdev->bus->number;
4900 pciinfo.dev_fn = h->pdev->devfn;
4901 pciinfo.board_id = h->board_id;
4902 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4903 return -EFAULT;
4904 return 0;
4905}
4906
4907static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4908{
4909 DriverVer_type DriverVer;
4910 unsigned char vmaj, vmin, vsubmin;
4911 int rc;
4912
4913 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4914 &vmaj, &vmin, &vsubmin);
4915 if (rc != 3) {
4916 dev_info(&h->pdev->dev, "driver version string '%s' "
4917 "unrecognized.", HPSA_DRIVER_VERSION);
4918 vmaj = 0;
4919 vmin = 0;
4920 vsubmin = 0;
4921 }
4922 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4923 if (!argp)
4924 return -EINVAL;
4925 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4926 return -EFAULT;
4927 return 0;
4928}
4929
4930static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4931{
4932 IOCTL_Command_struct iocommand;
4933 struct CommandList *c;
4934 char *buff = NULL;
4935 union u64bit temp64;
c1f63c8f 4936 int rc = 0;
edd16368
SC
4937
4938 if (!argp)
4939 return -EINVAL;
4940 if (!capable(CAP_SYS_RAWIO))
4941 return -EPERM;
4942 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4943 return -EFAULT;
4944 if ((iocommand.buf_size < 1) &&
4945 (iocommand.Request.Type.Direction != XFER_NONE)) {
4946 return -EINVAL;
4947 }
4948 if (iocommand.buf_size > 0) {
4949 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4950 if (buff == NULL)
4951 return -EFAULT;
9233fb10 4952 if (iocommand.Request.Type.Direction & XFER_WRITE) {
b03a7771
SC
4953 /* Copy the data into the buffer we created */
4954 if (copy_from_user(buff, iocommand.buf,
4955 iocommand.buf_size)) {
c1f63c8f
SC
4956 rc = -EFAULT;
4957 goto out_kfree;
b03a7771
SC
4958 }
4959 } else {
4960 memset(buff, 0, iocommand.buf_size);
edd16368 4961 }
b03a7771 4962 }
edd16368
SC
4963 c = cmd_special_alloc(h);
4964 if (c == NULL) {
c1f63c8f
SC
4965 rc = -ENOMEM;
4966 goto out_kfree;
edd16368
SC
4967 }
4968 /* Fill in the command type */
4969 c->cmd_type = CMD_IOCTL_PEND;
4970 /* Fill in Command Header */
4971 c->Header.ReplyQueue = 0; /* unused in simple mode */
4972 if (iocommand.buf_size > 0) { /* buffer to fill */
4973 c->Header.SGList = 1;
4974 c->Header.SGTotal = 1;
4975 } else { /* no buffers to fill */
4976 c->Header.SGList = 0;
4977 c->Header.SGTotal = 0;
4978 }
4979 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4980 /* use the kernel address the cmd block for tag */
4981 c->Header.Tag.lower = c->busaddr;
4982
4983 /* Fill in Request block */
4984 memcpy(&c->Request, &iocommand.Request,
4985 sizeof(c->Request));
4986
4987 /* Fill in the scatter gather information */
4988 if (iocommand.buf_size > 0) {
4989 temp64.val = pci_map_single(h->pdev, buff,
4990 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
bcc48ffa
SC
4991 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
4992 c->SG[0].Addr.lower = 0;
4993 c->SG[0].Addr.upper = 0;
4994 c->SG[0].Len = 0;
4995 rc = -ENOMEM;
4996 goto out;
4997 }
edd16368
SC
4998 c->SG[0].Addr.lower = temp64.val32.lower;
4999 c->SG[0].Addr.upper = temp64.val32.upper;
5000 c->SG[0].Len = iocommand.buf_size;
e1d9cbfa 5001 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
edd16368 5002 }
a0c12413 5003 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
c2dd32e0
SC
5004 if (iocommand.buf_size > 0)
5005 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
edd16368
SC
5006 check_ioctl_unit_attention(h, c);
5007
5008 /* Copy the error information out */
5009 memcpy(&iocommand.error_info, c->err_info,
5010 sizeof(iocommand.error_info));
5011 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
c1f63c8f
SC
5012 rc = -EFAULT;
5013 goto out;
edd16368 5014 }
9233fb10 5015 if ((iocommand.Request.Type.Direction & XFER_READ) &&
b03a7771 5016 iocommand.buf_size > 0) {
edd16368
SC
5017 /* Copy the data out of the buffer we created */
5018 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
c1f63c8f
SC
5019 rc = -EFAULT;
5020 goto out;
edd16368
SC
5021 }
5022 }
c1f63c8f 5023out:
edd16368 5024 cmd_special_free(h, c);
c1f63c8f
SC
5025out_kfree:
5026 kfree(buff);
5027 return rc;
edd16368
SC
5028}
5029
5030static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5031{
5032 BIG_IOCTL_Command_struct *ioc;
5033 struct CommandList *c;
5034 unsigned char **buff = NULL;
5035 int *buff_size = NULL;
5036 union u64bit temp64;
5037 BYTE sg_used = 0;
5038 int status = 0;
5039 int i;
01a02ffc
SC
5040 u32 left;
5041 u32 sz;
edd16368
SC
5042 BYTE __user *data_ptr;
5043
5044 if (!argp)
5045 return -EINVAL;
5046 if (!capable(CAP_SYS_RAWIO))
5047 return -EPERM;
5048 ioc = (BIG_IOCTL_Command_struct *)
5049 kmalloc(sizeof(*ioc), GFP_KERNEL);
5050 if (!ioc) {
5051 status = -ENOMEM;
5052 goto cleanup1;
5053 }
5054 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5055 status = -EFAULT;
5056 goto cleanup1;
5057 }
5058 if ((ioc->buf_size < 1) &&
5059 (ioc->Request.Type.Direction != XFER_NONE)) {
5060 status = -EINVAL;
5061 goto cleanup1;
5062 }
5063 /* Check kmalloc limits using all SGs */
5064 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5065 status = -EINVAL;
5066 goto cleanup1;
5067 }
d66ae08b 5068 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
edd16368
SC
5069 status = -EINVAL;
5070 goto cleanup1;
5071 }
d66ae08b 5072 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
edd16368
SC
5073 if (!buff) {
5074 status = -ENOMEM;
5075 goto cleanup1;
5076 }
d66ae08b 5077 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
edd16368
SC
5078 if (!buff_size) {
5079 status = -ENOMEM;
5080 goto cleanup1;
5081 }
5082 left = ioc->buf_size;
5083 data_ptr = ioc->buf;
5084 while (left) {
5085 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5086 buff_size[sg_used] = sz;
5087 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5088 if (buff[sg_used] == NULL) {
5089 status = -ENOMEM;
5090 goto cleanup1;
5091 }
9233fb10 5092 if (ioc->Request.Type.Direction & XFER_WRITE) {
edd16368 5093 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
0758f4f7 5094 status = -EFAULT;
edd16368
SC
5095 goto cleanup1;
5096 }
5097 } else
5098 memset(buff[sg_used], 0, sz);
5099 left -= sz;
5100 data_ptr += sz;
5101 sg_used++;
5102 }
5103 c = cmd_special_alloc(h);
5104 if (c == NULL) {
5105 status = -ENOMEM;
5106 goto cleanup1;
5107 }
5108 c->cmd_type = CMD_IOCTL_PEND;
5109 c->Header.ReplyQueue = 0;
b03a7771 5110 c->Header.SGList = c->Header.SGTotal = sg_used;
edd16368
SC
5111 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5112 c->Header.Tag.lower = c->busaddr;
5113 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5114 if (ioc->buf_size > 0) {
5115 int i;
5116 for (i = 0; i < sg_used; i++) {
5117 temp64.val = pci_map_single(h->pdev, buff[i],
5118 buff_size[i], PCI_DMA_BIDIRECTIONAL);
bcc48ffa
SC
5119 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5120 c->SG[i].Addr.lower = 0;
5121 c->SG[i].Addr.upper = 0;
5122 c->SG[i].Len = 0;
5123 hpsa_pci_unmap(h->pdev, c, i,
5124 PCI_DMA_BIDIRECTIONAL);
5125 status = -ENOMEM;
e2d4a1f6 5126 goto cleanup0;
bcc48ffa 5127 }
edd16368
SC
5128 c->SG[i].Addr.lower = temp64.val32.lower;
5129 c->SG[i].Addr.upper = temp64.val32.upper;
5130 c->SG[i].Len = buff_size[i];
e1d9cbfa 5131 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
edd16368
SC
5132 }
5133 }
a0c12413 5134 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
b03a7771
SC
5135 if (sg_used)
5136 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
edd16368
SC
5137 check_ioctl_unit_attention(h, c);
5138 /* Copy the error information out */
5139 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5140 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
edd16368 5141 status = -EFAULT;
e2d4a1f6 5142 goto cleanup0;
edd16368 5143 }
9233fb10 5144 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
edd16368
SC
5145 /* Copy the data out of the buffer we created */
5146 BYTE __user *ptr = ioc->buf;
5147 for (i = 0; i < sg_used; i++) {
5148 if (copy_to_user(ptr, buff[i], buff_size[i])) {
edd16368 5149 status = -EFAULT;
e2d4a1f6 5150 goto cleanup0;
edd16368
SC
5151 }
5152 ptr += buff_size[i];
5153 }
5154 }
edd16368 5155 status = 0;
e2d4a1f6
SC
5156cleanup0:
5157 cmd_special_free(h, c);
edd16368
SC
5158cleanup1:
5159 if (buff) {
5160 for (i = 0; i < sg_used; i++)
5161 kfree(buff[i]);
5162 kfree(buff);
5163 }
5164 kfree(buff_size);
5165 kfree(ioc);
5166 return status;
5167}
5168
5169static void check_ioctl_unit_attention(struct ctlr_info *h,
5170 struct CommandList *c)
5171{
5172 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5173 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5174 (void) check_for_unit_attention(h, c);
5175}
0390f0c0
SC
5176
5177static int increment_passthru_count(struct ctlr_info *h)
5178{
5179 unsigned long flags;
5180
5181 spin_lock_irqsave(&h->passthru_count_lock, flags);
5182 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5183 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5184 return -1;
5185 }
5186 h->passthru_count++;
5187 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5188 return 0;
5189}
5190
5191static void decrement_passthru_count(struct ctlr_info *h)
5192{
5193 unsigned long flags;
5194
5195 spin_lock_irqsave(&h->passthru_count_lock, flags);
5196 if (h->passthru_count <= 0) {
5197 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5198 /* not expecting to get here. */
5199 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5200 return;
5201 }
5202 h->passthru_count--;
5203 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5204}
5205
edd16368
SC
5206/*
5207 * ioctl
5208 */
5209static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
5210{
5211 struct ctlr_info *h;
5212 void __user *argp = (void __user *)arg;
0390f0c0 5213 int rc;
edd16368
SC
5214
5215 h = sdev_to_hba(dev);
5216
5217 switch (cmd) {
5218 case CCISS_DEREGDISK:
5219 case CCISS_REGNEWDISK:
5220 case CCISS_REGNEWD:
a08a8471 5221 hpsa_scan_start(h->scsi_host);
edd16368
SC
5222 return 0;
5223 case CCISS_GETPCIINFO:
5224 return hpsa_getpciinfo_ioctl(h, argp);
5225 case CCISS_GETDRIVVER:
5226 return hpsa_getdrivver_ioctl(h, argp);
5227 case CCISS_PASSTHRU:
0390f0c0
SC
5228 if (increment_passthru_count(h))
5229 return -EAGAIN;
5230 rc = hpsa_passthru_ioctl(h, argp);
5231 decrement_passthru_count(h);
5232 return rc;
edd16368 5233 case CCISS_BIG_PASSTHRU:
0390f0c0
SC
5234 if (increment_passthru_count(h))
5235 return -EAGAIN;
5236 rc = hpsa_big_passthru_ioctl(h, argp);
5237 decrement_passthru_count(h);
5238 return rc;
edd16368
SC
5239 default:
5240 return -ENOTTY;
5241 }
5242}
5243
6f039790
GKH
5244static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5245 u8 reset_type)
64670ac8
SC
5246{
5247 struct CommandList *c;
5248
5249 c = cmd_alloc(h);
5250 if (!c)
5251 return -ENOMEM;
a2dac136
SC
5252 /* fill_cmd can't fail here, no data buffer to map */
5253 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
64670ac8
SC
5254 RAID_CTLR_LUNID, TYPE_MSG);
5255 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5256 c->waiting = NULL;
5257 enqueue_cmd_and_start_io(h, c);
5258 /* Don't wait for completion, the reset won't complete. Don't free
5259 * the command either. This is the last command we will send before
5260 * re-initializing everything, so it doesn't matter and won't leak.
5261 */
5262 return 0;
5263}
5264
a2dac136 5265static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
b7bb24eb 5266 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
edd16368
SC
5267 int cmd_type)
5268{
5269 int pci_dir = XFER_NONE;
75167d2c 5270 struct CommandList *a; /* for commands to be aborted */
edd16368
SC
5271
5272 c->cmd_type = CMD_IOCTL_PEND;
5273 c->Header.ReplyQueue = 0;
5274 if (buff != NULL && size > 0) {
5275 c->Header.SGList = 1;
5276 c->Header.SGTotal = 1;
5277 } else {
5278 c->Header.SGList = 0;
5279 c->Header.SGTotal = 0;
5280 }
5281 c->Header.Tag.lower = c->busaddr;
5282 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5283
5284 c->Request.Type.Type = cmd_type;
5285 if (cmd_type == TYPE_CMD) {
5286 switch (cmd) {
5287 case HPSA_INQUIRY:
5288 /* are we trying to read a vital product page */
b7bb24eb 5289 if (page_code & VPD_PAGE) {
edd16368 5290 c->Request.CDB[1] = 0x01;
b7bb24eb 5291 c->Request.CDB[2] = (page_code & 0xff);
edd16368
SC
5292 }
5293 c->Request.CDBLen = 6;
5294 c->Request.Type.Attribute = ATTR_SIMPLE;
5295 c->Request.Type.Direction = XFER_READ;
5296 c->Request.Timeout = 0;
5297 c->Request.CDB[0] = HPSA_INQUIRY;
5298 c->Request.CDB[4] = size & 0xFF;
5299 break;
5300 case HPSA_REPORT_LOG:
5301 case HPSA_REPORT_PHYS:
5302 /* Talking to controller so It's a physical command
5303 mode = 00 target = 0. Nothing to write.
5304 */
5305 c->Request.CDBLen = 12;
5306 c->Request.Type.Attribute = ATTR_SIMPLE;
5307 c->Request.Type.Direction = XFER_READ;
5308 c->Request.Timeout = 0;
5309 c->Request.CDB[0] = cmd;
5310 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5311 c->Request.CDB[7] = (size >> 16) & 0xFF;
5312 c->Request.CDB[8] = (size >> 8) & 0xFF;
5313 c->Request.CDB[9] = size & 0xFF;
5314 break;
edd16368
SC
5315 case HPSA_CACHE_FLUSH:
5316 c->Request.CDBLen = 12;
5317 c->Request.Type.Attribute = ATTR_SIMPLE;
5318 c->Request.Type.Direction = XFER_WRITE;
5319 c->Request.Timeout = 0;
5320 c->Request.CDB[0] = BMIC_WRITE;
5321 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
bb158eab
SC
5322 c->Request.CDB[7] = (size >> 8) & 0xFF;
5323 c->Request.CDB[8] = size & 0xFF;
edd16368
SC
5324 break;
5325 case TEST_UNIT_READY:
5326 c->Request.CDBLen = 6;
5327 c->Request.Type.Attribute = ATTR_SIMPLE;
5328 c->Request.Type.Direction = XFER_NONE;
5329 c->Request.Timeout = 0;
5330 break;
283b4a9b
SC
5331 case HPSA_GET_RAID_MAP:
5332 c->Request.CDBLen = 12;
5333 c->Request.Type.Attribute = ATTR_SIMPLE;
5334 c->Request.Type.Direction = XFER_READ;
5335 c->Request.Timeout = 0;
5336 c->Request.CDB[0] = HPSA_CISS_READ;
5337 c->Request.CDB[1] = cmd;
5338 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5339 c->Request.CDB[7] = (size >> 16) & 0xFF;
5340 c->Request.CDB[8] = (size >> 8) & 0xFF;
5341 c->Request.CDB[9] = size & 0xFF;
5342 break;
316b221a
SC
5343 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5344 c->Request.CDBLen = 10;
5345 c->Request.Type.Attribute = ATTR_SIMPLE;
5346 c->Request.Type.Direction = XFER_READ;
5347 c->Request.Timeout = 0;
5348 c->Request.CDB[0] = BMIC_READ;
5349 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5350 c->Request.CDB[7] = (size >> 16) & 0xFF;
5351 c->Request.CDB[8] = (size >> 8) & 0xFF;
5352 break;
edd16368
SC
5353 default:
5354 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
5355 BUG();
a2dac136 5356 return -1;
edd16368
SC
5357 }
5358 } else if (cmd_type == TYPE_MSG) {
5359 switch (cmd) {
5360
5361 case HPSA_DEVICE_RESET_MSG:
5362 c->Request.CDBLen = 16;
5363 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
5364 c->Request.Type.Attribute = ATTR_SIMPLE;
5365 c->Request.Type.Direction = XFER_NONE;
5366 c->Request.Timeout = 0; /* Don't time out */
64670ac8
SC
5367 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5368 c->Request.CDB[0] = cmd;
21e89afd 5369 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
edd16368
SC
5370 /* If bytes 4-7 are zero, it means reset the */
5371 /* LunID device */
5372 c->Request.CDB[4] = 0x00;
5373 c->Request.CDB[5] = 0x00;
5374 c->Request.CDB[6] = 0x00;
5375 c->Request.CDB[7] = 0x00;
75167d2c
SC
5376 break;
5377 case HPSA_ABORT_MSG:
5378 a = buff; /* point to command to be aborted */
5379 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
5380 a->Header.Tag.upper, a->Header.Tag.lower,
5381 c->Header.Tag.upper, c->Header.Tag.lower);
5382 c->Request.CDBLen = 16;
5383 c->Request.Type.Type = TYPE_MSG;
5384 c->Request.Type.Attribute = ATTR_SIMPLE;
5385 c->Request.Type.Direction = XFER_WRITE;
5386 c->Request.Timeout = 0; /* Don't time out */
5387 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5388 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5389 c->Request.CDB[2] = 0x00; /* reserved */
5390 c->Request.CDB[3] = 0x00; /* reserved */
5391 /* Tag to abort goes in CDB[4]-CDB[11] */
5392 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
5393 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
5394 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
5395 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
5396 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
5397 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
5398 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
5399 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
5400 c->Request.CDB[12] = 0x00; /* reserved */
5401 c->Request.CDB[13] = 0x00; /* reserved */
5402 c->Request.CDB[14] = 0x00; /* reserved */
5403 c->Request.CDB[15] = 0x00; /* reserved */
edd16368 5404 break;
edd16368
SC
5405 default:
5406 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5407 cmd);
5408 BUG();
5409 }
5410 } else {
5411 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5412 BUG();
5413 }
5414
5415 switch (c->Request.Type.Direction) {
5416 case XFER_READ:
5417 pci_dir = PCI_DMA_FROMDEVICE;
5418 break;
5419 case XFER_WRITE:
5420 pci_dir = PCI_DMA_TODEVICE;
5421 break;
5422 case XFER_NONE:
5423 pci_dir = PCI_DMA_NONE;
5424 break;
5425 default:
5426 pci_dir = PCI_DMA_BIDIRECTIONAL;
5427 }
a2dac136
SC
5428 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5429 return -1;
5430 return 0;
edd16368
SC
5431}
5432
5433/*
5434 * Map (physical) PCI mem into (virtual) kernel space
5435 */
5436static void __iomem *remap_pci_mem(ulong base, ulong size)
5437{
5438 ulong page_base = ((ulong) base) & PAGE_MASK;
5439 ulong page_offs = ((ulong) base) - page_base;
088ba34c
SC
5440 void __iomem *page_remapped = ioremap_nocache(page_base,
5441 page_offs + size);
edd16368
SC
5442
5443 return page_remapped ? (page_remapped + page_offs) : NULL;
5444}
5445
5446/* Takes cmds off the submission queue and sends them to the hardware,
5447 * then puts them on the queue of cmds waiting for completion.
0b57075d 5448 * Assumes h->lock is held
edd16368 5449 */
0b57075d 5450static void start_io(struct ctlr_info *h, unsigned long *flags)
edd16368
SC
5451{
5452 struct CommandList *c;
5453
9e0fc764
SC
5454 while (!list_empty(&h->reqQ)) {
5455 c = list_entry(h->reqQ.next, struct CommandList, list);
edd16368
SC
5456 /* can't do anything if fifo is full */
5457 if ((h->access.fifo_full(h))) {
396883e2 5458 h->fifo_recently_full = 1;
edd16368
SC
5459 dev_warn(&h->pdev->dev, "fifo full\n");
5460 break;
5461 }
396883e2 5462 h->fifo_recently_full = 0;
edd16368
SC
5463
5464 /* Get the first entry from the Request Q */
5465 removeQ(c);
5466 h->Qdepth--;
5467
edd16368
SC
5468 /* Put job onto the completed Q */
5469 addQ(&h->cmpQ, c);
e16a33ad
MG
5470
5471 /* Must increment commands_outstanding before unlocking
5472 * and submitting to avoid race checking for fifo full
5473 * condition.
5474 */
5475 h->commands_outstanding++;
e16a33ad
MG
5476
5477 /* Tell the controller execute command */
0b57075d 5478 spin_unlock_irqrestore(&h->lock, *flags);
e16a33ad 5479 h->access.submit_command(h, c);
0b57075d 5480 spin_lock_irqsave(&h->lock, *flags);
edd16368 5481 }
0b57075d
SC
5482}
5483
5484static void lock_and_start_io(struct ctlr_info *h)
5485{
5486 unsigned long flags;
5487
5488 spin_lock_irqsave(&h->lock, flags);
5489 start_io(h, &flags);
e16a33ad 5490 spin_unlock_irqrestore(&h->lock, flags);
edd16368
SC
5491}
5492
254f796b 5493static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
edd16368 5494{
254f796b 5495 return h->access.command_completed(h, q);
edd16368
SC
5496}
5497
900c5440 5498static inline bool interrupt_pending(struct ctlr_info *h)
edd16368
SC
5499{
5500 return h->access.intr_pending(h);
5501}
5502
5503static inline long interrupt_not_for_us(struct ctlr_info *h)
5504{
10f66018
SC
5505 return (h->access.intr_pending(h) == 0) ||
5506 (h->interrupts_enabled == 0);
edd16368
SC
5507}
5508
01a02ffc
SC
5509static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5510 u32 raw_tag)
edd16368
SC
5511{
5512 if (unlikely(tag_index >= h->nr_cmds)) {
5513 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5514 return 1;
5515 }
5516 return 0;
5517}
5518
5a3d16f5 5519static inline void finish_cmd(struct CommandList *c)
edd16368 5520{
e16a33ad 5521 unsigned long flags;
396883e2
SC
5522 int io_may_be_stalled = 0;
5523 struct ctlr_info *h = c->h;
e16a33ad 5524
396883e2 5525 spin_lock_irqsave(&h->lock, flags);
edd16368 5526 removeQ(c);
396883e2
SC
5527
5528 /*
5529 * Check for possibly stalled i/o.
5530 *
5531 * If a fifo_full condition is encountered, requests will back up
5532 * in h->reqQ. This queue is only emptied out by start_io which is
5533 * only called when a new i/o request comes in. If no i/o's are
5534 * forthcoming, the i/o's in h->reqQ can get stuck. So we call
5535 * start_io from here if we detect such a danger.
5536 *
5537 * Normally, we shouldn't hit this case, but pounding on the
5538 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
5539 * commands_outstanding is low. We want to avoid calling
5540 * start_io from in here as much as possible, and esp. don't
5541 * want to get in a cycle where we call start_io every time
5542 * through here.
5543 */
5544 if (unlikely(h->fifo_recently_full) &&
5545 h->commands_outstanding < 5)
5546 io_may_be_stalled = 1;
5547
5548 spin_unlock_irqrestore(&h->lock, flags);
5549
e85c5974 5550 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
c349775e
ST
5551 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5552 || c->cmd_type == CMD_IOACCEL2))
1fb011fb 5553 complete_scsi_command(c);
edd16368
SC
5554 else if (c->cmd_type == CMD_IOCTL_PEND)
5555 complete(c->waiting);
396883e2 5556 if (unlikely(io_may_be_stalled))
0b57075d 5557 lock_and_start_io(h);
edd16368
SC
5558}
5559
a104c99f
SC
5560static inline u32 hpsa_tag_contains_index(u32 tag)
5561{
a104c99f
SC
5562 return tag & DIRECT_LOOKUP_BIT;
5563}
5564
5565static inline u32 hpsa_tag_to_index(u32 tag)
5566{
a104c99f
SC
5567 return tag >> DIRECT_LOOKUP_SHIFT;
5568}
5569
a9a3a273
SC
5570
5571static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
a104c99f 5572{
a9a3a273
SC
5573#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5574#define HPSA_SIMPLE_ERROR_BITS 0x03
960a30e7 5575 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
a9a3a273
SC
5576 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5577 return tag & ~HPSA_PERF_ERROR_BITS;
a104c99f
SC
5578}
5579
303932fd 5580/* process completion of an indexed ("direct lookup") command */
1d94f94d 5581static inline void process_indexed_cmd(struct ctlr_info *h,
303932fd
DB
5582 u32 raw_tag)
5583{
5584 u32 tag_index;
5585 struct CommandList *c;
5586
5587 tag_index = hpsa_tag_to_index(raw_tag);
1d94f94d
SC
5588 if (!bad_tag(h, tag_index, raw_tag)) {
5589 c = h->cmd_pool + tag_index;
5590 finish_cmd(c);
5591 }
303932fd
DB
5592}
5593
5594/* process completion of a non-indexed command */
1d94f94d 5595static inline void process_nonindexed_cmd(struct ctlr_info *h,
303932fd
DB
5596 u32 raw_tag)
5597{
5598 u32 tag;
5599 struct CommandList *c = NULL;
e16a33ad 5600 unsigned long flags;
303932fd 5601
a9a3a273 5602 tag = hpsa_tag_discard_error_bits(h, raw_tag);
e16a33ad 5603 spin_lock_irqsave(&h->lock, flags);
9e0fc764 5604 list_for_each_entry(c, &h->cmpQ, list) {
303932fd 5605 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
e16a33ad 5606 spin_unlock_irqrestore(&h->lock, flags);
5a3d16f5 5607 finish_cmd(c);
1d94f94d 5608 return;
303932fd
DB
5609 }
5610 }
e16a33ad 5611 spin_unlock_irqrestore(&h->lock, flags);
303932fd 5612 bad_tag(h, h->nr_cmds + 1, raw_tag);
303932fd
DB
5613}
5614
64670ac8
SC
5615/* Some controllers, like p400, will give us one interrupt
5616 * after a soft reset, even if we turned interrupts off.
5617 * Only need to check for this in the hpsa_xxx_discard_completions
5618 * functions.
5619 */
5620static int ignore_bogus_interrupt(struct ctlr_info *h)
5621{
5622 if (likely(!reset_devices))
5623 return 0;
5624
5625 if (likely(h->interrupts_enabled))
5626 return 0;
5627
5628 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5629 "(known firmware bug.) Ignoring.\n");
5630
5631 return 1;
5632}
5633
254f796b
MG
5634/*
5635 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5636 * Relies on (h-q[x] == x) being true for x such that
5637 * 0 <= x < MAX_REPLY_QUEUES.
5638 */
5639static struct ctlr_info *queue_to_hba(u8 *queue)
64670ac8 5640{
254f796b
MG
5641 return container_of((queue - *queue), struct ctlr_info, q[0]);
5642}
5643
5644static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5645{
5646 struct ctlr_info *h = queue_to_hba(queue);
5647 u8 q = *(u8 *) queue;
64670ac8
SC
5648 u32 raw_tag;
5649
5650 if (ignore_bogus_interrupt(h))
5651 return IRQ_NONE;
5652
5653 if (interrupt_not_for_us(h))
5654 return IRQ_NONE;
a0c12413 5655 h->last_intr_timestamp = get_jiffies_64();
64670ac8 5656 while (interrupt_pending(h)) {
254f796b 5657 raw_tag = get_next_completion(h, q);
64670ac8 5658 while (raw_tag != FIFO_EMPTY)
254f796b 5659 raw_tag = next_command(h, q);
64670ac8 5660 }
64670ac8
SC
5661 return IRQ_HANDLED;
5662}
5663
254f796b 5664static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
64670ac8 5665{
254f796b 5666 struct ctlr_info *h = queue_to_hba(queue);
64670ac8 5667 u32 raw_tag;
254f796b 5668 u8 q = *(u8 *) queue;
64670ac8
SC
5669
5670 if (ignore_bogus_interrupt(h))
5671 return IRQ_NONE;
5672
a0c12413 5673 h->last_intr_timestamp = get_jiffies_64();
254f796b 5674 raw_tag = get_next_completion(h, q);
64670ac8 5675 while (raw_tag != FIFO_EMPTY)
254f796b 5676 raw_tag = next_command(h, q);
64670ac8
SC
5677 return IRQ_HANDLED;
5678}
5679
254f796b 5680static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
edd16368 5681{
254f796b 5682 struct ctlr_info *h = queue_to_hba((u8 *) queue);
303932fd 5683 u32 raw_tag;
254f796b 5684 u8 q = *(u8 *) queue;
edd16368
SC
5685
5686 if (interrupt_not_for_us(h))
5687 return IRQ_NONE;
a0c12413 5688 h->last_intr_timestamp = get_jiffies_64();
10f66018 5689 while (interrupt_pending(h)) {
254f796b 5690 raw_tag = get_next_completion(h, q);
10f66018 5691 while (raw_tag != FIFO_EMPTY) {
1d94f94d
SC
5692 if (likely(hpsa_tag_contains_index(raw_tag)))
5693 process_indexed_cmd(h, raw_tag);
10f66018 5694 else
1d94f94d 5695 process_nonindexed_cmd(h, raw_tag);
254f796b 5696 raw_tag = next_command(h, q);
10f66018
SC
5697 }
5698 }
10f66018
SC
5699 return IRQ_HANDLED;
5700}
5701
254f796b 5702static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
10f66018 5703{
254f796b 5704 struct ctlr_info *h = queue_to_hba(queue);
10f66018 5705 u32 raw_tag;
254f796b 5706 u8 q = *(u8 *) queue;
10f66018 5707
a0c12413 5708 h->last_intr_timestamp = get_jiffies_64();
254f796b 5709 raw_tag = get_next_completion(h, q);
303932fd 5710 while (raw_tag != FIFO_EMPTY) {
1d94f94d
SC
5711 if (likely(hpsa_tag_contains_index(raw_tag)))
5712 process_indexed_cmd(h, raw_tag);
303932fd 5713 else
1d94f94d 5714 process_nonindexed_cmd(h, raw_tag);
254f796b 5715 raw_tag = next_command(h, q);
edd16368 5716 }
edd16368
SC
5717 return IRQ_HANDLED;
5718}
5719
a9a3a273
SC
5720/* Send a message CDB to the firmware. Careful, this only works
5721 * in simple mode, not performant mode due to the tag lookup.
5722 * We only ever use this immediately after a controller reset.
5723 */
6f039790
GKH
5724static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5725 unsigned char type)
edd16368
SC
5726{
5727 struct Command {
5728 struct CommandListHeader CommandHeader;
5729 struct RequestBlock Request;
5730 struct ErrDescriptor ErrorDescriptor;
5731 };
5732 struct Command *cmd;
5733 static const size_t cmd_sz = sizeof(*cmd) +
5734 sizeof(cmd->ErrorDescriptor);
5735 dma_addr_t paddr64;
5736 uint32_t paddr32, tag;
5737 void __iomem *vaddr;
5738 int i, err;
5739
5740 vaddr = pci_ioremap_bar(pdev, 0);
5741 if (vaddr == NULL)
5742 return -ENOMEM;
5743
5744 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5745 * CCISS commands, so they must be allocated from the lower 4GiB of
5746 * memory.
5747 */
5748 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5749 if (err) {
5750 iounmap(vaddr);
5751 return -ENOMEM;
5752 }
5753
5754 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5755 if (cmd == NULL) {
5756 iounmap(vaddr);
5757 return -ENOMEM;
5758 }
5759
5760 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5761 * although there's no guarantee, we assume that the address is at
5762 * least 4-byte aligned (most likely, it's page-aligned).
5763 */
5764 paddr32 = paddr64;
5765
5766 cmd->CommandHeader.ReplyQueue = 0;
5767 cmd->CommandHeader.SGList = 0;
5768 cmd->CommandHeader.SGTotal = 0;
5769 cmd->CommandHeader.Tag.lower = paddr32;
5770 cmd->CommandHeader.Tag.upper = 0;
5771 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5772
5773 cmd->Request.CDBLen = 16;
5774 cmd->Request.Type.Type = TYPE_MSG;
5775 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5776 cmd->Request.Type.Direction = XFER_NONE;
5777 cmd->Request.Timeout = 0; /* Don't time out */
5778 cmd->Request.CDB[0] = opcode;
5779 cmd->Request.CDB[1] = type;
5780 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5781 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
5782 cmd->ErrorDescriptor.Addr.upper = 0;
5783 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
5784
5785 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5786
5787 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5788 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
a9a3a273 5789 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
edd16368
SC
5790 break;
5791 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5792 }
5793
5794 iounmap(vaddr);
5795
5796 /* we leak the DMA buffer here ... no choice since the controller could
5797 * still complete the command.
5798 */
5799 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5800 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5801 opcode, type);
5802 return -ETIMEDOUT;
5803 }
5804
5805 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5806
5807 if (tag & HPSA_ERROR_BIT) {
5808 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5809 opcode, type);
5810 return -EIO;
5811 }
5812
5813 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5814 opcode, type);
5815 return 0;
5816}
5817
edd16368
SC
5818#define hpsa_noop(p) hpsa_message(p, 3, 0)
5819
1df8552a 5820static int hpsa_controller_hard_reset(struct pci_dev *pdev,
cf0b08d0 5821 void * __iomem vaddr, u32 use_doorbell)
1df8552a
SC
5822{
5823 u16 pmcsr;
5824 int pos;
5825
5826 if (use_doorbell) {
5827 /* For everything after the P600, the PCI power state method
5828 * of resetting the controller doesn't work, so we have this
5829 * other way using the doorbell register.
5830 */
5831 dev_info(&pdev->dev, "using doorbell to reset controller\n");
cf0b08d0 5832 writel(use_doorbell, vaddr + SA5_DOORBELL);
85009239 5833
00701a96 5834 /* PMC hardware guys tell us we need a 10 second delay after
85009239
SC
5835 * doorbell reset and before any attempt to talk to the board
5836 * at all to ensure that this actually works and doesn't fall
5837 * over in some weird corner cases.
5838 */
00701a96 5839 msleep(10000);
1df8552a
SC
5840 } else { /* Try to do it the PCI power state way */
5841
5842 /* Quoting from the Open CISS Specification: "The Power
5843 * Management Control/Status Register (CSR) controls the power
5844 * state of the device. The normal operating state is D0,
5845 * CSR=00h. The software off state is D3, CSR=03h. To reset
5846 * the controller, place the interface device in D3 then to D0,
5847 * this causes a secondary PCI reset which will reset the
5848 * controller." */
5849
5850 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5851 if (pos == 0) {
5852 dev_err(&pdev->dev,
5853 "hpsa_reset_controller: "
5854 "PCI PM not supported\n");
5855 return -ENODEV;
5856 }
5857 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5858 /* enter the D3hot power management state */
5859 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5860 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5861 pmcsr |= PCI_D3hot;
5862 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5863
5864 msleep(500);
5865
5866 /* enter the D0 power management state */
5867 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5868 pmcsr |= PCI_D0;
5869 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
c4853efe
MM
5870
5871 /*
5872 * The P600 requires a small delay when changing states.
5873 * Otherwise we may think the board did not reset and we bail.
5874 * This for kdump only and is particular to the P600.
5875 */
5876 msleep(500);
1df8552a
SC
5877 }
5878 return 0;
5879}
5880
6f039790 5881static void init_driver_version(char *driver_version, int len)
580ada3c
SC
5882{
5883 memset(driver_version, 0, len);
f79cfec6 5884 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
580ada3c
SC
5885}
5886
6f039790 5887static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
580ada3c
SC
5888{
5889 char *driver_version;
5890 int i, size = sizeof(cfgtable->driver_version);
5891
5892 driver_version = kmalloc(size, GFP_KERNEL);
5893 if (!driver_version)
5894 return -ENOMEM;
5895
5896 init_driver_version(driver_version, size);
5897 for (i = 0; i < size; i++)
5898 writeb(driver_version[i], &cfgtable->driver_version[i]);
5899 kfree(driver_version);
5900 return 0;
5901}
5902
6f039790
GKH
5903static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5904 unsigned char *driver_ver)
580ada3c
SC
5905{
5906 int i;
5907
5908 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5909 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5910}
5911
6f039790 5912static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
580ada3c
SC
5913{
5914
5915 char *driver_ver, *old_driver_ver;
5916 int rc, size = sizeof(cfgtable->driver_version);
5917
5918 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5919 if (!old_driver_ver)
5920 return -ENOMEM;
5921 driver_ver = old_driver_ver + size;
5922
5923 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5924 * should have been changed, otherwise we know the reset failed.
5925 */
5926 init_driver_version(old_driver_ver, size);
5927 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5928 rc = !memcmp(driver_ver, old_driver_ver, size);
5929 kfree(old_driver_ver);
5930 return rc;
5931}
edd16368 5932/* This does a hard reset of the controller using PCI power management
1df8552a 5933 * states or the using the doorbell register.
edd16368 5934 */
6f039790 5935static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
edd16368 5936{
1df8552a
SC
5937 u64 cfg_offset;
5938 u32 cfg_base_addr;
5939 u64 cfg_base_addr_index;
5940 void __iomem *vaddr;
5941 unsigned long paddr;
580ada3c 5942 u32 misc_fw_support;
270d05de 5943 int rc;
1df8552a 5944 struct CfgTable __iomem *cfgtable;
cf0b08d0 5945 u32 use_doorbell;
18867659 5946 u32 board_id;
270d05de 5947 u16 command_register;
edd16368 5948
1df8552a
SC
5949 /* For controllers as old as the P600, this is very nearly
5950 * the same thing as
edd16368
SC
5951 *
5952 * pci_save_state(pci_dev);
5953 * pci_set_power_state(pci_dev, PCI_D3hot);
5954 * pci_set_power_state(pci_dev, PCI_D0);
5955 * pci_restore_state(pci_dev);
5956 *
1df8552a
SC
5957 * For controllers newer than the P600, the pci power state
5958 * method of resetting doesn't work so we have another way
5959 * using the doorbell register.
edd16368 5960 */
18867659 5961
25c1e56a 5962 rc = hpsa_lookup_board_id(pdev, &board_id);
46380786 5963 if (rc < 0 || !ctlr_is_resettable(board_id)) {
25c1e56a
SC
5964 dev_warn(&pdev->dev, "Not resetting device.\n");
5965 return -ENODEV;
5966 }
46380786
SC
5967
5968 /* if controller is soft- but not hard resettable... */
5969 if (!ctlr_is_hard_resettable(board_id))
5970 return -ENOTSUPP; /* try soft reset later. */
18867659 5971
270d05de
SC
5972 /* Save the PCI command register */
5973 pci_read_config_word(pdev, 4, &command_register);
5974 /* Turn the board off. This is so that later pci_restore_state()
5975 * won't turn the board on before the rest of config space is ready.
5976 */
5977 pci_disable_device(pdev);
5978 pci_save_state(pdev);
edd16368 5979
1df8552a
SC
5980 /* find the first memory BAR, so we can find the cfg table */
5981 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5982 if (rc)
5983 return rc;
5984 vaddr = remap_pci_mem(paddr, 0x250);
5985 if (!vaddr)
5986 return -ENOMEM;
edd16368 5987
1df8552a
SC
5988 /* find cfgtable in order to check if reset via doorbell is supported */
5989 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5990 &cfg_base_addr_index, &cfg_offset);
5991 if (rc)
5992 goto unmap_vaddr;
5993 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5994 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5995 if (!cfgtable) {
5996 rc = -ENOMEM;
5997 goto unmap_vaddr;
5998 }
580ada3c
SC
5999 rc = write_driver_ver_to_cfgtable(cfgtable);
6000 if (rc)
6001 goto unmap_vaddr;
edd16368 6002
cf0b08d0
SC
6003 /* If reset via doorbell register is supported, use that.
6004 * There are two such methods. Favor the newest method.
6005 */
1df8552a 6006 misc_fw_support = readl(&cfgtable->misc_fw_support);
cf0b08d0
SC
6007 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6008 if (use_doorbell) {
6009 use_doorbell = DOORBELL_CTLR_RESET2;
6010 } else {
6011 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6012 if (use_doorbell) {
fba63097
MM
6013 dev_warn(&pdev->dev, "Soft reset not supported. "
6014 "Firmware update is required.\n");
64670ac8 6015 rc = -ENOTSUPP; /* try soft reset */
cf0b08d0
SC
6016 goto unmap_cfgtable;
6017 }
6018 }
edd16368 6019
1df8552a
SC
6020 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6021 if (rc)
6022 goto unmap_cfgtable;
edd16368 6023
270d05de
SC
6024 pci_restore_state(pdev);
6025 rc = pci_enable_device(pdev);
6026 if (rc) {
6027 dev_warn(&pdev->dev, "failed to enable device.\n");
6028 goto unmap_cfgtable;
edd16368 6029 }
270d05de 6030 pci_write_config_word(pdev, 4, command_register);
edd16368 6031
1df8552a
SC
6032 /* Some devices (notably the HP Smart Array 5i Controller)
6033 need a little pause here */
6034 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6035
fe5389c8
SC
6036 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6037 if (rc) {
6038 dev_warn(&pdev->dev,
64670ac8
SC
6039 "failed waiting for board to become ready "
6040 "after hard reset\n");
fe5389c8
SC
6041 goto unmap_cfgtable;
6042 }
fe5389c8 6043
580ada3c
SC
6044 rc = controller_reset_failed(vaddr);
6045 if (rc < 0)
6046 goto unmap_cfgtable;
6047 if (rc) {
64670ac8
SC
6048 dev_warn(&pdev->dev, "Unable to successfully reset "
6049 "controller. Will try soft reset.\n");
6050 rc = -ENOTSUPP;
580ada3c 6051 } else {
64670ac8 6052 dev_info(&pdev->dev, "board ready after hard reset.\n");
1df8552a
SC
6053 }
6054
6055unmap_cfgtable:
6056 iounmap(cfgtable);
6057
6058unmap_vaddr:
6059 iounmap(vaddr);
6060 return rc;
edd16368
SC
6061}
6062
6063/*
6064 * We cannot read the structure directly, for portability we must use
6065 * the io functions.
6066 * This is for debug only.
6067 */
edd16368
SC
6068static void print_cfg_table(struct device *dev, struct CfgTable *tb)
6069{
58f8665c 6070#ifdef HPSA_DEBUG
edd16368
SC
6071 int i;
6072 char temp_name[17];
6073
6074 dev_info(dev, "Controller Configuration information\n");
6075 dev_info(dev, "------------------------------------\n");
6076 for (i = 0; i < 4; i++)
6077 temp_name[i] = readb(&(tb->Signature[i]));
6078 temp_name[4] = '\0';
6079 dev_info(dev, " Signature = %s\n", temp_name);
6080 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6081 dev_info(dev, " Transport methods supported = 0x%x\n",
6082 readl(&(tb->TransportSupport)));
6083 dev_info(dev, " Transport methods active = 0x%x\n",
6084 readl(&(tb->TransportActive)));
6085 dev_info(dev, " Requested transport Method = 0x%x\n",
6086 readl(&(tb->HostWrite.TransportRequest)));
6087 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6088 readl(&(tb->HostWrite.CoalIntDelay)));
6089 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6090 readl(&(tb->HostWrite.CoalIntCount)));
6091 dev_info(dev, " Max outstanding commands = 0x%d\n",
6092 readl(&(tb->CmdsOutMax)));
6093 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6094 for (i = 0; i < 16; i++)
6095 temp_name[i] = readb(&(tb->ServerName[i]));
6096 temp_name[16] = '\0';
6097 dev_info(dev, " Server Name = %s\n", temp_name);
6098 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6099 readl(&(tb->HeartBeat)));
edd16368 6100#endif /* HPSA_DEBUG */
58f8665c 6101}
edd16368
SC
6102
6103static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6104{
6105 int i, offset, mem_type, bar_type;
6106
6107 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6108 return 0;
6109 offset = 0;
6110 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6111 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6112 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6113 offset += 4;
6114 else {
6115 mem_type = pci_resource_flags(pdev, i) &
6116 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6117 switch (mem_type) {
6118 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6119 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6120 offset += 4; /* 32 bit */
6121 break;
6122 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6123 offset += 8;
6124 break;
6125 default: /* reserved in PCI 2.2 */
6126 dev_warn(&pdev->dev,
6127 "base address is invalid\n");
6128 return -1;
6129 break;
6130 }
6131 }
6132 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6133 return i + 1;
6134 }
6135 return -1;
6136}
6137
6138/* If MSI/MSI-X is supported by the kernel we will try to enable it on
6139 * controllers that are capable. If not, we use IO-APIC mode.
6140 */
6141
6f039790 6142static void hpsa_interrupt_mode(struct ctlr_info *h)
edd16368
SC
6143{
6144#ifdef CONFIG_PCI_MSI
254f796b
MG
6145 int err, i;
6146 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6147
6148 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6149 hpsa_msix_entries[i].vector = 0;
6150 hpsa_msix_entries[i].entry = i;
6151 }
edd16368
SC
6152
6153 /* Some boards advertise MSI but don't really support it */
6b3f4c52
SC
6154 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6155 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
edd16368 6156 goto default_int_mode;
55c06c71
SC
6157 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6158 dev_info(&h->pdev->dev, "MSIX\n");
eee0f03a 6159 h->msix_vector = MAX_REPLY_QUEUES;
f89439bc
SC
6160 if (h->msix_vector > num_online_cpus())
6161 h->msix_vector = num_online_cpus();
254f796b 6162 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
eee0f03a 6163 h->msix_vector);
edd16368 6164 if (err > 0) {
55c06c71 6165 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
edd16368 6166 "available\n", err);
eee0f03a
HR
6167 h->msix_vector = err;
6168 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6169 h->msix_vector);
6170 }
6171 if (!err) {
6172 for (i = 0; i < h->msix_vector; i++)
6173 h->intr[i] = hpsa_msix_entries[i].vector;
6174 return;
edd16368 6175 } else {
55c06c71 6176 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
edd16368 6177 err);
eee0f03a 6178 h->msix_vector = 0;
edd16368
SC
6179 }
6180 }
55c06c71
SC
6181 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6182 dev_info(&h->pdev->dev, "MSI\n");
6183 if (!pci_enable_msi(h->pdev))
edd16368
SC
6184 h->msi_vector = 1;
6185 else
55c06c71 6186 dev_warn(&h->pdev->dev, "MSI init failed\n");
edd16368
SC
6187 }
6188default_int_mode:
6189#endif /* CONFIG_PCI_MSI */
6190 /* if we get here we're going to use the default interrupt mode */
a9a3a273 6191 h->intr[h->intr_mode] = h->pdev->irq;
edd16368
SC
6192}
6193
6f039790 6194static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
e5c880d1
SC
6195{
6196 int i;
6197 u32 subsystem_vendor_id, subsystem_device_id;
6198
6199 subsystem_vendor_id = pdev->subsystem_vendor;
6200 subsystem_device_id = pdev->subsystem_device;
6201 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6202 subsystem_vendor_id;
6203
6204 for (i = 0; i < ARRAY_SIZE(products); i++)
6205 if (*board_id == products[i].board_id)
6206 return i;
6207
6798cc0a
SC
6208 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6209 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6210 !hpsa_allow_any) {
e5c880d1
SC
6211 dev_warn(&pdev->dev, "unrecognized board ID: "
6212 "0x%08x, ignoring.\n", *board_id);
6213 return -ENODEV;
6214 }
6215 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6216}
6217
6f039790
GKH
6218static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6219 unsigned long *memory_bar)
3a7774ce
SC
6220{
6221 int i;
6222
6223 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
12d2cd47 6224 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3a7774ce 6225 /* addressing mode bits already removed */
12d2cd47
SC
6226 *memory_bar = pci_resource_start(pdev, i);
6227 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3a7774ce
SC
6228 *memory_bar);
6229 return 0;
6230 }
12d2cd47 6231 dev_warn(&pdev->dev, "no memory BAR found\n");
3a7774ce
SC
6232 return -ENODEV;
6233}
6234
6f039790
GKH
6235static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6236 int wait_for_ready)
2c4c8c8b 6237{
fe5389c8 6238 int i, iterations;
2c4c8c8b 6239 u32 scratchpad;
fe5389c8
SC
6240 if (wait_for_ready)
6241 iterations = HPSA_BOARD_READY_ITERATIONS;
6242 else
6243 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
2c4c8c8b 6244
fe5389c8
SC
6245 for (i = 0; i < iterations; i++) {
6246 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6247 if (wait_for_ready) {
6248 if (scratchpad == HPSA_FIRMWARE_READY)
6249 return 0;
6250 } else {
6251 if (scratchpad != HPSA_FIRMWARE_READY)
6252 return 0;
6253 }
2c4c8c8b
SC
6254 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6255 }
fe5389c8 6256 dev_warn(&pdev->dev, "board not ready, timed out.\n");
2c4c8c8b
SC
6257 return -ENODEV;
6258}
6259
6f039790
GKH
6260static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6261 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6262 u64 *cfg_offset)
a51fd47f
SC
6263{
6264 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6265 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6266 *cfg_base_addr &= (u32) 0x0000ffff;
6267 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6268 if (*cfg_base_addr_index == -1) {
6269 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6270 return -ENODEV;
6271 }
6272 return 0;
6273}
6274
6f039790 6275static int hpsa_find_cfgtables(struct ctlr_info *h)
edd16368 6276{
01a02ffc
SC
6277 u64 cfg_offset;
6278 u32 cfg_base_addr;
6279 u64 cfg_base_addr_index;
303932fd 6280 u32 trans_offset;
a51fd47f 6281 int rc;
77c4495c 6282
a51fd47f
SC
6283 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6284 &cfg_base_addr_index, &cfg_offset);
6285 if (rc)
6286 return rc;
77c4495c 6287 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
a51fd47f 6288 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
77c4495c
SC
6289 if (!h->cfgtable)
6290 return -ENOMEM;
580ada3c
SC
6291 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6292 if (rc)
6293 return rc;
77c4495c 6294 /* Find performant mode table. */
a51fd47f 6295 trans_offset = readl(&h->cfgtable->TransMethodOffset);
77c4495c
SC
6296 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6297 cfg_base_addr_index)+cfg_offset+trans_offset,
6298 sizeof(*h->transtable));
6299 if (!h->transtable)
6300 return -ENOMEM;
6301 return 0;
6302}
6303
6f039790 6304static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
cba3d38b
SC
6305{
6306 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
72ceeaec
SC
6307
6308 /* Limit commands in memory limited kdump scenario. */
6309 if (reset_devices && h->max_commands > 32)
6310 h->max_commands = 32;
6311
cba3d38b
SC
6312 if (h->max_commands < 16) {
6313 dev_warn(&h->pdev->dev, "Controller reports "
6314 "max supported commands of %d, an obvious lie. "
6315 "Using 16. Ensure that firmware is up to date.\n",
6316 h->max_commands);
6317 h->max_commands = 16;
6318 }
6319}
6320
b93d7536
SC
6321/* Interrogate the hardware for some limits:
6322 * max commands, max SG elements without chaining, and with chaining,
6323 * SG chain block size, etc.
6324 */
6f039790 6325static void hpsa_find_board_params(struct ctlr_info *h)
b93d7536 6326{
cba3d38b 6327 hpsa_get_max_perf_mode_cmds(h);
b93d7536
SC
6328 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
6329 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
283b4a9b 6330 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
b93d7536
SC
6331 /*
6332 * Limit in-command s/g elements to 32 save dma'able memory.
6333 * Howvever spec says if 0, use 31
6334 */
6335 h->max_cmd_sg_entries = 31;
6336 if (h->maxsgentries > 512) {
6337 h->max_cmd_sg_entries = 32;
6338 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
6339 h->maxsgentries--; /* save one for chain pointer */
6340 } else {
6341 h->maxsgentries = 31; /* default to traditional values */
6342 h->chainsize = 0;
6343 }
75167d2c
SC
6344
6345 /* Find out what task management functions are supported and cache */
6346 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
0e7a7fce
ST
6347 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6348 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6349 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6350 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
b93d7536
SC
6351}
6352
76c46e49
SC
6353static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6354{
0fc9fd40 6355 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
76c46e49
SC
6356 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
6357 return false;
6358 }
6359 return true;
6360}
6361
97a5e98c 6362static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
f7c39101 6363{
97a5e98c 6364 u32 driver_support;
f7c39101 6365
97a5e98c 6366 driver_support = readl(&(h->cfgtable->driver_support));
0b9e7b74
AB
6367 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6368#ifdef CONFIG_X86
97a5e98c 6369 driver_support |= ENABLE_SCSI_PREFETCH;
f7c39101 6370#endif
28e13446
SC
6371 driver_support |= ENABLE_UNIT_ATTN;
6372 writel(driver_support, &(h->cfgtable->driver_support));
f7c39101
SC
6373}
6374
3d0eab67
SC
6375/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6376 * in a prefetch beyond physical memory.
6377 */
6378static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6379{
6380 u32 dma_prefetch;
6381
6382 if (h->board_id != 0x3225103C)
6383 return;
6384 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6385 dma_prefetch |= 0x8000;
6386 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6387}
6388
76438d08
SC
6389static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6390{
6391 int i;
6392 u32 doorbell_value;
6393 unsigned long flags;
6394 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6395 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6396 spin_lock_irqsave(&h->lock, flags);
6397 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6398 spin_unlock_irqrestore(&h->lock, flags);
6399 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6400 break;
6401 /* delay and try again */
6402 msleep(20);
6403 }
6404}
6405
6f039790 6406static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
eb6b2ae9
SC
6407{
6408 int i;
6eaf46fd
SC
6409 u32 doorbell_value;
6410 unsigned long flags;
eb6b2ae9
SC
6411
6412 /* under certain very rare conditions, this can take awhile.
6413 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6414 * as we enter this code.)
6415 */
6416 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6eaf46fd
SC
6417 spin_lock_irqsave(&h->lock, flags);
6418 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6419 spin_unlock_irqrestore(&h->lock, flags);
382be668 6420 if (!(doorbell_value & CFGTBL_ChangeReq))
eb6b2ae9
SC
6421 break;
6422 /* delay and try again */
60d3f5b0 6423 usleep_range(10000, 20000);
eb6b2ae9 6424 }
3f4336f3
SC
6425}
6426
6f039790 6427static int hpsa_enter_simple_mode(struct ctlr_info *h)
3f4336f3
SC
6428{
6429 u32 trans_support;
6430
6431 trans_support = readl(&(h->cfgtable->TransportSupport));
6432 if (!(trans_support & SIMPLE_MODE))
6433 return -ENOTSUPP;
6434
6435 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
283b4a9b 6436
3f4336f3
SC
6437 /* Update the field, and then ring the doorbell */
6438 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
b9af4937 6439 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
3f4336f3
SC
6440 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6441 hpsa_wait_for_mode_change_ack(h);
eb6b2ae9 6442 print_cfg_table(&h->pdev->dev, h->cfgtable);
283b4a9b
SC
6443 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6444 goto error;
960a30e7 6445 h->transMethod = CFGTBL_Trans_Simple;
eb6b2ae9 6446 return 0;
283b4a9b
SC
6447error:
6448 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6449 return -ENODEV;
eb6b2ae9
SC
6450}
6451
6f039790 6452static int hpsa_pci_init(struct ctlr_info *h)
77c4495c 6453{
eb6b2ae9 6454 int prod_index, err;
edd16368 6455
e5c880d1
SC
6456 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6457 if (prod_index < 0)
6458 return -ENODEV;
6459 h->product_name = products[prod_index].product_name;
6460 h->access = *(products[prod_index].access);
edd16368 6461
e5a44df8
MG
6462 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6463 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6464
55c06c71 6465 err = pci_enable_device(h->pdev);
edd16368 6466 if (err) {
55c06c71 6467 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
edd16368
SC
6468 return err;
6469 }
6470
5cb460a6
SC
6471 /* Enable bus mastering (pci_disable_device may disable this) */
6472 pci_set_master(h->pdev);
6473
f79cfec6 6474 err = pci_request_regions(h->pdev, HPSA);
edd16368 6475 if (err) {
55c06c71
SC
6476 dev_err(&h->pdev->dev,
6477 "cannot obtain PCI resources, aborting\n");
edd16368
SC
6478 return err;
6479 }
6b3f4c52 6480 hpsa_interrupt_mode(h);
12d2cd47 6481 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3a7774ce 6482 if (err)
edd16368 6483 goto err_out_free_res;
edd16368 6484 h->vaddr = remap_pci_mem(h->paddr, 0x250);
204892e9
SC
6485 if (!h->vaddr) {
6486 err = -ENOMEM;
6487 goto err_out_free_res;
6488 }
fe5389c8 6489 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
2c4c8c8b 6490 if (err)
edd16368 6491 goto err_out_free_res;
77c4495c
SC
6492 err = hpsa_find_cfgtables(h);
6493 if (err)
edd16368 6494 goto err_out_free_res;
b93d7536 6495 hpsa_find_board_params(h);
edd16368 6496
76c46e49 6497 if (!hpsa_CISS_signature_present(h)) {
edd16368
SC
6498 err = -ENODEV;
6499 goto err_out_free_res;
6500 }
97a5e98c 6501 hpsa_set_driver_support_bits(h);
3d0eab67 6502 hpsa_p600_dma_prefetch_quirk(h);
eb6b2ae9
SC
6503 err = hpsa_enter_simple_mode(h);
6504 if (err)
edd16368 6505 goto err_out_free_res;
edd16368
SC
6506 return 0;
6507
6508err_out_free_res:
204892e9
SC
6509 if (h->transtable)
6510 iounmap(h->transtable);
6511 if (h->cfgtable)
6512 iounmap(h->cfgtable);
6513 if (h->vaddr)
6514 iounmap(h->vaddr);
f0bd0b68 6515 pci_disable_device(h->pdev);
55c06c71 6516 pci_release_regions(h->pdev);
edd16368
SC
6517 return err;
6518}
6519
6f039790 6520static void hpsa_hba_inquiry(struct ctlr_info *h)
339b2b14
SC
6521{
6522 int rc;
6523
6524#define HBA_INQUIRY_BYTE_COUNT 64
6525 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6526 if (!h->hba_inquiry_data)
6527 return;
6528 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6529 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6530 if (rc != 0) {
6531 kfree(h->hba_inquiry_data);
6532 h->hba_inquiry_data = NULL;
6533 }
6534}
6535
6f039790 6536static int hpsa_init_reset_devices(struct pci_dev *pdev)
4c2a8c40 6537{
1df8552a 6538 int rc, i;
4c2a8c40
SC
6539
6540 if (!reset_devices)
6541 return 0;
6542
1df8552a
SC
6543 /* Reset the controller with a PCI power-cycle or via doorbell */
6544 rc = hpsa_kdump_hard_reset_controller(pdev);
4c2a8c40 6545
1df8552a
SC
6546 /* -ENOTSUPP here means we cannot reset the controller
6547 * but it's already (and still) up and running in
18867659
SC
6548 * "performant mode". Or, it might be 640x, which can't reset
6549 * due to concerns about shared bbwc between 6402/6404 pair.
1df8552a
SC
6550 */
6551 if (rc == -ENOTSUPP)
64670ac8 6552 return rc; /* just try to do the kdump anyhow. */
1df8552a
SC
6553 if (rc)
6554 return -ENODEV;
4c2a8c40
SC
6555
6556 /* Now try to get the controller to respond to a no-op */
2b870cb3 6557 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4c2a8c40
SC
6558 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6559 if (hpsa_noop(pdev) == 0)
6560 break;
6561 else
6562 dev_warn(&pdev->dev, "no-op failed%s\n",
6563 (i < 11 ? "; re-trying" : ""));
6564 }
6565 return 0;
6566}
6567
6f039790 6568static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
2e9d1b36
SC
6569{
6570 h->cmd_pool_bits = kzalloc(
6571 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6572 sizeof(unsigned long), GFP_KERNEL);
6573 h->cmd_pool = pci_alloc_consistent(h->pdev,
6574 h->nr_cmds * sizeof(*h->cmd_pool),
6575 &(h->cmd_pool_dhandle));
6576 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6577 h->nr_cmds * sizeof(*h->errinfo_pool),
6578 &(h->errinfo_pool_dhandle));
6579 if ((h->cmd_pool_bits == NULL)
6580 || (h->cmd_pool == NULL)
6581 || (h->errinfo_pool == NULL)) {
6582 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6583 return -ENOMEM;
6584 }
6585 return 0;
6586}
6587
6588static void hpsa_free_cmd_pool(struct ctlr_info *h)
6589{
6590 kfree(h->cmd_pool_bits);
6591 if (h->cmd_pool)
6592 pci_free_consistent(h->pdev,
6593 h->nr_cmds * sizeof(struct CommandList),
6594 h->cmd_pool, h->cmd_pool_dhandle);
aca9012a
SC
6595 if (h->ioaccel2_cmd_pool)
6596 pci_free_consistent(h->pdev,
6597 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6598 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
2e9d1b36
SC
6599 if (h->errinfo_pool)
6600 pci_free_consistent(h->pdev,
6601 h->nr_cmds * sizeof(struct ErrorInfo),
6602 h->errinfo_pool,
6603 h->errinfo_pool_dhandle);
e1f7de0c
MG
6604 if (h->ioaccel_cmd_pool)
6605 pci_free_consistent(h->pdev,
6606 h->nr_cmds * sizeof(struct io_accel1_cmd),
6607 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
2e9d1b36
SC
6608}
6609
41b3cf08
SC
6610static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6611{
6612 int i, cpu, rc;
6613
6614 cpu = cpumask_first(cpu_online_mask);
6615 for (i = 0; i < h->msix_vector; i++) {
6616 rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6617 cpu = cpumask_next(cpu, cpu_online_mask);
6618 }
6619}
6620
0ae01a32
SC
6621static int hpsa_request_irq(struct ctlr_info *h,
6622 irqreturn_t (*msixhandler)(int, void *),
6623 irqreturn_t (*intxhandler)(int, void *))
6624{
254f796b 6625 int rc, i;
0ae01a32 6626
254f796b
MG
6627 /*
6628 * initialize h->q[x] = x so that interrupt handlers know which
6629 * queue to process.
6630 */
6631 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6632 h->q[i] = (u8) i;
6633
eee0f03a 6634 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
254f796b 6635 /* If performant mode and MSI-X, use multiple reply queues */
eee0f03a 6636 for (i = 0; i < h->msix_vector; i++)
254f796b
MG
6637 rc = request_irq(h->intr[i], msixhandler,
6638 0, h->devname,
6639 &h->q[i]);
41b3cf08 6640 hpsa_irq_affinity_hints(h);
254f796b
MG
6641 } else {
6642 /* Use single reply pool */
eee0f03a 6643 if (h->msix_vector > 0 || h->msi_vector) {
254f796b
MG
6644 rc = request_irq(h->intr[h->intr_mode],
6645 msixhandler, 0, h->devname,
6646 &h->q[h->intr_mode]);
6647 } else {
6648 rc = request_irq(h->intr[h->intr_mode],
6649 intxhandler, IRQF_SHARED, h->devname,
6650 &h->q[h->intr_mode]);
6651 }
6652 }
0ae01a32
SC
6653 if (rc) {
6654 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6655 h->intr[h->intr_mode], h->devname);
6656 return -ENODEV;
6657 }
6658 return 0;
6659}
6660
6f039790 6661static int hpsa_kdump_soft_reset(struct ctlr_info *h)
64670ac8
SC
6662{
6663 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6664 HPSA_RESET_TYPE_CONTROLLER)) {
6665 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6666 return -EIO;
6667 }
6668
6669 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6670 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6671 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6672 return -1;
6673 }
6674
6675 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6676 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6677 dev_warn(&h->pdev->dev, "Board failed to become ready "
6678 "after soft reset.\n");
6679 return -1;
6680 }
6681
6682 return 0;
6683}
6684
254f796b
MG
6685static void free_irqs(struct ctlr_info *h)
6686{
6687 int i;
6688
6689 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6690 /* Single reply queue, only one irq to free */
6691 i = h->intr_mode;
41b3cf08 6692 irq_set_affinity_hint(h->intr[i], NULL);
254f796b
MG
6693 free_irq(h->intr[i], &h->q[i]);
6694 return;
6695 }
6696
41b3cf08
SC
6697 for (i = 0; i < h->msix_vector; i++) {
6698 irq_set_affinity_hint(h->intr[i], NULL);
254f796b 6699 free_irq(h->intr[i], &h->q[i]);
41b3cf08 6700 }
254f796b
MG
6701}
6702
0097f0f4 6703static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
64670ac8 6704{
254f796b 6705 free_irqs(h);
64670ac8 6706#ifdef CONFIG_PCI_MSI
0097f0f4
SC
6707 if (h->msix_vector) {
6708 if (h->pdev->msix_enabled)
6709 pci_disable_msix(h->pdev);
6710 } else if (h->msi_vector) {
6711 if (h->pdev->msi_enabled)
6712 pci_disable_msi(h->pdev);
6713 }
64670ac8 6714#endif /* CONFIG_PCI_MSI */
0097f0f4
SC
6715}
6716
072b0518
SC
6717static void hpsa_free_reply_queues(struct ctlr_info *h)
6718{
6719 int i;
6720
6721 for (i = 0; i < h->nreply_queues; i++) {
6722 if (!h->reply_queue[i].head)
6723 continue;
6724 pci_free_consistent(h->pdev, h->reply_queue_size,
6725 h->reply_queue[i].head, h->reply_queue[i].busaddr);
6726 h->reply_queue[i].head = NULL;
6727 h->reply_queue[i].busaddr = 0;
6728 }
6729}
6730
0097f0f4
SC
6731static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6732{
6733 hpsa_free_irqs_and_disable_msix(h);
64670ac8
SC
6734 hpsa_free_sg_chain_blocks(h);
6735 hpsa_free_cmd_pool(h);
e1f7de0c 6736 kfree(h->ioaccel1_blockFetchTable);
64670ac8 6737 kfree(h->blockFetchTable);
072b0518 6738 hpsa_free_reply_queues(h);
64670ac8
SC
6739 if (h->vaddr)
6740 iounmap(h->vaddr);
6741 if (h->transtable)
6742 iounmap(h->transtable);
6743 if (h->cfgtable)
6744 iounmap(h->cfgtable);
6745 pci_release_regions(h->pdev);
6746 kfree(h);
6747}
6748
a0c12413
SC
6749/* Called when controller lockup detected. */
6750static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6751{
6752 struct CommandList *c = NULL;
6753
6754 assert_spin_locked(&h->lock);
6755 /* Mark all outstanding commands as failed and complete them. */
6756 while (!list_empty(list)) {
6757 c = list_entry(list->next, struct CommandList, list);
6758 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
5a3d16f5 6759 finish_cmd(c);
a0c12413
SC
6760 }
6761}
6762
094963da
SC
6763static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6764{
6765 int i, cpu;
6766
6767 cpu = cpumask_first(cpu_online_mask);
6768 for (i = 0; i < num_online_cpus(); i++) {
6769 u32 *lockup_detected;
6770 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6771 *lockup_detected = value;
6772 cpu = cpumask_next(cpu, cpu_online_mask);
6773 }
6774 wmb(); /* be sure the per-cpu variables are out to memory */
6775}
6776
a0c12413
SC
6777static void controller_lockup_detected(struct ctlr_info *h)
6778{
6779 unsigned long flags;
094963da 6780 u32 lockup_detected;
a0c12413 6781
a0c12413
SC
6782 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6783 spin_lock_irqsave(&h->lock, flags);
094963da
SC
6784 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6785 if (!lockup_detected) {
6786 /* no heartbeat, but controller gave us a zero. */
6787 dev_warn(&h->pdev->dev,
6788 "lockup detected but scratchpad register is zero\n");
6789 lockup_detected = 0xffffffff;
6790 }
6791 set_lockup_detected_for_all_cpus(h, lockup_detected);
a0c12413
SC
6792 spin_unlock_irqrestore(&h->lock, flags);
6793 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
094963da 6794 lockup_detected);
a0c12413
SC
6795 pci_disable_device(h->pdev);
6796 spin_lock_irqsave(&h->lock, flags);
6797 fail_all_cmds_on_list(h, &h->cmpQ);
6798 fail_all_cmds_on_list(h, &h->reqQ);
6799 spin_unlock_irqrestore(&h->lock, flags);
6800}
6801
a0c12413
SC
6802static void detect_controller_lockup(struct ctlr_info *h)
6803{
6804 u64 now;
6805 u32 heartbeat;
6806 unsigned long flags;
6807
a0c12413
SC
6808 now = get_jiffies_64();
6809 /* If we've received an interrupt recently, we're ok. */
6810 if (time_after64(h->last_intr_timestamp +
e85c5974 6811 (h->heartbeat_sample_interval), now))
a0c12413
SC
6812 return;
6813
6814 /*
6815 * If we've already checked the heartbeat recently, we're ok.
6816 * This could happen if someone sends us a signal. We
6817 * otherwise don't care about signals in this thread.
6818 */
6819 if (time_after64(h->last_heartbeat_timestamp +
e85c5974 6820 (h->heartbeat_sample_interval), now))
a0c12413
SC
6821 return;
6822
6823 /* If heartbeat has not changed since we last looked, we're not ok. */
6824 spin_lock_irqsave(&h->lock, flags);
6825 heartbeat = readl(&h->cfgtable->HeartBeat);
6826 spin_unlock_irqrestore(&h->lock, flags);
6827 if (h->last_heartbeat == heartbeat) {
6828 controller_lockup_detected(h);
6829 return;
6830 }
6831
6832 /* We're ok. */
6833 h->last_heartbeat = heartbeat;
6834 h->last_heartbeat_timestamp = now;
6835}
6836
9846590e 6837static void hpsa_ack_ctlr_events(struct ctlr_info *h)
76438d08
SC
6838{
6839 int i;
6840 char *event_type;
6841
e863d68e
ST
6842 /* Clear the driver-requested rescan flag */
6843 h->drv_req_rescan = 0;
6844
76438d08 6845 /* Ask the controller to clear the events we're handling. */
1f7cee8c
SC
6846 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6847 | CFGTBL_Trans_io_accel2)) &&
76438d08
SC
6848 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6849 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6850
6851 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6852 event_type = "state change";
6853 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6854 event_type = "configuration change";
6855 /* Stop sending new RAID offload reqs via the IO accelerator */
6856 scsi_block_requests(h->scsi_host);
6857 for (i = 0; i < h->ndevices; i++)
6858 h->dev[i]->offload_enabled = 0;
23100dd9 6859 hpsa_drain_accel_commands(h);
76438d08
SC
6860 /* Set 'accelerator path config change' bit */
6861 dev_warn(&h->pdev->dev,
6862 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6863 h->events, event_type);
6864 writel(h->events, &(h->cfgtable->clear_event_notify));
6865 /* Set the "clear event notify field update" bit 6 */
6866 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6867 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6868 hpsa_wait_for_clear_event_notify_ack(h);
6869 scsi_unblock_requests(h->scsi_host);
6870 } else {
6871 /* Acknowledge controller notification events. */
6872 writel(h->events, &(h->cfgtable->clear_event_notify));
6873 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6874 hpsa_wait_for_clear_event_notify_ack(h);
6875#if 0
6876 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6877 hpsa_wait_for_mode_change_ack(h);
6878#endif
6879 }
9846590e 6880 return;
76438d08
SC
6881}
6882
6883/* Check a register on the controller to see if there are configuration
6884 * changes (added/changed/removed logical drives, etc.) which mean that
e863d68e
ST
6885 * we should rescan the controller for devices.
6886 * Also check flag for driver-initiated rescan.
76438d08 6887 */
9846590e 6888static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
76438d08 6889{
9846590e
SC
6890 if (h->drv_req_rescan)
6891 return 1;
6892
76438d08 6893 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
9846590e 6894 return 0;
76438d08
SC
6895
6896 h->events = readl(&(h->cfgtable->event_notify));
9846590e
SC
6897 return h->events & RESCAN_REQUIRED_EVENT_BITS;
6898}
76438d08 6899
9846590e
SC
6900/*
6901 * Check if any of the offline devices have become ready
6902 */
6903static int hpsa_offline_devices_ready(struct ctlr_info *h)
6904{
6905 unsigned long flags;
6906 struct offline_device_entry *d;
6907 struct list_head *this, *tmp;
6908
6909 spin_lock_irqsave(&h->offline_device_lock, flags);
6910 list_for_each_safe(this, tmp, &h->offline_device_list) {
6911 d = list_entry(this, struct offline_device_entry,
6912 offline_list);
6913 spin_unlock_irqrestore(&h->offline_device_lock, flags);
d1fea47c
SC
6914 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6915 spin_lock_irqsave(&h->offline_device_lock, flags);
6916 list_del(&d->offline_list);
6917 spin_unlock_irqrestore(&h->offline_device_lock, flags);
9846590e 6918 return 1;
d1fea47c 6919 }
9846590e
SC
6920 spin_lock_irqsave(&h->offline_device_lock, flags);
6921 }
6922 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6923 return 0;
76438d08
SC
6924}
6925
9846590e 6926
8a98db73 6927static void hpsa_monitor_ctlr_worker(struct work_struct *work)
a0c12413
SC
6928{
6929 unsigned long flags;
8a98db73
SC
6930 struct ctlr_info *h = container_of(to_delayed_work(work),
6931 struct ctlr_info, monitor_ctlr_work);
6932 detect_controller_lockup(h);
094963da 6933 if (lockup_detected(h))
8a98db73 6934 return;
9846590e
SC
6935
6936 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6937 scsi_host_get(h->scsi_host);
6938 h->drv_req_rescan = 0;
6939 hpsa_ack_ctlr_events(h);
6940 hpsa_scan_start(h->scsi_host);
6941 scsi_host_put(h->scsi_host);
6942 }
6943
8a98db73
SC
6944 spin_lock_irqsave(&h->lock, flags);
6945 if (h->remove_in_progress) {
6946 spin_unlock_irqrestore(&h->lock, flags);
a0c12413
SC
6947 return;
6948 }
8a98db73
SC
6949 schedule_delayed_work(&h->monitor_ctlr_work,
6950 h->heartbeat_sample_interval);
6951 spin_unlock_irqrestore(&h->lock, flags);
a0c12413
SC
6952}
6953
6f039790 6954static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
edd16368 6955{
4c2a8c40 6956 int dac, rc;
edd16368 6957 struct ctlr_info *h;
64670ac8
SC
6958 int try_soft_reset = 0;
6959 unsigned long flags;
edd16368
SC
6960
6961 if (number_of_controllers == 0)
6962 printk(KERN_INFO DRIVER_NAME "\n");
edd16368 6963
4c2a8c40 6964 rc = hpsa_init_reset_devices(pdev);
64670ac8
SC
6965 if (rc) {
6966 if (rc != -ENOTSUPP)
6967 return rc;
6968 /* If the reset fails in a particular way (it has no way to do
6969 * a proper hard reset, so returns -ENOTSUPP) we can try to do
6970 * a soft reset once we get the controller configured up to the
6971 * point that it can accept a command.
6972 */
6973 try_soft_reset = 1;
6974 rc = 0;
6975 }
6976
6977reinit_after_soft_reset:
edd16368 6978
303932fd
DB
6979 /* Command structures must be aligned on a 32-byte boundary because
6980 * the 5 lower bits of the address are used by the hardware. and by
6981 * the driver. See comments in hpsa.h for more info.
6982 */
303932fd 6983 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
edd16368
SC
6984 h = kzalloc(sizeof(*h), GFP_KERNEL);
6985 if (!h)
ecd9aad4 6986 return -ENOMEM;
edd16368 6987
55c06c71 6988 h->pdev = pdev;
a9a3a273 6989 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
9e0fc764
SC
6990 INIT_LIST_HEAD(&h->cmpQ);
6991 INIT_LIST_HEAD(&h->reqQ);
9846590e 6992 INIT_LIST_HEAD(&h->offline_device_list);
6eaf46fd 6993 spin_lock_init(&h->lock);
9846590e 6994 spin_lock_init(&h->offline_device_lock);
6eaf46fd 6995 spin_lock_init(&h->scan_lock);
0390f0c0 6996 spin_lock_init(&h->passthru_count_lock);
094963da
SC
6997
6998 /* Allocate and clear per-cpu variable lockup_detected */
6999 h->lockup_detected = alloc_percpu(u32);
2a5ac326
SC
7000 if (!h->lockup_detected) {
7001 rc = -ENOMEM;
094963da 7002 goto clean1;
2a5ac326 7003 }
094963da
SC
7004 set_lockup_detected_for_all_cpus(h, 0);
7005
55c06c71 7006 rc = hpsa_pci_init(h);
ecd9aad4 7007 if (rc != 0)
edd16368
SC
7008 goto clean1;
7009
f79cfec6 7010 sprintf(h->devname, HPSA "%d", number_of_controllers);
edd16368
SC
7011 h->ctlr = number_of_controllers;
7012 number_of_controllers++;
edd16368
SC
7013
7014 /* configure PCI DMA stuff */
ecd9aad4
SC
7015 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7016 if (rc == 0) {
edd16368 7017 dac = 1;
ecd9aad4
SC
7018 } else {
7019 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7020 if (rc == 0) {
7021 dac = 0;
7022 } else {
7023 dev_err(&pdev->dev, "no suitable DMA available\n");
7024 goto clean1;
7025 }
edd16368
SC
7026 }
7027
7028 /* make sure the board interrupts are off */
7029 h->access.set_intr_mask(h, HPSA_INTR_OFF);
10f66018 7030
0ae01a32 7031 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
edd16368 7032 goto clean2;
303932fd
DB
7033 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
7034 h->devname, pdev->device,
a9a3a273 7035 h->intr[h->intr_mode], dac ? "" : " not");
2e9d1b36 7036 if (hpsa_allocate_cmd_pool(h))
edd16368 7037 goto clean4;
33a2ffce
SC
7038 if (hpsa_allocate_sg_chain_blocks(h))
7039 goto clean4;
a08a8471
SC
7040 init_waitqueue_head(&h->scan_wait_queue);
7041 h->scan_finished = 1; /* no scan currently in progress */
edd16368
SC
7042
7043 pci_set_drvdata(pdev, h);
9a41338e 7044 h->ndevices = 0;
316b221a 7045 h->hba_mode_enabled = 0;
9a41338e
SC
7046 h->scsi_host = NULL;
7047 spin_lock_init(&h->devlock);
64670ac8
SC
7048 hpsa_put_ctlr_into_performant_mode(h);
7049
7050 /* At this point, the controller is ready to take commands.
7051 * Now, if reset_devices and the hard reset didn't work, try
7052 * the soft reset and see if that works.
7053 */
7054 if (try_soft_reset) {
7055
7056 /* This is kind of gross. We may or may not get a completion
7057 * from the soft reset command, and if we do, then the value
7058 * from the fifo may or may not be valid. So, we wait 10 secs
7059 * after the reset throwing away any completions we get during
7060 * that time. Unregister the interrupt handler and register
7061 * fake ones to scoop up any residual completions.
7062 */
7063 spin_lock_irqsave(&h->lock, flags);
7064 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7065 spin_unlock_irqrestore(&h->lock, flags);
254f796b 7066 free_irqs(h);
64670ac8
SC
7067 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
7068 hpsa_intx_discard_completions);
7069 if (rc) {
7070 dev_warn(&h->pdev->dev, "Failed to request_irq after "
7071 "soft reset.\n");
7072 goto clean4;
7073 }
7074
7075 rc = hpsa_kdump_soft_reset(h);
7076 if (rc)
7077 /* Neither hard nor soft reset worked, we're hosed. */
7078 goto clean4;
7079
7080 dev_info(&h->pdev->dev, "Board READY.\n");
7081 dev_info(&h->pdev->dev,
7082 "Waiting for stale completions to drain.\n");
7083 h->access.set_intr_mask(h, HPSA_INTR_ON);
7084 msleep(10000);
7085 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7086
7087 rc = controller_reset_failed(h->cfgtable);
7088 if (rc)
7089 dev_info(&h->pdev->dev,
7090 "Soft reset appears to have failed.\n");
7091
7092 /* since the controller's reset, we have to go back and re-init
7093 * everything. Easiest to just forget what we've done and do it
7094 * all over again.
7095 */
7096 hpsa_undo_allocations_after_kdump_soft_reset(h);
7097 try_soft_reset = 0;
7098 if (rc)
7099 /* don't go to clean4, we already unallocated */
7100 return -ENODEV;
7101
7102 goto reinit_after_soft_reset;
7103 }
edd16368 7104
316b221a
SC
7105 /* Enable Accelerated IO path at driver layer */
7106 h->acciopath_status = 1;
da0697bd 7107
e863d68e
ST
7108 h->drv_req_rescan = 0;
7109
edd16368
SC
7110 /* Turn the interrupts on so we can service requests */
7111 h->access.set_intr_mask(h, HPSA_INTR_ON);
7112
339b2b14 7113 hpsa_hba_inquiry(h);
edd16368 7114 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
8a98db73
SC
7115
7116 /* Monitor the controller for firmware lockups */
7117 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
7118 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
7119 schedule_delayed_work(&h->monitor_ctlr_work,
7120 h->heartbeat_sample_interval);
88bf6d62 7121 return 0;
edd16368
SC
7122
7123clean4:
33a2ffce 7124 hpsa_free_sg_chain_blocks(h);
2e9d1b36 7125 hpsa_free_cmd_pool(h);
254f796b 7126 free_irqs(h);
edd16368
SC
7127clean2:
7128clean1:
094963da
SC
7129 if (h->lockup_detected)
7130 free_percpu(h->lockup_detected);
edd16368 7131 kfree(h);
ecd9aad4 7132 return rc;
edd16368
SC
7133}
7134
7135static void hpsa_flush_cache(struct ctlr_info *h)
7136{
7137 char *flush_buf;
7138 struct CommandList *c;
702890e3
SC
7139
7140 /* Don't bother trying to flush the cache if locked up */
094963da 7141 if (unlikely(lockup_detected(h)))
702890e3 7142 return;
edd16368
SC
7143 flush_buf = kzalloc(4, GFP_KERNEL);
7144 if (!flush_buf)
7145 return;
7146
7147 c = cmd_special_alloc(h);
7148 if (!c) {
7149 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
7150 goto out_of_memory;
7151 }
a2dac136
SC
7152 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
7153 RAID_CTLR_LUNID, TYPE_CMD)) {
7154 goto out;
7155 }
edd16368
SC
7156 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
7157 if (c->err_info->CommandStatus != 0)
a2dac136 7158out:
edd16368
SC
7159 dev_warn(&h->pdev->dev,
7160 "error flushing cache on controller\n");
7161 cmd_special_free(h, c);
7162out_of_memory:
7163 kfree(flush_buf);
7164}
7165
7166static void hpsa_shutdown(struct pci_dev *pdev)
7167{
7168 struct ctlr_info *h;
7169
7170 h = pci_get_drvdata(pdev);
7171 /* Turn board interrupts off and send the flush cache command
7172 * sendcmd will turn off interrupt, and send the flush...
7173 * To write all data in the battery backed cache to disks
7174 */
7175 hpsa_flush_cache(h);
7176 h->access.set_intr_mask(h, HPSA_INTR_OFF);
0097f0f4 7177 hpsa_free_irqs_and_disable_msix(h);
edd16368
SC
7178}
7179
6f039790 7180static void hpsa_free_device_info(struct ctlr_info *h)
55e14e76
SC
7181{
7182 int i;
7183
7184 for (i = 0; i < h->ndevices; i++)
7185 kfree(h->dev[i]);
7186}
7187
6f039790 7188static void hpsa_remove_one(struct pci_dev *pdev)
edd16368
SC
7189{
7190 struct ctlr_info *h;
8a98db73 7191 unsigned long flags;
edd16368
SC
7192
7193 if (pci_get_drvdata(pdev) == NULL) {
a0c12413 7194 dev_err(&pdev->dev, "unable to remove device\n");
edd16368
SC
7195 return;
7196 }
7197 h = pci_get_drvdata(pdev);
8a98db73
SC
7198
7199 /* Get rid of any controller monitoring work items */
7200 spin_lock_irqsave(&h->lock, flags);
7201 h->remove_in_progress = 1;
7202 cancel_delayed_work(&h->monitor_ctlr_work);
7203 spin_unlock_irqrestore(&h->lock, flags);
7204
edd16368
SC
7205 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7206 hpsa_shutdown(pdev);
7207 iounmap(h->vaddr);
204892e9
SC
7208 iounmap(h->transtable);
7209 iounmap(h->cfgtable);
55e14e76 7210 hpsa_free_device_info(h);
33a2ffce 7211 hpsa_free_sg_chain_blocks(h);
edd16368
SC
7212 pci_free_consistent(h->pdev,
7213 h->nr_cmds * sizeof(struct CommandList),
7214 h->cmd_pool, h->cmd_pool_dhandle);
7215 pci_free_consistent(h->pdev,
7216 h->nr_cmds * sizeof(struct ErrorInfo),
7217 h->errinfo_pool, h->errinfo_pool_dhandle);
072b0518 7218 hpsa_free_reply_queues(h);
edd16368 7219 kfree(h->cmd_pool_bits);
303932fd 7220 kfree(h->blockFetchTable);
e1f7de0c 7221 kfree(h->ioaccel1_blockFetchTable);
aca9012a 7222 kfree(h->ioaccel2_blockFetchTable);
339b2b14 7223 kfree(h->hba_inquiry_data);
f0bd0b68 7224 pci_disable_device(pdev);
edd16368 7225 pci_release_regions(pdev);
094963da 7226 free_percpu(h->lockup_detected);
edd16368
SC
7227 kfree(h);
7228}
7229
7230static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7231 __attribute__((unused)) pm_message_t state)
7232{
7233 return -ENOSYS;
7234}
7235
7236static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7237{
7238 return -ENOSYS;
7239}
7240
7241static struct pci_driver hpsa_pci_driver = {
f79cfec6 7242 .name = HPSA,
edd16368 7243 .probe = hpsa_init_one,
6f039790 7244 .remove = hpsa_remove_one,
edd16368
SC
7245 .id_table = hpsa_pci_device_id, /* id_table */
7246 .shutdown = hpsa_shutdown,
7247 .suspend = hpsa_suspend,
7248 .resume = hpsa_resume,
7249};
7250
303932fd
DB
7251/* Fill in bucket_map[], given nsgs (the max number of
7252 * scatter gather elements supported) and bucket[],
7253 * which is an array of 8 integers. The bucket[] array
7254 * contains 8 different DMA transfer sizes (in 16
7255 * byte increments) which the controller uses to fetch
7256 * commands. This function fills in bucket_map[], which
7257 * maps a given number of scatter gather elements to one of
7258 * the 8 DMA transfer sizes. The point of it is to allow the
7259 * controller to only do as much DMA as needed to fetch the
7260 * command, with the DMA transfer size encoded in the lower
7261 * bits of the command address.
7262 */
7263static void calc_bucket_map(int bucket[], int num_buckets,
e1f7de0c 7264 int nsgs, int min_blocks, int *bucket_map)
303932fd
DB
7265{
7266 int i, j, b, size;
7267
303932fd
DB
7268 /* Note, bucket_map must have nsgs+1 entries. */
7269 for (i = 0; i <= nsgs; i++) {
7270 /* Compute size of a command with i SG entries */
e1f7de0c 7271 size = i + min_blocks;
303932fd
DB
7272 b = num_buckets; /* Assume the biggest bucket */
7273 /* Find the bucket that is just big enough */
e1f7de0c 7274 for (j = 0; j < num_buckets; j++) {
303932fd
DB
7275 if (bucket[j] >= size) {
7276 b = j;
7277 break;
7278 }
7279 }
7280 /* for a command with i SG entries, use bucket b. */
7281 bucket_map[i] = b;
7282 }
7283}
7284
e1f7de0c 7285static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
303932fd 7286{
6c311b57
SC
7287 int i;
7288 unsigned long register_value;
e1f7de0c
MG
7289 unsigned long transMethod = CFGTBL_Trans_Performant |
7290 (trans_support & CFGTBL_Trans_use_short_tags) |
b9af4937
SC
7291 CFGTBL_Trans_enable_directed_msix |
7292 (trans_support & (CFGTBL_Trans_io_accel1 |
7293 CFGTBL_Trans_io_accel2));
e1f7de0c 7294 struct access_method access = SA5_performant_access;
def342bd
SC
7295
7296 /* This is a bit complicated. There are 8 registers on
7297 * the controller which we write to to tell it 8 different
7298 * sizes of commands which there may be. It's a way of
7299 * reducing the DMA done to fetch each command. Encoded into
7300 * each command's tag are 3 bits which communicate to the controller
7301 * which of the eight sizes that command fits within. The size of
7302 * each command depends on how many scatter gather entries there are.
7303 * Each SG entry requires 16 bytes. The eight registers are programmed
7304 * with the number of 16-byte blocks a command of that size requires.
7305 * The smallest command possible requires 5 such 16 byte blocks.
d66ae08b 7306 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd
SC
7307 * blocks. Note, this only extends to the SG entries contained
7308 * within the command block, and does not extend to chained blocks
7309 * of SG elements. bft[] contains the eight values we write to
7310 * the registers. They are not evenly distributed, but have more
7311 * sizes for small commands, and fewer sizes for larger commands.
7312 */
d66ae08b 7313 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
b9af4937
SC
7314#define MIN_IOACCEL2_BFT_ENTRY 5
7315#define HPSA_IOACCEL2_HEADER_SZ 4
7316 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7317 13, 14, 15, 16, 17, 18, 19,
7318 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7319 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7320 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7321 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7322 16 * MIN_IOACCEL2_BFT_ENTRY);
7323 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
d66ae08b 7324 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
303932fd
DB
7325 /* 5 = 1 s/g entry or 4k
7326 * 6 = 2 s/g entry or 8k
7327 * 8 = 4 s/g entry or 16k
7328 * 10 = 6 s/g entry or 24k
7329 */
303932fd 7330
b3a52e79
SC
7331 /* If the controller supports either ioaccel method then
7332 * we can also use the RAID stack submit path that does not
7333 * perform the superfluous readl() after each command submission.
7334 */
7335 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7336 access = SA5_performant_access_no_read;
7337
303932fd 7338 /* Controller spec: zero out this buffer. */
072b0518
SC
7339 for (i = 0; i < h->nreply_queues; i++)
7340 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
303932fd 7341
d66ae08b
SC
7342 bft[7] = SG_ENTRIES_IN_CMD + 4;
7343 calc_bucket_map(bft, ARRAY_SIZE(bft),
e1f7de0c 7344 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
303932fd
DB
7345 for (i = 0; i < 8; i++)
7346 writel(bft[i], &h->transtable->BlockFetch[i]);
7347
7348 /* size of controller ring buffer */
7349 writel(h->max_commands, &h->transtable->RepQSize);
254f796b 7350 writel(h->nreply_queues, &h->transtable->RepQCount);
303932fd
DB
7351 writel(0, &h->transtable->RepQCtrAddrLow32);
7352 writel(0, &h->transtable->RepQCtrAddrHigh32);
254f796b
MG
7353
7354 for (i = 0; i < h->nreply_queues; i++) {
7355 writel(0, &h->transtable->RepQAddr[i].upper);
072b0518 7356 writel(h->reply_queue[i].busaddr,
254f796b
MG
7357 &h->transtable->RepQAddr[i].lower);
7358 }
7359
b9af4937 7360 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
e1f7de0c
MG
7361 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7362 /*
7363 * enable outbound interrupt coalescing in accelerator mode;
7364 */
7365 if (trans_support & CFGTBL_Trans_io_accel1) {
7366 access = SA5_ioaccel_mode1_access;
7367 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7368 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
c349775e
ST
7369 } else {
7370 if (trans_support & CFGTBL_Trans_io_accel2) {
7371 access = SA5_ioaccel_mode2_access;
7372 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7373 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7374 }
e1f7de0c 7375 }
303932fd 7376 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3f4336f3 7377 hpsa_wait_for_mode_change_ack(h);
303932fd
DB
7378 register_value = readl(&(h->cfgtable->TransportActive));
7379 if (!(register_value & CFGTBL_Trans_Performant)) {
7380 dev_warn(&h->pdev->dev, "unable to get board into"
7381 " performant mode\n");
7382 return;
7383 }
960a30e7 7384 /* Change the access methods to the performant access methods */
e1f7de0c
MG
7385 h->access = access;
7386 h->transMethod = transMethod;
7387
b9af4937
SC
7388 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7389 (trans_support & CFGTBL_Trans_io_accel2)))
e1f7de0c
MG
7390 return;
7391
b9af4937
SC
7392 if (trans_support & CFGTBL_Trans_io_accel1) {
7393 /* Set up I/O accelerator mode */
7394 for (i = 0; i < h->nreply_queues; i++) {
7395 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7396 h->reply_queue[i].current_entry =
7397 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7398 }
7399 bft[7] = h->ioaccel_maxsg + 8;
7400 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7401 h->ioaccel1_blockFetchTable);
e1f7de0c 7402
b9af4937 7403 /* initialize all reply queue entries to unused */
072b0518
SC
7404 for (i = 0; i < h->nreply_queues; i++)
7405 memset(h->reply_queue[i].head,
7406 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7407 h->reply_queue_size);
e1f7de0c 7408
b9af4937
SC
7409 /* set all the constant fields in the accelerator command
7410 * frames once at init time to save CPU cycles later.
7411 */
7412 for (i = 0; i < h->nr_cmds; i++) {
7413 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7414
7415 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7416 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7417 (i * sizeof(struct ErrorInfo)));
7418 cp->err_info_len = sizeof(struct ErrorInfo);
7419 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7420 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7421 cp->timeout_sec = 0;
7422 cp->ReplyQueue = 0;
7423 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
7424 DIRECT_LOOKUP_BIT;
7425 cp->Tag.upper = 0;
7426 cp->host_addr.lower =
7427 (u32) (h->ioaccel_cmd_pool_dhandle +
7428 (i * sizeof(struct io_accel1_cmd)));
7429 cp->host_addr.upper = 0;
7430 }
7431 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7432 u64 cfg_offset, cfg_base_addr_index;
7433 u32 bft2_offset, cfg_base_addr;
7434 int rc;
7435
7436 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7437 &cfg_base_addr_index, &cfg_offset);
7438 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7439 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7440 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7441 4, h->ioaccel2_blockFetchTable);
7442 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7443 BUILD_BUG_ON(offsetof(struct CfgTable,
7444 io_accel_request_size_offset) != 0xb8);
7445 h->ioaccel2_bft2_regs =
7446 remap_pci_mem(pci_resource_start(h->pdev,
7447 cfg_base_addr_index) +
7448 cfg_offset + bft2_offset,
7449 ARRAY_SIZE(bft2) *
7450 sizeof(*h->ioaccel2_bft2_regs));
7451 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7452 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
e1f7de0c 7453 }
b9af4937
SC
7454 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7455 hpsa_wait_for_mode_change_ack(h);
e1f7de0c
MG
7456}
7457
7458static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7459{
283b4a9b
SC
7460 h->ioaccel_maxsg =
7461 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7462 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7463 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7464
e1f7de0c
MG
7465 /* Command structures must be aligned on a 128-byte boundary
7466 * because the 7 lower bits of the address are used by the
7467 * hardware.
7468 */
e1f7de0c
MG
7469 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7470 IOACCEL1_COMMANDLIST_ALIGNMENT);
7471 h->ioaccel_cmd_pool =
7472 pci_alloc_consistent(h->pdev,
7473 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7474 &(h->ioaccel_cmd_pool_dhandle));
7475
7476 h->ioaccel1_blockFetchTable =
283b4a9b 7477 kmalloc(((h->ioaccel_maxsg + 1) *
e1f7de0c
MG
7478 sizeof(u32)), GFP_KERNEL);
7479
7480 if ((h->ioaccel_cmd_pool == NULL) ||
7481 (h->ioaccel1_blockFetchTable == NULL))
7482 goto clean_up;
7483
7484 memset(h->ioaccel_cmd_pool, 0,
7485 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7486 return 0;
7487
7488clean_up:
7489 if (h->ioaccel_cmd_pool)
7490 pci_free_consistent(h->pdev,
7491 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7492 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7493 kfree(h->ioaccel1_blockFetchTable);
7494 return 1;
6c311b57
SC
7495}
7496
aca9012a
SC
7497static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7498{
7499 /* Allocate ioaccel2 mode command blocks and block fetch table */
7500
7501 h->ioaccel_maxsg =
7502 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7503 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7504 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7505
aca9012a
SC
7506 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7507 IOACCEL2_COMMANDLIST_ALIGNMENT);
7508 h->ioaccel2_cmd_pool =
7509 pci_alloc_consistent(h->pdev,
7510 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7511 &(h->ioaccel2_cmd_pool_dhandle));
7512
7513 h->ioaccel2_blockFetchTable =
7514 kmalloc(((h->ioaccel_maxsg + 1) *
7515 sizeof(u32)), GFP_KERNEL);
7516
7517 if ((h->ioaccel2_cmd_pool == NULL) ||
7518 (h->ioaccel2_blockFetchTable == NULL))
7519 goto clean_up;
7520
7521 memset(h->ioaccel2_cmd_pool, 0,
7522 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7523 return 0;
7524
7525clean_up:
7526 if (h->ioaccel2_cmd_pool)
7527 pci_free_consistent(h->pdev,
7528 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7529 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7530 kfree(h->ioaccel2_blockFetchTable);
7531 return 1;
7532}
7533
6f039790 7534static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
6c311b57
SC
7535{
7536 u32 trans_support;
e1f7de0c
MG
7537 unsigned long transMethod = CFGTBL_Trans_Performant |
7538 CFGTBL_Trans_use_short_tags;
254f796b 7539 int i;
6c311b57 7540
02ec19c8
SC
7541 if (hpsa_simple_mode)
7542 return;
7543
67c99a72 7544 trans_support = readl(&(h->cfgtable->TransportSupport));
7545 if (!(trans_support & PERFORMANT_MODE))
7546 return;
7547
e1f7de0c
MG
7548 /* Check for I/O accelerator mode support */
7549 if (trans_support & CFGTBL_Trans_io_accel1) {
7550 transMethod |= CFGTBL_Trans_io_accel1 |
7551 CFGTBL_Trans_enable_directed_msix;
7552 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7553 goto clean_up;
aca9012a
SC
7554 } else {
7555 if (trans_support & CFGTBL_Trans_io_accel2) {
7556 transMethod |= CFGTBL_Trans_io_accel2 |
7557 CFGTBL_Trans_enable_directed_msix;
7558 if (ioaccel2_alloc_cmds_and_bft(h))
7559 goto clean_up;
7560 }
e1f7de0c
MG
7561 }
7562
eee0f03a 7563 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
cba3d38b 7564 hpsa_get_max_perf_mode_cmds(h);
6c311b57 7565 /* Performant mode ring buffer and supporting data structures */
072b0518 7566 h->reply_queue_size = h->max_commands * sizeof(u64);
6c311b57 7567
254f796b 7568 for (i = 0; i < h->nreply_queues; i++) {
072b0518
SC
7569 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7570 h->reply_queue_size,
7571 &(h->reply_queue[i].busaddr));
7572 if (!h->reply_queue[i].head)
7573 goto clean_up;
254f796b
MG
7574 h->reply_queue[i].size = h->max_commands;
7575 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7576 h->reply_queue[i].current_entry = 0;
7577 }
7578
6c311b57 7579 /* Need a block fetch table for performant mode */
d66ae08b 7580 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
6c311b57 7581 sizeof(u32)), GFP_KERNEL);
072b0518 7582 if (!h->blockFetchTable)
6c311b57
SC
7583 goto clean_up;
7584
e1f7de0c 7585 hpsa_enter_performant_mode(h, trans_support);
303932fd
DB
7586 return;
7587
7588clean_up:
072b0518 7589 hpsa_free_reply_queues(h);
303932fd
DB
7590 kfree(h->blockFetchTable);
7591}
7592
23100dd9 7593static int is_accelerated_cmd(struct CommandList *c)
76438d08 7594{
23100dd9
SC
7595 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7596}
7597
7598static void hpsa_drain_accel_commands(struct ctlr_info *h)
7599{
7600 struct CommandList *c = NULL;
76438d08 7601 unsigned long flags;
23100dd9 7602 int accel_cmds_out;
76438d08
SC
7603
7604 do { /* wait for all outstanding commands to drain out */
23100dd9 7605 accel_cmds_out = 0;
76438d08 7606 spin_lock_irqsave(&h->lock, flags);
23100dd9
SC
7607 list_for_each_entry(c, &h->cmpQ, list)
7608 accel_cmds_out += is_accelerated_cmd(c);
7609 list_for_each_entry(c, &h->reqQ, list)
7610 accel_cmds_out += is_accelerated_cmd(c);
76438d08 7611 spin_unlock_irqrestore(&h->lock, flags);
23100dd9 7612 if (accel_cmds_out <= 0)
76438d08
SC
7613 break;
7614 msleep(100);
7615 } while (1);
7616}
7617
edd16368
SC
7618/*
7619 * This is it. Register the PCI driver information for the cards we control
7620 * the OS will call our registered routines when it finds one of our cards.
7621 */
7622static int __init hpsa_init(void)
7623{
31468401 7624 return pci_register_driver(&hpsa_pci_driver);
edd16368
SC
7625}
7626
7627static void __exit hpsa_cleanup(void)
7628{
7629 pci_unregister_driver(&hpsa_pci_driver);
edd16368
SC
7630}
7631
e1f7de0c
MG
7632static void __attribute__((unused)) verify_offsets(void)
7633{
dd0e19f3
ST
7634#define VERIFY_OFFSET(member, offset) \
7635 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7636
7637 VERIFY_OFFSET(structure_size, 0);
7638 VERIFY_OFFSET(volume_blk_size, 4);
7639 VERIFY_OFFSET(volume_blk_cnt, 8);
7640 VERIFY_OFFSET(phys_blk_shift, 16);
7641 VERIFY_OFFSET(parity_rotation_shift, 17);
7642 VERIFY_OFFSET(strip_size, 18);
7643 VERIFY_OFFSET(disk_starting_blk, 20);
7644 VERIFY_OFFSET(disk_blk_cnt, 28);
7645 VERIFY_OFFSET(data_disks_per_row, 36);
7646 VERIFY_OFFSET(metadata_disks_per_row, 38);
7647 VERIFY_OFFSET(row_cnt, 40);
7648 VERIFY_OFFSET(layout_map_count, 42);
7649 VERIFY_OFFSET(flags, 44);
7650 VERIFY_OFFSET(dekindex, 46);
7651 /* VERIFY_OFFSET(reserved, 48 */
7652 VERIFY_OFFSET(data, 64);
7653
7654#undef VERIFY_OFFSET
7655
b66cc250
MM
7656#define VERIFY_OFFSET(member, offset) \
7657 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7658
7659 VERIFY_OFFSET(IU_type, 0);
7660 VERIFY_OFFSET(direction, 1);
7661 VERIFY_OFFSET(reply_queue, 2);
7662 /* VERIFY_OFFSET(reserved1, 3); */
7663 VERIFY_OFFSET(scsi_nexus, 4);
7664 VERIFY_OFFSET(Tag, 8);
7665 VERIFY_OFFSET(cdb, 16);
7666 VERIFY_OFFSET(cciss_lun, 32);
7667 VERIFY_OFFSET(data_len, 40);
7668 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7669 VERIFY_OFFSET(sg_count, 45);
7670 /* VERIFY_OFFSET(reserved3 */
7671 VERIFY_OFFSET(err_ptr, 48);
7672 VERIFY_OFFSET(err_len, 56);
7673 /* VERIFY_OFFSET(reserved4 */
7674 VERIFY_OFFSET(sg, 64);
7675
7676#undef VERIFY_OFFSET
7677
e1f7de0c
MG
7678#define VERIFY_OFFSET(member, offset) \
7679 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7680
7681 VERIFY_OFFSET(dev_handle, 0x00);
7682 VERIFY_OFFSET(reserved1, 0x02);
7683 VERIFY_OFFSET(function, 0x03);
7684 VERIFY_OFFSET(reserved2, 0x04);
7685 VERIFY_OFFSET(err_info, 0x0C);
7686 VERIFY_OFFSET(reserved3, 0x10);
7687 VERIFY_OFFSET(err_info_len, 0x12);
7688 VERIFY_OFFSET(reserved4, 0x13);
7689 VERIFY_OFFSET(sgl_offset, 0x14);
7690 VERIFY_OFFSET(reserved5, 0x15);
7691 VERIFY_OFFSET(transfer_len, 0x1C);
7692 VERIFY_OFFSET(reserved6, 0x20);
7693 VERIFY_OFFSET(io_flags, 0x24);
7694 VERIFY_OFFSET(reserved7, 0x26);
7695 VERIFY_OFFSET(LUN, 0x34);
7696 VERIFY_OFFSET(control, 0x3C);
7697 VERIFY_OFFSET(CDB, 0x40);
7698 VERIFY_OFFSET(reserved8, 0x50);
7699 VERIFY_OFFSET(host_context_flags, 0x60);
7700 VERIFY_OFFSET(timeout_sec, 0x62);
7701 VERIFY_OFFSET(ReplyQueue, 0x64);
7702 VERIFY_OFFSET(reserved9, 0x65);
7703 VERIFY_OFFSET(Tag, 0x68);
7704 VERIFY_OFFSET(host_addr, 0x70);
7705 VERIFY_OFFSET(CISS_LUN, 0x78);
7706 VERIFY_OFFSET(SG, 0x78 + 8);
7707#undef VERIFY_OFFSET
7708}
7709
edd16368
SC
7710module_init(hpsa_init);
7711module_exit(hpsa_cleanup);