/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
21
22#include <linux/module.h>
23#include <linux/interrupt.h>
24#include <linux/types.h>
25#include <linux/pci.h>
e5a44df8 26#include <linux/pci-aspm.h>
edd16368
SC
27#include <linux/kernel.h>
28#include <linux/slab.h>
29#include <linux/delay.h>
30#include <linux/fs.h>
31#include <linux/timer.h>
edd16368
SC
32#include <linux/init.h>
33#include <linux/spinlock.h>
edd16368
SC
34#include <linux/compat.h>
35#include <linux/blktrace_api.h>
36#include <linux/uaccess.h>
37#include <linux/io.h>
38#include <linux/dma-mapping.h>
39#include <linux/completion.h>
40#include <linux/moduleparam.h>
41#include <scsi/scsi.h>
42#include <scsi/scsi_cmnd.h>
43#include <scsi/scsi_device.h>
44#include <scsi/scsi_host.h>
667e23d4 45#include <scsi/scsi_tcq.h>
9437ac43 46#include <scsi/scsi_eh.h>
73153fe5 47#include <scsi/scsi_dbg.h>
edd16368
SC
48#include <linux/cciss_ioctl.h>
49#include <linux/string.h>
50#include <linux/bitmap.h>
60063497 51#include <linux/atomic.h>
a0c12413 52#include <linux/jiffies.h>
42a91641 53#include <linux/percpu-defs.h>
094963da 54#include <linux/percpu.h>
2b08b3e9 55#include <asm/unaligned.h>
283b4a9b 56#include <asm/div64.h>
edd16368
SC
57#include "hpsa_cmd.h"
58#include "hpsa.h"
59
/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

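/*
 * c->scsi_cmd normally points at the scsi_cmnd a CommandList is
 * servicing; these two sentinel addresses instead mark a command as
 * driver-internal (SCSI_CMD_BUSY) or free (SCSI_CMD_IDLE), as tested
 * by hpsa_is_cmd_idle() below.
 */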
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == -1)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
	 * external target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

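/*
 * Sysfs toggle for ioaccel ("HP SSD Smart Path"): root can write 0 or 1
 * to /sys/class/scsi_host/host<N>/hp_ssd_smart_path_status at runtime to
 * disable or enable the accelerated I/O path for the whole controller.
 */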
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

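/*
 * On these boards the firmware expects the tag in an abort request to be
 * a byte-swizzled copy of the tag of the command being aborted; see
 * ctlr_needs_abort_tags_swizzled() below, which the abort path consults.
 */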
static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

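/*
 * In the 8-byte CISS LUN address, the two high bits of byte 3 encode the
 * addressing mode; 0x40 there marks a logical-volume address as opposed
 * to a physical or peripheral device address.
 */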
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};

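/*
 * Pop the next completed command tag off reply queue q.  In performant
 * mode each reply queue entry carries a "cycle" flag in its low-order
 * bit that flips on every pass through the ring; that is how fresh
 * entries are told apart from stale ones (see the wraparound check
 * below).  FIFO_EMPTY is returned when no new completion is posted.
 */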
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

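/*
 * Worked example of the layout above: a normal performant-mode command
 * whose block fetch table entry is 3 ORs 1 | (3 << 1) = 0x07 into its
 * bus address -- performant bit set, fetch entry 3, command type 0.
 */
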
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash
 * and dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

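/*
 * RAID_CTLR_LUNID (defined in the driver headers) is the 8-byte CISS
 * address of the controller LUN itself, so is_hba_lunid() answers "is
 * this address the controller rather than one of its disks or volumes?"
 */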
static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_state);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

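/*
 * Unrolled 8-byte compare of two CISS LUN addresses; used by
 * hpsa_scsi_find_entry() below when matching devices across rescans.
 */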
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded.  In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present.  And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
1503
1504static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1505 struct hpsa_scsi_dev_t *dev[], int ndevices)
1506{
1507 int i;
1508
1509 for (i = 0; i < ndevices; i++) {
1510 if (dev[i]->devtype != TYPE_DISK)
1511 continue;
1512 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1513 continue;
1514
1515 /*
1516 * If offload is currently enabled, the RAID map and
1517 * phys_disk[] assignment *better* not be changing
1518 * and since it isn't changing, we do not need to
1519 * update it.
1520 */
1521 if (dev[i]->offload_enabled)
1522 continue;
1523
1524 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1525 }
1526}
1527
1528static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1529 struct hpsa_scsi_dev_t *sd[], int nsds)
1530{
1531 /* sd contains scsi3 addresses and devtypes, and inquiry
1532 * data. This function takes what's in sd to be the current
1533 * reality and updates h->dev[] to reflect that reality.
1534 */
1535 int i, entry, device_change, changes = 0;
1536 struct hpsa_scsi_dev_t *csd;
1537 unsigned long flags;
1538 struct hpsa_scsi_dev_t **added, **removed;
1539 int nadded, nremoved;
1540 struct Scsi_Host *sh = NULL;
1541
1542 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1543 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1544
1545 if (!added || !removed) {
1546 dev_warn(&h->pdev->dev, "out of memory in "
1547 "adjust_hpsa_scsi_table\n");
1548 goto free_and_out;
1549 }
1550
1551 spin_lock_irqsave(&h->devlock, flags);
1552
1553 /* find any devices in h->dev[] that are not in
1554 * sd[] and remove them from h->dev[], and for any
1555 * devices which have changed, remove the old device
1556 * info and add the new device info.
1557 * If minor device attributes change, just update
1558 * the existing device structure.
1559 */
1560 i = 0;
1561 nremoved = 0;
1562 nadded = 0;
1563 while (i < h->ndevices) {
1564 csd = h->dev[i];
1565 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1566 if (device_change == DEVICE_NOT_FOUND) {
1567 changes++;
1568 hpsa_scsi_remove_entry(h, hostno, i,
1569 removed, &nremoved);
1570 continue; /* remove ^^^, hence i not incremented */
1571 } else if (device_change == DEVICE_CHANGED) {
1572 changes++;
1573 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1574 added, &nadded, removed, &nremoved);
1575 /* Set it to NULL to prevent it from being freed
1576 * at the bottom of hpsa_update_scsi_devices()
1577 */
1578 sd[entry] = NULL;
1579 } else if (device_change == DEVICE_UPDATED) {
1580 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1581 }
1582 i++;
1583 }
1584
1585 /* Now, make sure every device listed in sd[] is also
1586 * listed in h->dev[], adding them if they aren't found
1587 */
1588
1589 for (i = 0; i < nsds; i++) {
1590 if (!sd[i]) /* if already added above. */
1591 continue;
1592
1593 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1594 * as the SCSI mid-layer does not handle such devices well.
1595 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1596 * at 160Hz, and prevents the system from coming up.
1597 */
1598 if (sd[i]->volume_offline) {
1599 hpsa_show_volume_status(h, sd[i]);
1600 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1601 continue;
1602 }
1603
1604 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1605 h->ndevices, &entry);
1606 if (device_change == DEVICE_NOT_FOUND) {
1607 changes++;
1608 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1609 added, &nadded) != 0)
1610 break;
1611 sd[i] = NULL; /* prevent from being freed later. */
1612 } else if (device_change == DEVICE_CHANGED) {
1613 /* should never happen... */
1614 changes++;
1615 dev_warn(&h->pdev->dev,
1616 "device unexpectedly changed.\n");
1617 /* but if it does happen, we just ignore that device */
1618 }
1619 }
1620 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1621
1622 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1623 * any logical drives that need it enabled.
1624 */
1625 for (i = 0; i < h->ndevices; i++)
1626 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1627
1628 spin_unlock_irqrestore(&h->devlock, flags);
1629
1630 /* Monitor devices which are in one of several NOT READY states to be
1631 * brought online later. This must be done without holding h->devlock,
1632 * so don't touch h->dev[]
1633 */
1634 for (i = 0; i < nsds; i++) {
1635 if (!sd[i]) /* if already added above. */
1636 continue;
1637 if (sd[i]->volume_offline)
1638 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1639 }
1640
1641 /* Don't notify scsi mid layer of any changes the first time through
1642 * (or if there are no changes) scsi_scan_host will do it later the
1643 * first time through.
1644 */
1645 if (hostno == -1 || !changes)
1646 goto free_and_out;
1647
1648 sh = h->scsi_host;
1649 /* Notify scsi mid layer of any removed devices */
1650 for (i = 0; i < nremoved; i++) {
1651 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1652 struct scsi_device *sdev =
1653 scsi_device_lookup(sh, removed[i]->bus,
1654 removed[i]->target, removed[i]->lun);
1655 if (sdev != NULL) {
1656 scsi_remove_device(sdev);
1657 scsi_device_put(sdev);
1658 } else {
1659 /*
1660 * We don't expect to get here.
1661 * Future cmds to this device will get a selection
1662 * timeout as if the device were gone.
1663 */
1664 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1665 "didn't find device for removal.");
1666 }
1667 }
1668 kfree(removed[i]);
1669 removed[i] = NULL;
1670 }
1671
1672 /* Notify scsi mid layer of any added devices */
1673 for (i = 0; i < nadded; i++) {
1674 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1675 continue;
1676 if (scsi_add_device(sh, added[i]->bus,
1677 added[i]->target, added[i]->lun) == 0)
1678 continue;
1679 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1680 "addition failed, device not added.");
1681 /* now we have to remove it from h->dev,
1682 * since it didn't get added to scsi mid layer
1683 */
1684 fixup_botched_add(h, added[i]);
1685 added[i] = NULL;
1686 }
1687
1688free_and_out:
1689 kfree(added);
1690 kfree(removed);
1691}
1692
1693/*
1694 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1695 * Assumes h->devlock is held.
1696 */
1697static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1698 int bus, int target, int lun)
1699{
1700 int i;
1701 struct hpsa_scsi_dev_t *sd;
1702
1703 for (i = 0; i < h->ndevices; i++) {
1704 sd = h->dev[i];
1705 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1706 return sd;
1707 }
1708 return NULL;
1709}
1710
1711static int hpsa_slave_alloc(struct scsi_device *sdev)
1712{
1713 struct hpsa_scsi_dev_t *sd;
1714 unsigned long flags;
1715 struct ctlr_info *h;
1716
1717 h = sdev_to_hba(sdev);
1718 spin_lock_irqsave(&h->devlock, flags);
1719 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1720 sdev_id(sdev), sdev->lun);
1721 if (likely(sd)) {
1722 atomic_set(&sd->ioaccel_cmds_out, 0);
1723 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1724 } else
1725 sdev->hostdata = NULL;
1726 spin_unlock_irqrestore(&h->devlock, flags);
1727 return 0;
1728}
1729
1730/* configure scsi device based on internal per-device structure */
1731static int hpsa_slave_configure(struct scsi_device *sdev)
1732{
1733 struct hpsa_scsi_dev_t *sd;
1734 int queue_depth;
1735
1736 sd = sdev->hostdata;
1737 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1738
1739 if (sd)
1740 queue_depth = sd->queue_depth != 0 ?
1741 sd->queue_depth : sdev->host->can_queue;
1742 else
1743 queue_depth = sdev->host->can_queue;
1744
1745 scsi_change_queue_depth(sdev, queue_depth);
1746
1747 return 0;
1748}
1749
1750static void hpsa_slave_destroy(struct scsi_device *sdev)
1751{
1752 /* nothing to do. */
1753}
1754
1755static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1756{
1757 int i;
1758
1759 if (!h->ioaccel2_cmd_sg_list)
1760 return;
1761 for (i = 0; i < h->nr_cmds; i++) {
1762 kfree(h->ioaccel2_cmd_sg_list[i]);
1763 h->ioaccel2_cmd_sg_list[i] = NULL;
1764 }
1765 kfree(h->ioaccel2_cmd_sg_list);
1766 h->ioaccel2_cmd_sg_list = NULL;
1767}
1768
1769static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1770{
1771 int i;
1772
1773 if (h->chainsize <= 0)
1774 return 0;
1775
1776 h->ioaccel2_cmd_sg_list =
1777 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1778 GFP_KERNEL);
1779 if (!h->ioaccel2_cmd_sg_list)
1780 return -ENOMEM;
1781 for (i = 0; i < h->nr_cmds; i++) {
1782 h->ioaccel2_cmd_sg_list[i] =
1783 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1784 h->maxsgentries, GFP_KERNEL);
1785 if (!h->ioaccel2_cmd_sg_list[i])
1786 goto clean;
1787 }
1788 return 0;
1789
1790clean:
1791 hpsa_free_ioaccel2_sg_chain_blocks(h);
1792 return -ENOMEM;
1793}
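/*
 * Rough sizing sketch for the pre-allocation above, assuming the
 * 16-byte struct ioaccel2_sg_element and illustrative values of
 * nr_cmds = 1024 and maxsgentries = 512: the chain blocks cost about
 * 1024 * 512 * 16 bytes = 8 MB in total, paid once at init time so
 * that no per-I/O allocation is needed on the ioaccel2 path.
 */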
1794
1795static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1796{
1797 int i;
1798
1799 if (!h->cmd_sg_list)
1800 return;
1801 for (i = 0; i < h->nr_cmds; i++) {
1802 kfree(h->cmd_sg_list[i]);
1803 h->cmd_sg_list[i] = NULL;
1804 }
1805 kfree(h->cmd_sg_list);
1806 h->cmd_sg_list = NULL;
1807}
1808
1809static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1810{
1811 int i;
1812
1813 if (h->chainsize <= 0)
1814 return 0;
1815
1816 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1817 GFP_KERNEL);
1818 if (!h->cmd_sg_list) {
1819 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1820 return -ENOMEM;
1821 }
1822 for (i = 0; i < h->nr_cmds; i++) {
1823 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1824 h->chainsize, GFP_KERNEL);
1825 if (!h->cmd_sg_list[i]) {
1826 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1827 goto clean;
1828 }
1829 }
1830 return 0;
1831
1832clean:
1833 hpsa_free_sg_chain_blocks(h);
1834 return -ENOMEM;
1835}
1836
1837static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1838 struct io_accel2_cmd *cp, struct CommandList *c)
1839{
1840 struct ioaccel2_sg_element *chain_block;
1841 u64 temp64;
1842 u32 chain_size;
1843
1844 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1845 chain_size = le32_to_cpu(cp->data_len);
1846 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1847 PCI_DMA_TODEVICE);
1848 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1849 /* prevent subsequent unmapping */
1850 cp->sg->address = 0;
1851 return -1;
1852 }
1853 cp->sg->address = cpu_to_le64(temp64);
1854 return 0;
1855}
1856
1857static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1858 struct io_accel2_cmd *cp)
1859{
1860 struct ioaccel2_sg_element *chain_sg;
1861 u64 temp64;
1862 u32 chain_size;
1863
1864 chain_sg = cp->sg;
1865 temp64 = le64_to_cpu(chain_sg->address);
1866 chain_size = le32_to_cpu(cp->data_len);
1867 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1868}
1869
1870static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1871 struct CommandList *c)
1872{
1873 struct SGDescriptor *chain_sg, *chain_block;
1874 u64 temp64;
1875 u32 chain_len;
1876
1877 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1878 chain_block = h->cmd_sg_list[c->cmdindex];
1879 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1880 chain_len = sizeof(*chain_sg) *
1881 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1882 chain_sg->Len = cpu_to_le32(chain_len);
1883 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1884 PCI_DMA_TODEVICE);
1885 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1886 /* prevent subsequent unmapping */
1887 chain_sg->Addr = cpu_to_le64(0);
1888 return -1;
1889 }
1890 chain_sg->Addr = cpu_to_le64(temp64);
1891 return 0;
1892}
1893
1894static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1895 struct CommandList *c)
1896{
1897 struct SGDescriptor *chain_sg;
1898
1899 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1900 return;
1901
1902 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1903 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1904 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1905}
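/*
 * SG chaining in a nutshell: when a request needs more than
 * h->max_cmd_sg_entries descriptors, the last embedded descriptor
 * (SG[h->max_cmd_sg_entries - 1]) is flagged HPSA_SG_CHAIN and points
 * at the per-command chain block holding the remaining
 * (SGTotal - max_cmd_sg_entries) entries; hpsa_map_sg_chain_block()
 * above DMA-maps that block at submit time and this helper unmaps it
 * on completion.
 */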
1906
1907
1908/* Decode the various types of errors on ioaccel2 path.
1909 * Return 1 for any error that should generate a RAID path retry.
1910 * Return 0 for errors that don't require a RAID path retry.
1911 */
1912static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1913 struct CommandList *c,
1914 struct scsi_cmnd *cmd,
1915 struct io_accel2_cmd *c2)
1916{
1917 int data_len;
1918 int retry = 0;
1919 u32 ioaccel2_resid = 0;
1920
1921 switch (c2->error_data.serv_response) {
1922 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1923 switch (c2->error_data.status) {
1924 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1925 break;
1926 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1927 cmd->result |= SAM_STAT_CHECK_CONDITION;
1928 if (c2->error_data.data_present !=
1929 IOACCEL2_SENSE_DATA_PRESENT) {
1930 memset(cmd->sense_buffer, 0,
1931 SCSI_SENSE_BUFFERSIZE);
1932 break;
1933 }
1934 /* copy the sense data */
1935 data_len = c2->error_data.sense_data_len;
1936 if (data_len > SCSI_SENSE_BUFFERSIZE)
1937 data_len = SCSI_SENSE_BUFFERSIZE;
1938 if (data_len > sizeof(c2->error_data.sense_data_buff))
1939 data_len =
1940 sizeof(c2->error_data.sense_data_buff);
1941 memcpy(cmd->sense_buffer,
1942 c2->error_data.sense_data_buff, data_len);
1943 retry = 1;
1944 break;
1945 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1946 retry = 1;
1947 break;
1948 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1949 retry = 1;
1950 break;
1951 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1952 retry = 1;
1953 break;
1954 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1955 retry = 1;
1956 break;
1957 default:
1958 retry = 1;
1959 break;
1960 }
1961 break;
1962 case IOACCEL2_SERV_RESPONSE_FAILURE:
1963 switch (c2->error_data.status) {
1964 case IOACCEL2_STATUS_SR_IO_ERROR:
1965 case IOACCEL2_STATUS_SR_IO_ABORTED:
1966 case IOACCEL2_STATUS_SR_OVERRUN:
1967 retry = 1;
1968 break;
1969 case IOACCEL2_STATUS_SR_UNDERRUN:
1970 cmd->result = (DID_OK << 16); /* host byte */
1971 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1972 ioaccel2_resid = get_unaligned_le32(
1973 &c2->error_data.resid_cnt[0]);
1974 scsi_set_resid(cmd, ioaccel2_resid);
1975 break;
1976 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1977 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1978 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1979 /* We will get an event from ctlr to trigger rescan */
1980 retry = 1;
1981 break;
1982 default:
1983 retry = 1;
1984 }
1985 break;
1986 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1987 break;
1988 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1989 break;
1990 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1991 retry = 1;
1992 break;
1993 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1994 break;
1995 default:
1996 retry = 1;
1997 break;
1998 }
1999
2000 return retry; /* retry on raid path? */
2001}
2002
2003static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2004 struct CommandList *c)
2005{
2006 bool do_wake = false;
2007
2008 /*
2009 * Prevent the following race in the abort handler:
2010 *
2011 * 1. LLD is requested to abort a SCSI command
2012 * 2. The SCSI command completes
2013 * 3. The struct CommandList associated with step 2 is made available
2014 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2015 * 5. Abort handler follows scsi_cmnd->host_scribble and
2016 * finds struct CommandList and tries to abort it.
2017 * Now we have aborted the wrong command.
2018 *
2019 * Reset c->scsi_cmd here so that the abort or reset handler will know
2020 * this command has completed. Then, check to see if the handler is
2021 * waiting for this command, and, if so, wake it.
2022 */
2023 c->scsi_cmd = SCSI_CMD_IDLE;
2024 mb(); /* Declare command idle before checking for pending events. */
2025 if (c->abort_pending) {
2026 do_wake = true;
2027 c->abort_pending = false;
2028 }
2029 if (c->reset_pending) {
2030 unsigned long flags;
2031 struct hpsa_scsi_dev_t *dev;
2032
2033 /*
2034 * There appears to be a reset pending; lock the lock and
2035 * reconfirm. If so, then decrement the count of outstanding
2036 * commands and wake the reset command if this is the last one.
2037 */
2038 spin_lock_irqsave(&h->lock, flags);
2039 dev = c->reset_pending; /* Re-fetch under the lock. */
2040 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2041 do_wake = true;
2042 c->reset_pending = NULL;
2043 spin_unlock_irqrestore(&h->lock, flags);
2044 }
2045
2046 if (do_wake)
2047 wake_up_all(&h->event_sync_wait_queue);
2048}
2049
2050static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2051 struct CommandList *c)
2052{
2053 hpsa_cmd_resolve_events(h, c);
2054 cmd_tagged_free(h, c);
2055}
2056
2057static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2058 struct CommandList *c, struct scsi_cmnd *cmd)
2059{
2060 hpsa_cmd_resolve_and_free(h, c);
2061 cmd->scsi_done(cmd);
2062}
2063
2064static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2065{
2066 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2067 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2068}
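/*
 * Note: the resubmit work is queued on the current CPU
 * (raw_smp_processor_id()); presumably this keeps the retried command
 * close to the reply queue that just completed it.
 */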
2069
2070static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2071{
2072 cmd->result = DID_ABORT << 16;
2073}
2074
2075static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2076 struct scsi_cmnd *cmd)
2077{
2078 hpsa_set_scsi_cmd_aborted(cmd);
2079 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2080 c->Request.CDB, c->err_info->ScsiStatus);
2081 hpsa_cmd_resolve_and_free(h, c);
2082}
2083
2084static void process_ioaccel2_completion(struct ctlr_info *h,
2085 struct CommandList *c, struct scsi_cmnd *cmd,
2086 struct hpsa_scsi_dev_t *dev)
2087{
2088 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2089
2090 /* check for good status */
2091 if (likely(c2->error_data.serv_response == 0 &&
2092 c2->error_data.status == 0))
2093 return hpsa_cmd_free_and_done(h, c, cmd);
2094
2095 /*
2096 * Any RAID offload error results in retry which will use
2097 * the normal I/O path so the controller can handle whatever's
2098 * wrong.
2099 */
2100 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2101 c2->error_data.serv_response ==
2102 IOACCEL2_SERV_RESPONSE_FAILURE) {
2103 if (c2->error_data.status ==
2104 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2105 dev->offload_enabled = 0;
2106
2107 return hpsa_retry_cmd(h, c);
2108 }
2109
2110 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2111 return hpsa_retry_cmd(h, c);
2112
2113 return hpsa_cmd_free_and_done(h, c, cmd);
2114}
2115
2116/* Returns 0 on success, < 0 otherwise. */
2117static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2118 struct CommandList *cp)
2119{
2120 u8 tmf_status = cp->err_info->ScsiStatus;
2121
2122 switch (tmf_status) {
2123 case CISS_TMF_COMPLETE:
2124 /*
2125 * CISS_TMF_COMPLETE never happens, instead,
2126 * ei->CommandStatus == 0 for this case.
2127 */
2128 case CISS_TMF_SUCCESS:
2129 return 0;
2130 case CISS_TMF_INVALID_FRAME:
2131 case CISS_TMF_NOT_SUPPORTED:
2132 case CISS_TMF_FAILED:
2133 case CISS_TMF_WRONG_LUN:
2134 case CISS_TMF_OVERLAPPED_TAG:
2135 break;
2136 default:
2137 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2138 tmf_status);
2139 break;
2140 }
2141 return -tmf_status;
2142}
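/*
 * Note: for the failure cases above the raw TMF status is negated, so
 * a caller can both test for failure (rc < 0) and, if it cares,
 * recover the original CISS_TMF_* code as -rc.
 */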
2143
2144static void complete_scsi_command(struct CommandList *cp)
2145{
2146 struct scsi_cmnd *cmd;
2147 struct ctlr_info *h;
2148 struct ErrorInfo *ei;
2149 struct hpsa_scsi_dev_t *dev;
2150 struct io_accel2_cmd *c2;
2151
2152 u8 sense_key;
2153 u8 asc; /* additional sense code */
2154 u8 ascq; /* additional sense code qualifier */
2155 unsigned long sense_data_size;
2156
2157 ei = cp->err_info;
2158 cmd = cp->scsi_cmd;
2159 h = cp->h;
2160 dev = cmd->device->hostdata;
2161 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2162
2163 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2164 if ((cp->cmd_type == CMD_SCSI) &&
2165 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2166 hpsa_unmap_sg_chain_block(h, cp);
2167
2168 if ((cp->cmd_type == CMD_IOACCEL2) &&
2169 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2170 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2171
2172 cmd->result = (DID_OK << 16); /* host byte */
2173 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2174
2175 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2176 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2177
2178 /*
2179 * We check for lockup status here as it may be set for
2180 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2181 * fail_all_outstanding_cmds()
2182 */
2183 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2184 /* DID_NO_CONNECT will prevent a retry */
2185 cmd->result = DID_NO_CONNECT << 16;
2186 return hpsa_cmd_free_and_done(h, cp, cmd);
2187 }
2188
2189 if ((unlikely(hpsa_is_pending_event(cp)))) {
2190 if (cp->reset_pending)
2191 return hpsa_cmd_resolve_and_free(h, cp);
2192 if (cp->abort_pending)
2193 return hpsa_cmd_abort_and_free(h, cp, cmd);
2194 }
2195
2196 if (cp->cmd_type == CMD_IOACCEL2)
2197 return process_ioaccel2_completion(h, cp, cmd, dev);
2198
2199 scsi_set_resid(cmd, ei->ResidualCnt);
2200 if (ei->CommandStatus == 0)
2201 return hpsa_cmd_free_and_done(h, cp, cmd);
2202
2203 /* For I/O accelerator commands, copy over some fields to the normal
2204 * CISS header used below for error handling.
2205 */
2206 if (cp->cmd_type == CMD_IOACCEL1) {
2207 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2208 cp->Header.SGList = scsi_sg_count(cmd);
2209 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2210 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2211 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2212 cp->Header.tag = c->tag;
2213 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2214 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2215
2216 /* Any RAID offload error results in retry which will use
2217 * the normal I/O path so the controller can handle whatever's
2218 * wrong.
2219 */
2220 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2221 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2222 dev->offload_enabled = 0;
2223 return hpsa_retry_cmd(h, cp);
2224 }
2225 }
2226
2227 /* an error has occurred */
2228 switch (ei->CommandStatus) {
2229
2230 case CMD_TARGET_STATUS:
2231 cmd->result |= ei->ScsiStatus;
2232 /* copy the sense data */
2233 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2234 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2235 else
2236 sense_data_size = sizeof(ei->SenseInfo);
2237 if (ei->SenseLen < sense_data_size)
2238 sense_data_size = ei->SenseLen;
2239 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2240 if (ei->ScsiStatus)
2241 decode_sense_data(ei->SenseInfo, sense_data_size,
2242 &sense_key, &asc, &ascq);
2243 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2244 if (sense_key == ABORTED_COMMAND) {
2245 cmd->result |= DID_SOFT_ERROR << 16;
2246 break;
2247 }
2248 break;
2249 }
2250 /* Problem was not a check condition
2251 * Pass it up to the upper layers...
2252 */
2253 if (ei->ScsiStatus) {
2254 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2255 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2256 "Returning result: 0x%x\n",
2257 cp, ei->ScsiStatus,
2258 sense_key, asc, ascq,
2259 cmd->result);
2260 } else { /* scsi status is zero??? How??? */
2261 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2262 "Returning no connection.\n", cp);
2263
2264 /* Ordinarily, this case should never happen,
2265 * but there is a bug in some released firmware
2266 * revisions that allows it to happen if, for
2267 * example, a 4100 backplane loses power and
2268 * the tape drive is in it. We assume that
2269 * it's a fatal error of some kind because we
2270 * can't show that it wasn't. We will make it
2271 * look like selection timeout since that is
2272 * the most common reason for this to occur,
2273 * and it's severe enough.
2274 */
2275
2276 cmd->result = DID_NO_CONNECT << 16;
2277 }
2278 break;
2279
2280 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2281 break;
2282 case CMD_DATA_OVERRUN:
2283 dev_warn(&h->pdev->dev,
2284 "CDB %16phN data overrun\n", cp->Request.CDB);
2285 break;
2286 case CMD_INVALID: {
2287 /* print_bytes(cp, sizeof(*cp), 1, 0);
2288 print_cmd(cp); */
2289 /* We get CMD_INVALID if you address a non-existent device
2290 * instead of a selection timeout (no response). You will
2291 * see this if you yank out a drive, then try to access it.
2292 * This is kind of a shame because it means that any other
2293 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2294 * missing target. */
2295 cmd->result = DID_NO_CONNECT << 16;
2296 }
2297 break;
2298 case CMD_PROTOCOL_ERR:
2299 cmd->result = DID_ERROR << 16;
2300 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2301 cp->Request.CDB);
2302 break;
2303 case CMD_HARDWARE_ERR:
2304 cmd->result = DID_ERROR << 16;
2305 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2306 cp->Request.CDB);
2307 break;
2308 case CMD_CONNECTION_LOST:
2309 cmd->result = DID_ERROR << 16;
2310 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2311 cp->Request.CDB);
2312 break;
2313 case CMD_ABORTED:
2314 /* Return now to avoid calling scsi_done(). */
2315 return hpsa_cmd_abort_and_free(h, cp, cmd);
2316 case CMD_ABORT_FAILED:
2317 cmd->result = DID_ERROR << 16;
2318 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2319 cp->Request.CDB);
2320 break;
2321 case CMD_UNSOLICITED_ABORT:
2322 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2323 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2324 cp->Request.CDB);
2325 break;
2326 case CMD_TIMEOUT:
2327 cmd->result = DID_TIME_OUT << 16;
2328 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2329 cp->Request.CDB);
2330 break;
2331 case CMD_UNABORTABLE:
2332 cmd->result = DID_ERROR << 16;
2333 dev_warn(&h->pdev->dev, "Command unabortable\n");
2334 break;
2335 case CMD_TMF_STATUS:
2336 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2337 cmd->result = DID_ERROR << 16;
2338 break;
2339 case CMD_IOACCEL_DISABLED:
2340 /* This only handles the direct pass-through case since RAID
2341 * offload is handled above. Just attempt a retry.
2342 */
2343 cmd->result = DID_SOFT_ERROR << 16;
2344 dev_warn(&h->pdev->dev,
2345 "cp %p had HP SSD Smart Path error\n", cp);
2346 break;
2347 default:
2348 cmd->result = DID_ERROR << 16;
2349 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2350 cp, ei->CommandStatus);
2351 }
2352
2353 return hpsa_cmd_free_and_done(h, cp, cmd);
2354}
2355
2356static void hpsa_pci_unmap(struct pci_dev *pdev,
2357 struct CommandList *c, int sg_used, int data_direction)
2358{
2359 int i;
2360
2361 for (i = 0; i < sg_used; i++)
2362 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2363 le32_to_cpu(c->SG[i].Len),
2364 data_direction);
2365}
2366
2367static int hpsa_map_one(struct pci_dev *pdev,
2368 struct CommandList *cp,
2369 unsigned char *buf,
2370 size_t buflen,
2371 int data_direction)
2372{
2373 u64 addr64;
2374
2375 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2376 cp->Header.SGList = 0;
2377 cp->Header.SGTotal = cpu_to_le16(0);
2378 return 0;
2379 }
2380
2381 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2382 if (dma_mapping_error(&pdev->dev, addr64)) {
2383 /* Prevent subsequent unmap of something never mapped */
2384 cp->Header.SGList = 0;
2385 cp->Header.SGTotal = cpu_to_le16(0);
2386 return -1;
2387 }
2388 cp->SG[0].Addr = cpu_to_le64(addr64);
2389 cp->SG[0].Len = cpu_to_le32(buflen);
2390 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2391 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2392 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2393 return 0;
2394}
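/*
 * Typical (hypothetical) call pattern for the helper above: map one
 * kernel buffer behind a controller command and fail fast if the DMA
 * mapping cannot be created, e.g.
 *
 *	if (hpsa_map_one(h->pdev, c, buf, bufsize, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *
 * On success SG[0] describes the whole buffer and both SGList and
 * SGTotal are 1, so no chaining is ever involved on this path.
 */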
2395
2396#define NO_TIMEOUT ((unsigned long) -1)
2397#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2398static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2399 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2400{
2401 DECLARE_COMPLETION_ONSTACK(wait);
2402
2403 c->waiting = &wait;
2404 __enqueue_cmd_and_start_io(h, c, reply_queue);
2405 if (timeout_msecs == NO_TIMEOUT) {
2406 /* TODO: get rid of this no-timeout thing */
2407 wait_for_completion_io(&wait);
2408 return IO_OK;
2409 }
2410 if (!wait_for_completion_io_timeout(&wait,
2411 msecs_to_jiffies(timeout_msecs))) {
2412 dev_warn(&h->pdev->dev, "Command timed out.\n");
2413 return -ETIMEDOUT;
2414 }
2415 return IO_OK;
2416}
2417
2418static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2419 int reply_queue, unsigned long timeout_msecs)
2420{
2421 if (unlikely(lockup_detected(h))) {
2422 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2423 return IO_OK;
2424 }
2425 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
2426}
2427
2428static u32 lockup_detected(struct ctlr_info *h)
2429{
2430 int cpu;
2431 u32 rc, *lockup_detected;
2432
2433 cpu = get_cpu();
2434 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2435 rc = *lockup_detected;
2436 put_cpu();
2437 return rc;
2438}
2439
2440#define MAX_DRIVER_CMD_RETRIES 25
2441static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2442 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2443{
2444 int backoff_time = 10, retry_count = 0;
2445 int rc;
2446
2447 do {
2448 memset(c->err_info, 0, sizeof(*c->err_info));
2449 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2450 timeout_msecs);
2451 if (rc)
2452 break;
2453 retry_count++;
2454 if (retry_count > 3) {
2455 msleep(backoff_time);
2456 if (backoff_time < 1000)
2457 backoff_time *= 2;
2458 }
2459 } while ((check_for_unit_attention(h, c) ||
2460 check_for_busy(h, c)) &&
2461 retry_count <= MAX_DRIVER_CMD_RETRIES);
2462 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2463 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2464 rc = -EIO;
2465 return rc;
2466}
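/*
 * Retry pacing sketch for the loop above: the first three retries go
 * out back to back; from the fourth on, the sleep doubles from 10 ms
 * (10, 20, 40, ... 640, then 1280 ms once the doubling stops), so a
 * full MAX_DRIVER_CMD_RETRIES cycle spends on the order of 20 seconds
 * asleep before giving up with -EIO.
 */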
2467
2468static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2469 struct CommandList *c)
2470{
2471 const u8 *cdb = c->Request.CDB;
2472 const u8 *lun = c->Header.LUN.LunAddrBytes;
2473
2474 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2475 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2476 txt, lun[0], lun[1], lun[2], lun[3],
2477 lun[4], lun[5], lun[6], lun[7],
2478 cdb[0], cdb[1], cdb[2], cdb[3],
2479 cdb[4], cdb[5], cdb[6], cdb[7],
2480 cdb[8], cdb[9], cdb[10], cdb[11],
2481 cdb[12], cdb[13], cdb[14], cdb[15]);
2482}
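/*
 * Example of the resulting log line (all values illustrative):
 *
 *   hpsa 0000:03:00.0: invalid command: LUN:0000004000000000
 *   CDB:12000000ff0000000000000000000000
 */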
2483
2484static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2485 struct CommandList *cp)
2486{
2487 const struct ErrorInfo *ei = cp->err_info;
2488 struct device *d = &cp->h->pdev->dev;
2489 u8 sense_key, asc, ascq;
2490 int sense_len;
2491
2492 switch (ei->CommandStatus) {
2493 case CMD_TARGET_STATUS:
2494 if (ei->SenseLen > sizeof(ei->SenseInfo))
2495 sense_len = sizeof(ei->SenseInfo);
2496 else
2497 sense_len = ei->SenseLen;
2498 decode_sense_data(ei->SenseInfo, sense_len,
2499 &sense_key, &asc, &ascq);
2500 hpsa_print_cmd(h, "SCSI status", cp);
2501 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2502 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2503 sense_key, asc, ascq);
2504 else
2505 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2506 if (ei->ScsiStatus == 0)
2507 dev_warn(d, "SCSI status is abnormally zero. "
2508 "(probably indicates selection timeout "
2509 "reported incorrectly due to a known "
2510 "firmware bug, circa July, 2001.)\n");
2511 break;
2512 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2513 break;
2514 case CMD_DATA_OVERRUN:
2515 hpsa_print_cmd(h, "overrun condition", cp);
2516 break;
2517 case CMD_INVALID: {
2518 /* controller unfortunately reports SCSI passthru's
2519 * to non-existent targets as invalid commands.
2520 */
2521 hpsa_print_cmd(h, "invalid command", cp);
2522 dev_warn(d, "probably means device no longer present\n");
2523 }
2524 break;
2525 case CMD_PROTOCOL_ERR:
2526 hpsa_print_cmd(h, "protocol error", cp);
2527 break;
2528 case CMD_HARDWARE_ERR:
2529 hpsa_print_cmd(h, "hardware error", cp);
2530 break;
2531 case CMD_CONNECTION_LOST:
2532 hpsa_print_cmd(h, "connection lost", cp);
2533 break;
2534 case CMD_ABORTED:
2535 hpsa_print_cmd(h, "aborted", cp);
2536 break;
2537 case CMD_ABORT_FAILED:
2538 hpsa_print_cmd(h, "abort failed", cp);
2539 break;
2540 case CMD_UNSOLICITED_ABORT:
2541 hpsa_print_cmd(h, "unsolicited abort", cp);
2542 break;
2543 case CMD_TIMEOUT:
2544 hpsa_print_cmd(h, "timed out", cp);
2545 break;
2546 case CMD_UNABORTABLE:
2547 hpsa_print_cmd(h, "unabortable", cp);
2548 break;
2549 case CMD_CTLR_LOCKUP:
2550 hpsa_print_cmd(h, "controller lockup detected", cp);
2551 break;
2552 default:
2553 hpsa_print_cmd(h, "unknown status", cp);
2554 dev_warn(d, "Unknown command status %x\n",
2555 ei->CommandStatus);
2556 }
2557}
2558
2559static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2560 u16 page, unsigned char *buf,
2561 unsigned char bufsize)
2562{
2563 int rc = IO_OK;
2564 struct CommandList *c;
2565 struct ErrorInfo *ei;
2566
2567 c = cmd_alloc(h);
2568
2569 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2570 page, scsi3addr, TYPE_CMD)) {
2571 rc = -1;
2572 goto out;
2573 }
2574 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2575 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2576 if (rc)
2577 goto out;
2578 ei = c->err_info;
2579 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2580 hpsa_scsi_interpret_error(h, c);
2581 rc = -1;
2582 }
2583out:
2584 cmd_free(h, c);
2585 return rc;
2586}
2587
2588static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2589 unsigned char *scsi3addr, unsigned char page,
2590 struct bmic_controller_parameters *buf, size_t bufsize)
2591{
2592 int rc = IO_OK;
2593 struct CommandList *c;
2594 struct ErrorInfo *ei;
2595
2596 c = cmd_alloc(h);
2597 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2598 page, scsi3addr, TYPE_CMD)) {
2599 rc = -1;
2600 goto out;
2601 }
2602 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2603 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2604 if (rc)
2605 goto out;
2606 ei = c->err_info;
2607 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2608 hpsa_scsi_interpret_error(h, c);
2609 rc = -1;
2610 }
2611out:
2612 cmd_free(h, c);
2613 return rc;
2614}
2615
2616static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2617 u8 reset_type, int reply_queue)
2618{
2619 int rc = IO_OK;
2620 struct CommandList *c;
2621 struct ErrorInfo *ei;
2622
2623 c = cmd_alloc(h);
2624
2625
2626 /* fill_cmd can't fail here, no data buffer to map. */
2627 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2628 scsi3addr, TYPE_MSG);
2629 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2630 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2631 if (rc) {
2632 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2633 goto out;
2634 }
2635 /* no unmap needed here because no data xfer. */
2636
2637 ei = c->err_info;
2638 if (ei->CommandStatus != 0) {
2639 hpsa_scsi_interpret_error(h, c);
2640 rc = -1;
2641 }
2642out:
2643 cmd_free(h, c);
2644 return rc;
2645}
2646
2647static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2648 struct hpsa_scsi_dev_t *dev,
2649 unsigned char *scsi3addr)
2650{
2651 int i;
2652 bool match = false;
2653 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2654 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2655
2656 if (hpsa_is_cmd_idle(c))
2657 return false;
2658
2659 switch (c->cmd_type) {
2660 case CMD_SCSI:
2661 case CMD_IOCTL_PEND:
2662 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2663 sizeof(c->Header.LUN.LunAddrBytes));
2664 break;
2665
2666 case CMD_IOACCEL1:
2667 case CMD_IOACCEL2:
2668 if (c->phys_disk == dev) {
2669 /* HBA mode match */
2670 match = true;
2671 } else {
2672 /* Possible RAID mode -- check each phys dev. */
2673 /* FIXME: Do we need to take out a lock here? If
2674 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2675 * instead. */
2676 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2677 /* FIXME: an alternate test might be
2678 *
2679 * match = dev->phys_disk[i]->ioaccel_handle
2680 * == c2->scsi_nexus; */
2681 match = dev->phys_disk[i] == c->phys_disk;
2682 }
2683 }
2684 break;
2685
2686 case IOACCEL2_TMF:
2687 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2688 match = dev->phys_disk[i]->ioaccel_handle ==
2689 le32_to_cpu(ac->it_nexus);
2690 }
2691 break;
2692
2693 case 0: /* The command is in the middle of being initialized. */
2694 match = false;
2695 break;
2696
2697 default:
2698 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2699 c->cmd_type);
2700 BUG();
2701 }
2702
2703 return match;
2704}
2705
2706static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2707 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2708{
2709 int i;
2710 int rc = 0;
2711
2712 /* We can really only handle one reset at a time */
2713 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2714 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2715 return -EINTR;
2716 }
2717
2718 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2719
2720 for (i = 0; i < h->nr_cmds; i++) {
2721 struct CommandList *c = h->cmd_pool + i;
2722 int refcount = atomic_inc_return(&c->refcount);
2723
2724 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2725 unsigned long flags;
2726
2727 /*
2728 * Mark the target command as having a reset pending,
2729 * then lock a lock so that the command cannot complete
2730 * while we're considering it. If the command is not
2731 * idle then count it; otherwise revoke the event.
2732 */
2733 c->reset_pending = dev;
2734 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
2735 if (!hpsa_is_cmd_idle(c))
2736 atomic_inc(&dev->reset_cmds_out);
2737 else
2738 c->reset_pending = NULL;
2739 spin_unlock_irqrestore(&h->lock, flags);
2740 }
2741
2742 cmd_free(h, c);
2743 }
2744
2745 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2746 if (!rc)
2747 wait_event(h->event_sync_wait_queue,
2748 atomic_read(&dev->reset_cmds_out) == 0 ||
2749 lockup_detected(h));
2750
2751 if (unlikely(lockup_detected(h))) {
2752 dev_warn(&h->pdev->dev,
2753 "Controller lockup detected during reset wait\n");
2754 mutex_unlock(&h->reset_mutex);
2755 rc = -ENODEV;
2756 }
2757
2758 if (unlikely(rc))
2759 atomic_set(&dev->reset_cmds_out, 0);
2760
2761 mutex_unlock(&h->reset_mutex);
2762 return rc;
2763}
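/*
 * Note on the scan above: atomic_inc_return() on c->refcount takes a
 * temporary reference so the command cannot be recycled while it is
 * inspected, and the matching cmd_free() drops that reference whether
 * or not the command matched the device being reset.
 */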
2764
2765static void hpsa_get_raid_level(struct ctlr_info *h,
2766 unsigned char *scsi3addr, unsigned char *raid_level)
2767{
2768 int rc;
2769 unsigned char *buf;
2770
2771 *raid_level = RAID_UNKNOWN;
2772 buf = kzalloc(64, GFP_KERNEL);
2773 if (!buf)
2774 return;
2775 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2776 if (rc == 0)
2777 *raid_level = buf[8];
2778 if (*raid_level > RAID_UNKNOWN)
2779 *raid_level = RAID_UNKNOWN;
2780 kfree(buf);
2781 return;
2782}
2783
2784#define HPSA_MAP_DEBUG
2785#ifdef HPSA_MAP_DEBUG
2786static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2787 struct raid_map_data *map_buff)
2788{
2789 struct raid_map_disk_data *dd = &map_buff->data[0];
2790 int map, row, col;
2791 u16 map_cnt, row_cnt, disks_per_row;
2792
2793 if (rc != 0)
2794 return;
2795
2796 /* Show details only if debugging has been activated. */
2797 if (h->raid_offload_debug < 2)
2798 return;
2799
2800 dev_info(&h->pdev->dev, "structure_size = %u\n",
2801 le32_to_cpu(map_buff->structure_size));
2802 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2803 le32_to_cpu(map_buff->volume_blk_size));
2804 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2805 le64_to_cpu(map_buff->volume_blk_cnt));
2806 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2807 map_buff->phys_blk_shift);
2808 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2809 map_buff->parity_rotation_shift);
2810 dev_info(&h->pdev->dev, "strip_size = %u\n",
2811 le16_to_cpu(map_buff->strip_size));
2812 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2813 le64_to_cpu(map_buff->disk_starting_blk));
2814 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2815 le64_to_cpu(map_buff->disk_blk_cnt));
2816 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2817 le16_to_cpu(map_buff->data_disks_per_row));
2818 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2819 le16_to_cpu(map_buff->metadata_disks_per_row));
2820 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2821 le16_to_cpu(map_buff->row_cnt));
2822 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2823 le16_to_cpu(map_buff->layout_map_count));
2824 dev_info(&h->pdev->dev, "flags = 0x%x\n",
2825 le16_to_cpu(map_buff->flags));
2826 dev_info(&h->pdev->dev, "encryption = %s\n",
2827 le16_to_cpu(map_buff->flags) &
2828 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2829 dev_info(&h->pdev->dev, "dekindex = %u\n",
2830 le16_to_cpu(map_buff->dekindex));
2831 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2832 for (map = 0; map < map_cnt; map++) {
2833 dev_info(&h->pdev->dev, "Map%u:\n", map);
2834 row_cnt = le16_to_cpu(map_buff->row_cnt);
2835 for (row = 0; row < row_cnt; row++) {
2836 dev_info(&h->pdev->dev, " Row%u:\n", row);
2837 disks_per_row =
2838 le16_to_cpu(map_buff->data_disks_per_row);
2839 for (col = 0; col < disks_per_row; col++, dd++)
2840 dev_info(&h->pdev->dev,
2841 " D%02u: h=0x%04x xor=%u,%u\n",
2842 col, dd->ioaccel_handle,
2843 dd->xor_mult[0], dd->xor_mult[1]);
2844 disks_per_row =
2845 le16_to_cpu(map_buff->metadata_disks_per_row);
2846 for (col = 0; col < disks_per_row; col++, dd++)
2847 dev_info(&h->pdev->dev,
2848 " M%02u: h=0x%04x xor=%u,%u\n",
2849 col, dd->ioaccel_handle,
2850 dd->xor_mult[0], dd->xor_mult[1]);
2851 }
2852 }
2853}
2854#else
2855static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2856 __attribute__((unused)) int rc,
2857 __attribute__((unused)) struct raid_map_data *map_buff)
2858{
2859}
2860#endif
2861
2862static int hpsa_get_raid_map(struct ctlr_info *h,
2863 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2864{
2865 int rc = 0;
2866 struct CommandList *c;
2867 struct ErrorInfo *ei;
2868
2869 c = cmd_alloc(h);
2870
2871 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2872 sizeof(this_device->raid_map), 0,
2873 scsi3addr, TYPE_CMD)) {
2874 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2875 cmd_free(h, c);
2876 return -1;
2877 }
2878 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2879 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2880 if (rc)
2881 goto out;
2882 ei = c->err_info;
2883 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2884 hpsa_scsi_interpret_error(h, c);
2885 rc = -1;
2886 goto out;
2887 }
2888 cmd_free(h, c);
2889
2890 /* @todo in the future, dynamically allocate RAID map memory */
2891 if (le32_to_cpu(this_device->raid_map.structure_size) >
2892 sizeof(this_device->raid_map)) {
2893 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2894 rc = -1;
2895 }
2896 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2897 return rc;
2898out:
2899 cmd_free(h, c);
2900 return rc;
2901}
2902
2903static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2904 unsigned char scsi3addr[], u16 bmic_device_index,
2905 struct bmic_identify_physical_device *buf, size_t bufsize)
2906{
2907 int rc = IO_OK;
2908 struct CommandList *c;
2909 struct ErrorInfo *ei;
2910
2911 c = cmd_alloc(h);
2912 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2913 0, RAID_CTLR_LUNID, TYPE_CMD);
2914 if (rc)
2915 goto out;
2916
2917 c->Request.CDB[2] = bmic_device_index & 0xff;
2918 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2919
2920 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2921 NO_TIMEOUT);
2922 ei = c->err_info;
2923 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2924 hpsa_scsi_interpret_error(h, c);
2925 rc = -1;
2926 }
2927out:
2928 cmd_free(h, c);
2929 return rc;
2930}
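/*
 * Note: the BMIC identify-physical-device request carries a 16-bit
 * drive index split across the CDB, low byte in CDB[2] and high byte
 * in CDB[9], as set up just above.
 */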
2931
2932static int hpsa_vpd_page_supported(struct ctlr_info *h,
2933 unsigned char scsi3addr[], u8 page)
2934{
2935 int rc;
2936 int i;
2937 int pages;
2938 unsigned char *buf, bufsize;
2939
2940 buf = kzalloc(256, GFP_KERNEL);
2941 if (!buf)
2942 return 0;
2943
2944 /* Get the size of the page list first */
2945 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2946 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2947 buf, HPSA_VPD_HEADER_SZ);
2948 if (rc != 0)
2949 goto exit_unsupported;
2950 pages = buf[3];
2951 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2952 bufsize = pages + HPSA_VPD_HEADER_SZ;
2953 else
2954 bufsize = 255;
2955
2956 /* Get the whole VPD page list */
2957 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2958 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2959 buf, bufsize);
2960 if (rc != 0)
2961 goto exit_unsupported;
2962
2963 pages = buf[3];
2964 for (i = 1; i <= pages; i++)
2965 if (buf[3 + i] == page)
2966 goto exit_supported;
2967exit_unsupported:
2968 kfree(buf);
2969 return 0;
2970exit_supported:
2971 kfree(buf);
2972 return 1;
2973}
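/*
 * The scan above follows the standard INQUIRY EVPD page 0x00 layout:
 * byte 3 holds the number of supported page codes and the codes
 * themselves start at byte 4, hence the buf[3 + i] walk for
 * i = 1..pages.
 */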
2974
2975static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2976 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2977{
2978 int rc;
2979 unsigned char *buf;
2980 u8 ioaccel_status;
2981
2982 this_device->offload_config = 0;
2983 this_device->offload_enabled = 0;
2984 this_device->offload_to_be_enabled = 0;
2985
2986 buf = kzalloc(64, GFP_KERNEL);
2987 if (!buf)
2988 return;
2989 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2990 goto out;
2991 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2992 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2993 if (rc != 0)
2994 goto out;
2995
2996#define IOACCEL_STATUS_BYTE 4
2997#define OFFLOAD_CONFIGURED_BIT 0x01
2998#define OFFLOAD_ENABLED_BIT 0x02
2999 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3000 this_device->offload_config =
3001 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3002 if (this_device->offload_config) {
3003 this_device->offload_enabled =
3004 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3005 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3006 this_device->offload_enabled = 0;
3007 }
3008 this_device->offload_to_be_enabled = this_device->offload_enabled;
3009out:
3010 kfree(buf);
3011 return;
3012}
3013
3014/* Get the device id from inquiry page 0x83 */
3015static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3016 unsigned char *device_id, int buflen)
3017{
3018 int rc;
3019 unsigned char *buf;
3020
3021 if (buflen > 16)
3022 buflen = 16;
3023 buf = kzalloc(64, GFP_KERNEL);
3024 if (!buf)
3025 return -ENOMEM;
3026 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3027 if (rc == 0)
3028 memcpy(device_id, &buf[8], buflen);
3029 kfree(buf);
3030 return rc != 0;
3031}
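/*
 * Note: the identifier is copied from byte offset 8 of the device
 * identification VPD page (0x83), i.e. the body of the first
 * designator, and is truncated to at most 16 bytes.
 */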
3032
3033static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3034 void *buf, int bufsize,
3035 int extended_response)
3036{
3037 int rc = IO_OK;
3038 struct CommandList *c;
3039 unsigned char scsi3addr[8];
3040 struct ErrorInfo *ei;
3041
3042 c = cmd_alloc(h);
3043
3044 /* address the controller */
3045 memset(scsi3addr, 0, sizeof(scsi3addr));
3046 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3047 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3048 rc = -1;
3049 goto out;
3050 }
3051 if (extended_response)
3052 c->Request.CDB[1] = extended_response;
3053 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3054 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3055 if (rc)
3056 goto out;
3057 ei = c->err_info;
3058 if (ei->CommandStatus != 0 &&
3059 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3060 hpsa_scsi_interpret_error(h, c);
3061 rc = -1;
3062 } else {
3063 struct ReportLUNdata *rld = buf;
3064
3065 if (rld->extended_response_flag != extended_response) {
3066 dev_err(&h->pdev->dev,
3067 "report luns requested format %u, got %u\n",
3068 extended_response,
3069 rld->extended_response_flag);
3070 rc = -1;
3071 }
3072 }
3073out:
3074 cmd_free(h, c);
3075 return rc;
3076}
3077
3078static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3079 struct ReportExtendedLUNdata *buf, int bufsize)
3080{
3081 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3082 HPSA_REPORT_PHYS_EXTENDED);
3083}
3084
3085static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3086 struct ReportLUNdata *buf, int bufsize)
3087{
3088 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3089}
3090
3091static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3092 int bus, int target, int lun)
3093{
3094 device->bus = bus;
3095 device->target = target;
3096 device->lun = lun;
3097}
3098
3099/* Use VPD inquiry to get details of volume status */
3100static int hpsa_get_volume_status(struct ctlr_info *h,
3101 unsigned char scsi3addr[])
3102{
3103 int rc;
3104 int status;
3105 int size;
3106 unsigned char *buf;
3107
3108 buf = kzalloc(64, GFP_KERNEL);
3109 if (!buf)
3110 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3111
3112 /* Does controller have VPD for logical volume status? */
3113 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3114 goto exit_failed;
3115
3116 /* Get the size of the VPD return buffer */
3117 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3118 buf, HPSA_VPD_HEADER_SZ);
3119 if (rc != 0)
3120 goto exit_failed;
3121 size = buf[3];
3122
3123 /* Now get the whole VPD buffer */
3124 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3125 buf, size + HPSA_VPD_HEADER_SZ);
3126 if (rc != 0)
3127 goto exit_failed;
3128 status = buf[4]; /* status byte */
3129
3130 kfree(buf);
3131 return status;
3132exit_failed:
3133 kfree(buf);
3134 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3135}
3136
3137/* Determine offline status of a volume.
3138 * Return either:
3139 * 0 (not offline)
3140 * 0xff (offline for unknown reasons)
3141 * # (integer code indicating one of several NOT READY states
3142 * describing why a volume is to be kept offline)
3143 */
3144static int hpsa_volume_offline(struct ctlr_info *h,
3145 unsigned char scsi3addr[])
3146{
3147 struct CommandList *c;
3148 unsigned char *sense;
3149 u8 sense_key, asc, ascq;
3150 int sense_len;
3151 int rc, ldstat = 0;
3152 u16 cmd_status;
3153 u8 scsi_status;
3154#define ASC_LUN_NOT_READY 0x04
3155#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3156#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3157
3158 c = cmd_alloc(h);
3159
3160 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3161 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3162 if (rc) {
3163 cmd_free(h, c);
3164 return 0;
3165 }
3166 sense = c->err_info->SenseInfo;
3167 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3168 sense_len = sizeof(c->err_info->SenseInfo);
3169 else
3170 sense_len = c->err_info->SenseLen;
3171 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3172 cmd_status = c->err_info->CommandStatus;
3173 scsi_status = c->err_info->ScsiStatus;
3174 cmd_free(h, c);
3175 /* Is the volume 'not ready'? */
3176 if (cmd_status != CMD_TARGET_STATUS ||
3177 scsi_status != SAM_STAT_CHECK_CONDITION ||
3178 sense_key != NOT_READY ||
3179 asc != ASC_LUN_NOT_READY) {
3180 return 0;
3181 }
3182
3183 /* Determine the reason for not ready state */
3184 ldstat = hpsa_get_volume_status(h, scsi3addr);
3185
3186 /* Keep volume offline in certain cases: */
3187 switch (ldstat) {
3188 case HPSA_LV_UNDERGOING_ERASE:
3189 case HPSA_LV_UNDERGOING_RPI:
3190 case HPSA_LV_PENDING_RPI:
3191 case HPSA_LV_ENCRYPTED_NO_KEY:
3192 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3193 case HPSA_LV_UNDERGOING_ENCRYPTION:
3194 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3195 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3196 return ldstat;
3197 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3198 /* If VPD status page isn't available,
3199 * use ASC/ASCQ to determine state
3200 */
3201 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3202 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3203 return ldstat;
3204 break;
3205 default:
3206 break;
3207 }
3208 return 0;
3209}
3210
3211/*
3212 * Find out if a logical device supports aborts by simply trying one.
3213 * Smart Array may claim not to support aborts on logical drives, but
3214 * if an MSA2000 is connected, the drives on that will be presented
3215 * by the Smart Array as logical drives, and aborts may be sent to
3216 * those devices successfully. So the simplest way to find out is
3217 * to simply try an abort and see how the device responds.
3218 */
3219static int hpsa_device_supports_aborts(struct ctlr_info *h,
3220 unsigned char *scsi3addr)
3221{
3222 struct CommandList *c;
3223 struct ErrorInfo *ei;
3224 int rc = 0;
3225
3226 u64 tag = (u64) -1; /* bogus tag */
3227
3228 /* Assume that physical devices support aborts */
3229 if (!is_logical_dev_addr_mode(scsi3addr))
3230 return 1;
3231
3232 c = cmd_alloc(h);
bf43caf3 3233
9b5c48c2
SC
3234 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3235 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3236 /* no unmap needed here because no data xfer. */
3237 ei = c->err_info;
3238 switch (ei->CommandStatus) {
3239 case CMD_INVALID:
3240 rc = 0;
3241 break;
3242 case CMD_UNABORTABLE:
3243 case CMD_ABORT_FAILED:
3244 rc = 1;
3245 break;
9437ac43
SC
3246 case CMD_TMF_STATUS:
3247 rc = hpsa_evaluate_tmf_status(h, c);
3248 break;
9b5c48c2
SC
3249 default:
3250 rc = 0;
3251 break;
3252 }
3253 cmd_free(h, c);
3254 return rc;
3255}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
	unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

	unsigned char *inq_buff;
	unsigned char *obdr_sig;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

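	/*
	 * The parsing below follows the standard SCSI INQUIRY layout:
	 * byte 0, bits 4:0 carry the peripheral device type, bytes 8-15
	 * the vendor identification, and bytes 16-31 the product id.
	 */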
	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr)) {
		int volume_offline;

		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
			hpsa_get_ioaccel_status(h, scsi3addr, this_device);
		volume_offline = hpsa_volume_offline(h, scsi3addr);
		if (volume_offline < 0 || volume_offline > 0xff)
			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
		this_device->volume_offline = volume_offline & 0xff;
	} else {
		this_device->raid_level = RAID_UNKNOWN;
		this_device->offload_config = 0;
		this_device->offload_enabled = 0;
		this_device->offload_to_be_enabled = 0;
		this_device->hba_ioaccel_enabled = 0;
		this_device->volume_offline = 0;
		this_device->queue_depth = h->nr_cmds;
	}

	if (is_OBDR_device) {
		/* See if this is a One-Button-Disaster-Recovery device
		 * by looking for "$DR-10" at offset 43 in inquiry data.
		 */
		obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
		*is_OBDR_device = (this_device->devtype == TYPE_ROM &&
					strncmp(obdr_sig, OBDR_TAPE_SIG,
						OBDR_SIG_LEN) == 0);
	}
	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;
	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}

static unsigned char *ext_target_model[] = {
	"MSA2012",
	"MSA2024",
	"MSA2312",
	"MSA2324",
	"P2000 G3 SAS",
	"MSA 2040 SAS",
	NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
	int i;

	for (i = 0; ext_target_model[i]; i++)
		if (strncmp(device->model, ext_target_model[i],
			strlen(ext_target_model[i])) == 0)
			return 1;
	return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

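	/*
	 * The masks below (0x3fff and 0x00ff) assume the controller packs
	 * the target and lun fields for logical volumes into the low bytes
	 * of this 32-bit LUN id.
	 */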
	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* External target: put logicals on bus 1 and match the
		 * target/lun numbers the box reports; for any other smart
		 * array, use bus 0, target 0, and match the lunid.
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 * 1. Extract ioaccel2 handle from the command.
 * 2. Find a matching ioaccel2 handle from list of physical disks.
 * 3. Return:
 *	1 and set scsi3addr to address of matching physical disk, or
 *	0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
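	/*
	 * The LUN list length decoded above is a byte count: extended
	 * physical entries are 24 bytes each, while the plain logical
	 * entries below are 8 bytes each, hence the /24 and /8 divisors.
	 */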
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

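	/*
	 * Illustration: with 3 physicals, 2 logicals, and the controller
	 * reported first (raid_ctlr_position == 0), the flat index space
	 * is [ctlr, phys0..phys2, log0..log1], so logicals_start == 4 and
	 * last_device == 6.
	 */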
	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}

static int hpsa_hba_mode_enabled(struct ctlr_info *h)
{
	int rc;
	int hba_mode_enabled;
	struct bmic_controller_parameters *ctlr_params;

	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
		GFP_KERNEL);
	if (!ctlr_params)
		return -ENOMEM;
	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
		sizeof(struct bmic_controller_parameters));
	if (rc) {
		kfree(ctlr_params);
		return rc;
	}

	hba_mode_enabled =
		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
	kfree(ctlr_params);
	return hba_mode_enabled;
}

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		u8 *lunaddrbytes,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle =
		(struct ext_report_lun_entry *) lunaddrbytes;

	dev->ioaccel_handle = rle->ioaccel_handle;
	if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
			GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
	atomic_set(&dev->ioaccel_cmds_out, 0);
	atomic_set(&dev->reset_cmds_out, 0);
}

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	int rescan_hba_mode;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	rescan_hba_mode = hpsa_hba_mode_enabled(h);
	if (rescan_hba_mode < 0)
		goto out;

	if (!h->hba_mode_enabled && rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode enabled\n");
	else if (h->hba_mode_enabled && !rescan_hba_mode)
		dev_warn(&h->pdev->dev, "HBA mode disabled\n");

	h->hba_mode_enabled = rescan_hba_mode;

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals))
		goto out;

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes))
			if (i < nphysicals + (raid_ctlr_position == 0) &&
				NON_DISK_PHYS_DEV(lunaddrbytes))
				continue;

		/* Get device type, vendor, model, device id */
		if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR))
			continue; /* skip it if we can't talk to it. */
		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		/* do not expose masked devices */
		if (MASKED_DEVICE(lunaddrbytes) &&
			i < nphysicals + (raid_ctlr_position == 0)) {
			if (h->hba_mode_enabled)
				dev_warn(&h->pdev->dev,
					"Masked physical device detected\n");
			this_device->expose_state = HPSA_DO_NOT_EXPOSE;
		} else {
			this_device->expose_state =
					HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
		}

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i >= nphysicals) {
				ncurrent++;
				break;
			}

			if (h->hba_mode_enabled)
				/* never use raid mapper in HBA mode */
				this_device->offload_enabled = 0;
			else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
				h->transMethod & CFGTBL_Trans_io_accel2))
				break;

			hpsa_get_ioaccel_drive_info(h, this_device,
						lunaddrbytes, id_phys);
			atomic_set(&this_device->ioaccel_cmds_out, 0);
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (h->hba_mode_enabled)
				ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
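	/*
	 * Illustration: with h->max_cmd_sg_entries == 32 and use_sg == 40,
	 * the first list gets 31 data entries plus the chain entry, and
	 * the chained block holds the remaining 9.
	 */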
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = (((u32) cdb[2]) << 8) | cdb[3];
			block_cnt = cdb[4];
		} else {
			BUG_ON(*cdb_len != 12);
			block = (((u32) cdb[2]) << 24) |
				(((u32) cdb[3]) << 16) |
				(((u32) cdb[4]) << 8) |
				cdb[5];
			block_cnt =
				(((u32) cdb[6]) << 24) |
				(((u32) cdb[7]) << 16) |
				(((u32) cdb[8]) << 8) |
				cdb[9];
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
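
/*
 * Illustrative example of the fixup above: a 6-byte READ with LBA 0x1234
 * and a transfer length of 8 becomes a 10-byte READ with 0x1234 in CDB
 * bytes 2-5 and 8 in bytes 7-8, so the ioaccel paths only ever see 10- or
 * 16-byte read/write CDBs.
 */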

static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device? */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If block size is 512, tweak value is LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
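	/*
	 * Worked example: with a 4096-byte volume block size, LBA 100
	 * yields a tweak of 100 * 4096 / 512 = 800.
	 */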
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
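
/*
 * Illustrative example: with data_disks_per_row == 3 and layout_map_count
 * == 3 (R1-ADM), map_index 7 sits in mirror group 2; offload_to_mirror == 0
 * folds it back to the primary copy at index 1, while offload_to_mirror == 1
 * walks it to the group-1 copy at index 4.
 */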

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		/* fall through */
	case READ_6:
		first_block =
			(((u64) cmd->cmnd[2]) << 8) |
			cmd->cmnd[3];
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256; /* 6-byte CDB: count 0 means 256 */
		break;
	case WRITE_10:
		is_write = 1;
		/* fall through */
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		/* fall through */
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif
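	/*
	 * Worked example (illustrative geometry): with 4 data disks and a
	 * 128-block strip (blocks_per_row == 512), LBA 700 lands in row 1
	 * at row offset 188, i.e. column 1; the request must stay inside
	 * one such row/column cell to remain eligible below.
	 */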

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count -1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;


		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
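	/*
	 * Continuing the worked example above: disk_starting_blk +
	 * 1 row * 128-block strip + (offset 188 - column 1 * 128) lands
	 * 60 blocks into that disk's strip for row 1.
	 */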
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}

/*
 * Submit commands down the "normal" RAID stack path.
 * All callers of hpsa_ciss_submit must check lockup_detected
 * beforehand: optionally before, and in any case after, calling cmd_alloc.
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd will come back via the intr handler in complete_scsi_command() */
	return 0;
}

static void hpsa_cmd_init(struct ctlr_info *h, int index,
		struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}

static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}

static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
		struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}

static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}
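
/*
 * Note on the routine above: offload-enabled logical volumes go through the
 * RAID-mapped path and HBA-ioaccel physical devices through the direct-map
 * path; anything else keeps the IO_ACCEL_INELIGIBLE return so the caller
 * falls back to the normal CISS path.
 */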

static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}

/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}

static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* Wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	hpsa_scan_complete(h);
}

static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_init_shared_tag_map(sh, sh->can_queue);
	if (error) {
		dev_err(&h->pdev->dev,
			"%s: scsi_init_shared_tag_map failed for controller %d\n",
			__func__, h->ctlr);
		scsi_host_put(sh);
		return error;
	}
	h->scsi_host = sh;
	return 0;
}

static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}

/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
}
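
/*
 * Editorial sketch (not part of hpsa.c): the arithmetic of the mapping
 * above, with the reserved count passed in explicitly.  "reserved" stands
 * in for HPSA_NRESERVED_CMDS, whose actual value comes from hpsa.h.
 */
static inline int example_blk_tag_to_pool_index(int blk_tag, int reserved)
{
	if (blk_tag < 0)	/* the block layer assigned no tag */
		return blk_tag;
	/* block-layer tag 0 lands on the first slot past the reserved ones */
	return blk_tag + reserved;
}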

/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}

/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  Do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
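
/*
 * Editorial sketch (not part of hpsa.c): the retry cadence of the loop
 * above.  The interval starts at one second and doubles while it remains
 * below the cap (HPSA_MAX_WAIT_INTERVAL_SECS in the driver; here the cap
 * is a caller-supplied assumption), so the sleeps run 1, 2, 4, 8, ...
 */
static int example_tur_backoff_total_secs(int retries, int max_interval)
{
	int waittime = 1;	/* seconds, mirrors the loop above */
	int total = 0;
	int i;

	for (i = 0; i < retries; i++) {
		total += waittime;	/* stands in for msleep() */
		if (waittime < max_interval)
			waittime *= 2;
	}
	return total;
}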

static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int first_queue;
	int last_queue;
	int rq;
	int rc = 0;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}

/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	char msg[40];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		sprintf(msg, "cmd %d RESET FAILED, lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
			hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	return rc == 0 ? SUCCESS : FAILED;
}

static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
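
/*
 * Editorial sketch (not part of hpsa.c): the effect of swizzle_abort_tag()
 * on a recognizable pattern.  Each 4-byte half of the 8-byte tag is
 * independently byte-reversed, so a second call restores the original.
 */
static void example_swizzle_abort_tag(void)
{
	u8 tag[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	swizzle_abort_tag(tag);
	/* tag is now { 3, 2, 1, 0, 7, 6, 5, 4 } */
	swizzle_abort_tag(tag);
	/* tag is back to { 0, 1, 2, 3, 4, 5, 6, 7 } */
}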

static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}

static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}

static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}
5278
54b6e9e9
ST
5279/* ioaccel2 path firmware cannot handle abort task requests.
5280 * Change abort requests to physical target reset, and send to the
5281 * address of the physical disk used for the ioaccel 2 command.
5282 * Return 0 on success (IO_OK)
5283 * -1 on failure
5284 */
5285
5286static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
25163bd5 5287 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
54b6e9e9
ST
5288{
5289 int rc = IO_OK;
5290 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5291 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5292 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5293 unsigned char *psa = &phys_scsi3addr[0];
5294
5295 /* Get a pointer to the hpsa logical device. */
7fa3030c 5296 scmd = abort->scsi_cmd;
54b6e9e9
ST
5297 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5298 if (dev == NULL) {
5299 dev_warn(&h->pdev->dev,
5300 "Cannot abort: no device pointer for command.\n");
5301 return -1; /* not abortable */
5302 }
5303
2ba8bfc8
SC
5304 if (h->raid_offload_debug > 0)
5305 dev_info(&h->pdev->dev,
0d96ef5f 5306 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2ba8bfc8 5307 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
0d96ef5f 5308 "Reset as abort",
2ba8bfc8
SC
5309 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5310 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5311
54b6e9e9
ST
5312 if (!dev->offload_enabled) {
5313 dev_warn(&h->pdev->dev,
5314 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5315 return -1; /* not abortable */
5316 }
5317
5318 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5319 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5320 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5321 return -1; /* not abortable */
5322 }
5323
5324 /* send the reset */
2ba8bfc8
SC
5325 if (h->raid_offload_debug > 0)
5326 dev_info(&h->pdev->dev,
5327 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5328 psa[0], psa[1], psa[2], psa[3],
5329 psa[4], psa[5], psa[6], psa[7]);
d604f533 5330 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
54b6e9e9
ST
5331 if (rc != 0) {
5332 dev_warn(&h->pdev->dev,
5333 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5334 psa[0], psa[1], psa[2], psa[3],
5335 psa[4], psa[5], psa[6], psa[7]);
5336 return rc; /* failed to reset */
5337 }
5338
5339 /* wait for device to recover */
b69324ff 5340 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
54b6e9e9
ST
5341 dev_warn(&h->pdev->dev,
5342 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5343 psa[0], psa[1], psa[2], psa[3],
5344 psa[4], psa[5], psa[6], psa[7]);
5345 return -1; /* failed to recover */
5346 }
5347
5348 /* device recovered */
5349 dev_info(&h->pdev->dev,
5350 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5351 psa[0], psa[1], psa[2], psa[3],
5352 psa[4], psa[5], psa[6], psa[7]);
5353
5354 return rc; /* success */
5355}

static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	__le32 taglower, tagupper;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	dev = abort->scsi_cmd->device->hostdata;
	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
		return -1;

	c = cmd_alloc(h);
	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
		__func__, tagupper, taglower, c2->error_data.serv_response);
	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		rc = 0;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
	case IOACCEL2_SERV_RESPONSE_FAILURE:
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		rc = -1;
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
			__func__, tagupper, taglower,
			c2->error_data.serv_response);
		rc = -1;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}

static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/*
	 * ioaccel mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these commands,
	 * but not all underlying firmware can handle the abort TMF.
	 * Change the abort to a physical device reset when the abort TMF
	 * is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
						abort, reply_queue);
	}
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}

/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}

/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}
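
/*
 * Editorial sketch (not part of hpsa.c): the throttle above is a counting
 * gate.  atomic_dec_if_positive() claims a slot only when one is free, and
 * the matching atomic_inc()/wake_up_all() in hpsa_eh_abort_handler() hands
 * the slot back.  The non-blocking half of the pattern looks like this:
 */
static inline int example_try_take_abort_slot(struct ctlr_info *h)
{
	/* result >= 0 means the decrement happened and the slot is ours */
	return atomic_dec_if_positive(&h->abort_cmds_available) >= 0;
}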

/* Send an abort for the specified command.
 * If the device and controller support it, send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		/* msg is not yet initialized here; report the function name */
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
			__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
			"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
			"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}

/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries.  cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value.  Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}

static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block.  We don't need to do anything
	 * else to free it, because it is accessed by index.  (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL.  If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads.  In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized.  Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}
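
/*
 * Editorial sketch (not part of hpsa.c): the bitmap scan at the heart of
 * cmd_alloc(), isolated.  The search resumes at "offset" and wraps to bit
 * zero when it runs off the end; in the real allocator the refcount check
 * is what makes a claimed bit safe against a racing thread.
 */
static unsigned long example_scan_for_free_slot(unsigned long *bits,
	unsigned long nbits, unsigned long offset)
{
	unsigned long i = find_next_zero_bit(bits, nbits, offset);

	if (i >= nbits)		/* ran off the end: wrap and rescan */
		i = find_next_zero_bit(bits, nbits, 0);
	return i;		/* == nbits when every slot is busy */
}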

/*
 * This is the complementary operation to cmd_alloc().  Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}

#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
		(IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
		(BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
		compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev,
			"driver version string '%s' unrecognized.",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
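
/*
 * Editorial sketch (not part of hpsa.c): the DriverVer packing used above.
 * Major, minor, and sub-minor each occupy one byte, so driver version
 * "3.4.10" packs as (3 << 16) | (4 << 8) | 10 == 0x03040a.
 */
static inline u32 example_pack_driver_version(u8 vmaj, u8 vmin, u8 vsubmin)
{
	return (vmaj << 16) | (vmin << 8) | vsubmin;
}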

static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
					iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}

static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
						(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;
		BYTE __user *ptr = ioc->buf;

		/* Copy the data out of the buffer we created */
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
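
/*
 * Editorial sketch (not part of hpsa.c): the passthru throttling bracket
 * used for CCISS_PASSTHRU and CCISS_BIG_PASSTHRU above, as one reusable
 * shape.  "do_passthru" is a hypothetical callback standing in for either
 * ioctl body.
 */
static int example_throttled_passthru(struct ctlr_info *h, void __user *argp,
	int (*do_passthru)(struct ctlr_info *h, void __user *argp))
{
	int rc;

	if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
		return -EAGAIN;		/* too many in flight; caller retries */
	rc = do_passthru(h, argp);
	atomic_inc(&h->passthru_cmds_avail);	/* always hand the slot back */
	return rc;
}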

static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
				u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
}

static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	u64 tag; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code & VPD_PAGE) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = (page_code & 0xff);
			}
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to controller so it's a physical command
			   mode = 00, target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case BMIC_SENSE_CONTROLLER_PARAMETERS:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		case BMIC_IDENTIFY_PHYSICAL_DEVICE:
			c->Request.CDBLen = 10;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_READ;
			c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {
		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
				TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the */
			/* LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			memcpy(&tag, buff, sizeof(tag));
			dev_dbg(&h->pdev->dev,
				"Abort Tag:0x%016llx using rqst Tag:0x%016llx",
				tag, c->Header.tag);
			c->Request.CDBLen = 16;
			c->Request.type_attr_dir =
					TYPE_ATTR_DIR(cmd_type,
						ATTR_SIMPLE, XFER_WRITE);
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (GET_DIR(c->Request.type_attr_dir)) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}
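
/*
 * Editorial sketch (not part of hpsa.c): the alignment arithmetic inside
 * remap_pci_mem().  With 4 KiB pages, a BAR address of 0x12345678 splits
 * into page_base 0x12345000 and page_offs 0x678; the offset is re-applied
 * to the remapped page so callers see the original byte.
 */
static inline void example_split_page_address(ulong base, ulong *page_base,
	ulong *page_offs)
{
	*page_base = base & PAGE_MASK;	/* round down to a page boundary */
	*page_offs = base - *page_base;	/* distance into that page */
}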

static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
		complete(c->waiting);
}

static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}
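
/*
 * Editorial sketch (not part of hpsa.c): what the masks above do.  The
 * controller returns status in the low bits of a completed tag (two bits
 * in simple mode, DIRECT_LOOKUP_SHIFT bits in performant mode), and they
 * must be cleared before the tag is used as an index.
 */
static inline u32 example_strip_tag_status_bits(u32 tag, u32 nbits)
{
	/* e.g. nbits == 2: tag 0x1237 -> 0x1234 */
	return tag & ~((1U << nbits) - 1);
}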

/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled (known firmware bug.) Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}

static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			process_indexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		process_indexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}
6499
a9a3a273
SC
6500/* Send a message CDB to the firmware. Careful, this only works
6501 * in simple mode, not performant mode due to the tag lookup.
6502 * We only ever use this immediately after a controller reset.
6503 */
6f039790
GKH
6504static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6505 unsigned char type)
edd16368
SC
6506{
6507 struct Command {
6508 struct CommandListHeader CommandHeader;
6509 struct RequestBlock Request;
6510 struct ErrDescriptor ErrorDescriptor;
6511 };
6512 struct Command *cmd;
6513 static const size_t cmd_sz = sizeof(*cmd) +
6514 sizeof(cmd->ErrorDescriptor);
6515 dma_addr_t paddr64;
2b08b3e9
DB
6516 __le32 paddr32;
6517 u32 tag;
edd16368
SC
6518 void __iomem *vaddr;
6519 int i, err;
6520
6521 vaddr = pci_ioremap_bar(pdev, 0);
6522 if (vaddr == NULL)
6523 return -ENOMEM;
6524
6525 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6526 * CCISS commands, so they must be allocated from the lower 4GiB of
6527 * memory.
6528 */
6529 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6530 if (err) {
6531 iounmap(vaddr);
1eaec8f3 6532 return err;
edd16368
SC
6533 }
6534
6535 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6536 if (cmd == NULL) {
6537 iounmap(vaddr);
6538 return -ENOMEM;
6539 }
6540
6541 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6542 * although there's no guarantee, we assume that the address is at
6543 * least 4-byte aligned (most likely, it's page-aligned).
6544 */
2b08b3e9 6545 paddr32 = cpu_to_le32(paddr64);
edd16368
SC
6546
6547 cmd->CommandHeader.ReplyQueue = 0;
6548 cmd->CommandHeader.SGList = 0;
50a0decf 6549 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
2b08b3e9 6550 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
edd16368
SC
6551 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6552
6553 cmd->Request.CDBLen = 16;
a505b86f
SC
6554 cmd->Request.type_attr_dir =
6555 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
edd16368
SC
6556 cmd->Request.Timeout = 0; /* Don't time out */
6557 cmd->Request.CDB[0] = opcode;
6558 cmd->Request.CDB[1] = type;
6559 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
50a0decf 6560 cmd->ErrorDescriptor.Addr =
2b08b3e9 6561 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
50a0decf 6562 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
edd16368 6563
2b08b3e9 6564 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
edd16368
SC
6565
6566 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6567 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2b08b3e9 6568 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
edd16368
SC
6569 break;
6570 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6571 }
6572
6573 iounmap(vaddr);
6574
6575 /* we leak the DMA buffer here ... no choice since the controller could
6576 * still complete the command.
6577 */
6578 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6579 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6580 opcode, type);
6581 return -ETIMEDOUT;
6582 }
6583
6584 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6585
6586 if (tag & HPSA_ERROR_BIT) {
6587 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6588 opcode, type);
6589 return -EIO;
6590 }
6591
6592 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6593 opcode, type);
6594 return 0;
6595}
6596
edd16368
SC
6597#define hpsa_noop(p) hpsa_message(p, 3, 0)
6598
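/*
 * Usage sketch (editorial; this mirrors the retry loop used later in
 * hpsa_init_reset_devices()): after a reset the driver pings the board
 * with no-op message CDBs until one completes:
 *
 *	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++)
 *		if (hpsa_noop(pdev) == 0)
 *			break;	(the controller answered the message CDB)
 */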
1df8552a 6599static int hpsa_controller_hard_reset(struct pci_dev *pdev,
42a91641 6600 void __iomem *vaddr, u32 use_doorbell)
1df8552a 6601{
1df8552a
SC
6602
6603 if (use_doorbell) {
6604 /* For everything after the P600, the PCI power state method
6605 * of resetting the controller doesn't work, so we have this
6606 * other way using the doorbell register.
6607 */
6608 dev_info(&pdev->dev, "using doorbell to reset controller\n");
cf0b08d0 6609 writel(use_doorbell, vaddr + SA5_DOORBELL);
85009239 6610
00701a96 6611 /* PMC hardware guys tell us we need a 10 second delay after
85009239
SC
6612 * doorbell reset and before any attempt to talk to the board
6613 * at all to ensure that this actually works and doesn't fall
6614 * over in some weird corner cases.
6615 */
00701a96 6616 msleep(10000);
1df8552a
SC
6617 } else { /* Try to do it the PCI power state way */
6618
6619 /* Quoting from the Open CISS Specification: "The Power
6620 * Management Control/Status Register (CSR) controls the power
6621 * state of the device. The normal operating state is D0,
6622 * CSR=00h. The software off state is D3, CSR=03h. To reset
6623 * the controller, place the interface device in D3 then to D0,
6624 * this causes a secondary PCI reset which will reset the
6625 * controller." */
2662cab8
DB
6626
6627 int rc = 0;
6628
1df8552a 6629 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2662cab8 6630
1df8552a 6631 /* enter the D3hot power management state */
2662cab8
DB
6632 rc = pci_set_power_state(pdev, PCI_D3hot);
6633 if (rc)
6634 return rc;
1df8552a
SC
6635
6636 msleep(500);
6637
6638 /* enter the D0 power management state */
2662cab8
DB
6639 rc = pci_set_power_state(pdev, PCI_D0);
6640 if (rc)
6641 return rc;
c4853efe
MM
6642
6643 /*
6644 * The P600 requires a small delay when changing states.
6645 * Otherwise we may think the board did not reset and we bail.
6646 * This is for kdump only and is particular to the P600.
6647 */
6648 msleep(500);
1df8552a
SC
6649 }
6650 return 0;
6651}
6652
6f039790 6653static void init_driver_version(char *driver_version, int len)
580ada3c
SC
6654{
6655 memset(driver_version, 0, len);
f79cfec6 6656 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
580ada3c
SC
6657}
6658
6f039790 6659static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6660{
6661 char *driver_version;
6662 int i, size = sizeof(cfgtable->driver_version);
6663
6664 driver_version = kmalloc(size, GFP_KERNEL);
6665 if (!driver_version)
6666 return -ENOMEM;
6667
6668 init_driver_version(driver_version, size);
6669 for (i = 0; i < size; i++)
6670 writeb(driver_version[i], &cfgtable->driver_version[i]);
6671 kfree(driver_version);
6672 return 0;
6673}
6674
6f039790
GKH
6675static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6676 unsigned char *driver_ver)
580ada3c
SC
6677{
6678 int i;
6679
6680 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6681 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6682}
6683
6f039790 6684static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6685{
6686
6687 char *driver_ver, *old_driver_ver;
6688 int rc, size = sizeof(cfgtable->driver_version);
6689
6690 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6691 if (!old_driver_ver)
6692 return -ENOMEM;
6693 driver_ver = old_driver_ver + size;
6694
6695 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6696 * should have been changed, otherwise we know the reset failed.
6697 */
6698 init_driver_version(old_driver_ver, size);
6699 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6700 rc = !memcmp(driver_ver, old_driver_ver, size);
6701 kfree(old_driver_ver);
6702 return rc;
6703}
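/*
 * Reset-detection handshake in brief (editorial summary): before resetting,
 * write_driver_ver_to_cfgtable() stamps "hpsa <version>" into the config
 * table's driver_version field; a genuine reset wipes that area, so an
 * unchanged stamp means the board never actually went down. The caller
 * below reacts with rc = -ENOTSUPP and falls back to a soft reset.
 */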
edd16368 6704/* This does a hard reset of the controller using PCI power management
1df8552a 6705 * states or the doorbell register.
edd16368 6706 */
6b6c1cd7 6707static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
edd16368 6708{
1df8552a
SC
6709 u64 cfg_offset;
6710 u32 cfg_base_addr;
6711 u64 cfg_base_addr_index;
6712 void __iomem *vaddr;
6713 unsigned long paddr;
580ada3c 6714 u32 misc_fw_support;
270d05de 6715 int rc;
1df8552a 6716 struct CfgTable __iomem *cfgtable;
cf0b08d0 6717 u32 use_doorbell;
270d05de 6718 u16 command_register;
edd16368 6719
1df8552a
SC
6720 /* For controllers as old as the P600, this is very nearly
6721 * the same thing as
edd16368
SC
6722 *
6723 * pci_save_state(pci_dev);
6724 * pci_set_power_state(pci_dev, PCI_D3hot);
6725 * pci_set_power_state(pci_dev, PCI_D0);
6726 * pci_restore_state(pci_dev);
6727 *
1df8552a
SC
6728 * For controllers newer than the P600, the pci power state
6729 * method of resetting doesn't work so we have another way
6730 * using the doorbell register.
edd16368 6731 */
18867659 6732
60f923b9
RE
6733 if (!ctlr_is_resettable(board_id)) {
6734 dev_warn(&pdev->dev, "Controller not resettable\n");
25c1e56a
SC
6735 return -ENODEV;
6736 }
46380786
SC
6737
6738 /* if controller is soft- but not hard resettable... */
6739 if (!ctlr_is_hard_resettable(board_id))
6740 return -ENOTSUPP; /* try soft reset later. */
18867659 6741
270d05de
SC
6742 /* Save the PCI command register */
6743 pci_read_config_word(pdev, 4, &command_register);
270d05de 6744 pci_save_state(pdev);
edd16368 6745
1df8552a
SC
6746 /* find the first memory BAR, so we can find the cfg table */
6747 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6748 if (rc)
6749 return rc;
6750 vaddr = remap_pci_mem(paddr, 0x250);
6751 if (!vaddr)
6752 return -ENOMEM;
edd16368 6753
1df8552a
SC
6754 /* find cfgtable in order to check if reset via doorbell is supported */
6755 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6756 &cfg_base_addr_index, &cfg_offset);
6757 if (rc)
6758 goto unmap_vaddr;
6759 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6760 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6761 if (!cfgtable) {
6762 rc = -ENOMEM;
6763 goto unmap_vaddr;
6764 }
580ada3c
SC
6765 rc = write_driver_ver_to_cfgtable(cfgtable);
6766 if (rc)
03741d95 6767 goto unmap_cfgtable;
edd16368 6768
cf0b08d0
SC
6769 /* If reset via doorbell register is supported, use that.
6770 * There are two such methods. Favor the newest method.
6771 */
1df8552a 6772 misc_fw_support = readl(&cfgtable->misc_fw_support);
cf0b08d0
SC
6773 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6774 if (use_doorbell) {
6775 use_doorbell = DOORBELL_CTLR_RESET2;
6776 } else {
6777 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6778 if (use_doorbell) {
050f7147
SC
6779 dev_warn(&pdev->dev,
6780 "Soft reset not supported. Firmware update is required.\n");
64670ac8 6781 rc = -ENOTSUPP; /* try soft reset */
cf0b08d0
SC
6782 goto unmap_cfgtable;
6783 }
6784 }
edd16368 6785
1df8552a
SC
6786 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6787 if (rc)
6788 goto unmap_cfgtable;
edd16368 6789
270d05de 6790 pci_restore_state(pdev);
270d05de 6791 pci_write_config_word(pdev, 4, command_register);
edd16368 6792
1df8552a
SC
6793 /* Some devices (notably the HP Smart Array 5i Controller)
6794 need a little pause here */
6795 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6796
fe5389c8
SC
6797 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6798 if (rc) {
6799 dev_warn(&pdev->dev,
050f7147 6800 "Failed waiting for board to become ready after hard reset\n");
fe5389c8
SC
6801 goto unmap_cfgtable;
6802 }
fe5389c8 6803
580ada3c
SC
6804 rc = controller_reset_failed(vaddr);
6805 if (rc < 0)
6806 goto unmap_cfgtable;
6807 if (rc) {
64670ac8
SC
6808 dev_warn(&pdev->dev, "Unable to successfully reset "
6809 "controller. Will try soft reset.\n");
6810 rc = -ENOTSUPP;
580ada3c 6811 } else {
64670ac8 6812 dev_info(&pdev->dev, "board ready after hard reset.\n");
1df8552a
SC
6813 }
6814
6815unmap_cfgtable:
6816 iounmap(cfgtable);
6817
6818unmap_vaddr:
6819 iounmap(vaddr);
6820 return rc;
edd16368
SC
6821}
6822
6823/*
6824 * We cannot read the structure directly; for portability we must use
6825 * the io functions.
6826 * This is for debug only.
6827 */
42a91641 6828static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
edd16368 6829{
58f8665c 6830#ifdef HPSA_DEBUG
edd16368
SC
6831 int i;
6832 char temp_name[17];
6833
6834 dev_info(dev, "Controller Configuration information\n");
6835 dev_info(dev, "------------------------------------\n");
6836 for (i = 0; i < 4; i++)
6837 temp_name[i] = readb(&(tb->Signature[i]));
6838 temp_name[4] = '\0';
6839 dev_info(dev, " Signature = %s\n", temp_name);
6840 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6841 dev_info(dev, " Transport methods supported = 0x%x\n",
6842 readl(&(tb->TransportSupport)));
6843 dev_info(dev, " Transport methods active = 0x%x\n",
6844 readl(&(tb->TransportActive)));
6845 dev_info(dev, " Requested transport Method = 0x%x\n",
6846 readl(&(tb->HostWrite.TransportRequest)));
6847 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6848 readl(&(tb->HostWrite.CoalIntDelay)));
6849 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6850 readl(&(tb->HostWrite.CoalIntCount)));
69d6e33d 6851 dev_info(dev, " Max outstanding commands = %d\n",
edd16368
SC
6852 readl(&(tb->CmdsOutMax)));
6853 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6854 for (i = 0; i < 16; i++)
6855 temp_name[i] = readb(&(tb->ServerName[i]));
6856 temp_name[16] = '\0';
6857 dev_info(dev, " Server Name = %s\n", temp_name);
6858 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6859 readl(&(tb->HeartBeat)));
edd16368 6860#endif /* HPSA_DEBUG */
58f8665c 6861}
edd16368
SC
6862
6863static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6864{
6865 int i, offset, mem_type, bar_type;
6866
6867 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6868 return 0;
6869 offset = 0;
6870 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6871 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6872 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6873 offset += 4;
6874 else {
6875 mem_type = pci_resource_flags(pdev, i) &
6876 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6877 switch (mem_type) {
6878 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6879 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6880 offset += 4; /* 32 bit */
6881 break;
6882 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6883 offset += 8;
6884 break;
6885 default: /* reserved in PCI 2.2 */
6886 dev_warn(&pdev->dev,
6887 "base address is invalid\n");
6888 return -1;
6889 break;
6890 }
6891 }
6892 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6893 return i + 1;
6894 }
6895 return -1;
6896}
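/*
 * Worked example (editorial): if resource 0 is a 64-bit memory BAR, the
 * scan advances offset by 8, so pci_bar_addr == PCI_BASE_ADDRESS_0 + 8
 * matches right after processing resource 0 and the function returns
 * 0 + 1 = 1: the BAR at config-space byte offset 8 is resource index 1.
 */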
6897
cc64c817
RE
6898static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6899{
6900 if (h->msix_vector) {
6901 if (h->pdev->msix_enabled)
6902 pci_disable_msix(h->pdev);
105a3dbc 6903 h->msix_vector = 0;
cc64c817
RE
6904 } else if (h->msi_vector) {
6905 if (h->pdev->msi_enabled)
6906 pci_disable_msi(h->pdev);
105a3dbc 6907 h->msi_vector = 0;
cc64c817
RE
6908 }
6909}
6910
edd16368 6911/* If MSI/MSI-X is supported by the kernel we will try to enable it on
050f7147 6912 * controllers that are capable. If not, we use legacy INTx mode.
edd16368 6913 */
6f039790 6914static void hpsa_interrupt_mode(struct ctlr_info *h)
edd16368
SC
6915{
6916#ifdef CONFIG_PCI_MSI
254f796b
MG
6917 int err, i;
6918 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6919
6920 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6921 hpsa_msix_entries[i].vector = 0;
6922 hpsa_msix_entries[i].entry = i;
6923 }
edd16368
SC
6924
6925 /* Some boards advertise MSI but don't really support it */
6b3f4c52
SC
6926 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6927 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
edd16368 6928 goto default_int_mode;
55c06c71 6929 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
050f7147 6930 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
eee0f03a 6931 h->msix_vector = MAX_REPLY_QUEUES;
f89439bc
SC
6932 if (h->msix_vector > num_online_cpus())
6933 h->msix_vector = num_online_cpus();
18fce3c4
AG
6934 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6935 1, h->msix_vector);
6936 if (err < 0) {
6937 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6938 h->msix_vector = 0;
6939 goto single_msi_mode;
6940 } else if (err < h->msix_vector) {
55c06c71 6941 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
edd16368 6942 "available\n", err);
edd16368 6943 }
18fce3c4
AG
6944 h->msix_vector = err;
6945 for (i = 0; i < h->msix_vector; i++)
6946 h->intr[i] = hpsa_msix_entries[i].vector;
6947 return;
edd16368 6948 }
18fce3c4 6949single_msi_mode:
55c06c71 6950 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
050f7147 6951 dev_info(&h->pdev->dev, "MSI capable controller\n");
55c06c71 6952 if (!pci_enable_msi(h->pdev))
edd16368
SC
6953 h->msi_vector = 1;
6954 else
55c06c71 6955 dev_warn(&h->pdev->dev, "MSI init failed\n");
edd16368
SC
6956 }
6957default_int_mode:
6958#endif /* CONFIG_PCI_MSI */
6959 /* if we get here we're going to use the default interrupt mode */
a9a3a273 6960 h->intr[h->intr_mode] = h->pdev->irq;
edd16368
SC
6961}
6962
6f039790 6963static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
e5c880d1
SC
6964{
6965 int i;
6966 u32 subsystem_vendor_id, subsystem_device_id;
6967
6968 subsystem_vendor_id = pdev->subsystem_vendor;
6969 subsystem_device_id = pdev->subsystem_device;
6970 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6971 subsystem_vendor_id;
6972
6973 for (i = 0; i < ARRAY_SIZE(products); i++)
6974 if (*board_id == products[i].board_id)
6975 return i;
6976
6798cc0a
SC
6977 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6978 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6979 !hpsa_allow_any) {
e5c880d1
SC
6980 dev_warn(&pdev->dev, "unrecognized board ID: "
6981 "0x%08x, ignoring.\n", *board_id);
6982 return -ENODEV;
6983 }
6984 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6985}
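/*
 * Worked example (editorial): a board with subsystem vendor 0x103C (HP)
 * and subsystem device 0x3354 yields
 * *board_id = (0x3354 << 16) | 0x103C = 0x3354103C,
 * which is then matched against products[].board_id.
 */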
6986
6f039790
GKH
6987static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6988 unsigned long *memory_bar)
3a7774ce
SC
6989{
6990 int i;
6991
6992 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
12d2cd47 6993 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3a7774ce 6994 /* addressing mode bits already removed */
12d2cd47
SC
6995 *memory_bar = pci_resource_start(pdev, i);
6996 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3a7774ce
SC
6997 *memory_bar);
6998 return 0;
6999 }
12d2cd47 7000 dev_warn(&pdev->dev, "no memory BAR found\n");
3a7774ce
SC
7001 return -ENODEV;
7002}
7003
6f039790
GKH
7004static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7005 int wait_for_ready)
2c4c8c8b 7006{
fe5389c8 7007 int i, iterations;
2c4c8c8b 7008 u32 scratchpad;
fe5389c8
SC
7009 if (wait_for_ready)
7010 iterations = HPSA_BOARD_READY_ITERATIONS;
7011 else
7012 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
2c4c8c8b 7013
fe5389c8
SC
7014 for (i = 0; i < iterations; i++) {
7015 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7016 if (wait_for_ready) {
7017 if (scratchpad == HPSA_FIRMWARE_READY)
7018 return 0;
7019 } else {
7020 if (scratchpad != HPSA_FIRMWARE_READY)
7021 return 0;
7022 }
2c4c8c8b
SC
7023 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7024 }
fe5389c8 7025 dev_warn(&pdev->dev, "board not ready, timed out.\n");
2c4c8c8b
SC
7026 return -ENODEV;
7027}
7028
6f039790
GKH
7029static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7030 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7031 u64 *cfg_offset)
a51fd47f
SC
7032{
7033 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7034 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7035 *cfg_base_addr &= (u32) 0x0000ffff;
7036 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7037 if (*cfg_base_addr_index == -1) {
7038 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7039 return -ENODEV;
7040 }
7041 return 0;
7042}
7043
195f2c65
RE
7044static void hpsa_free_cfgtables(struct ctlr_info *h)
7045{
105a3dbc 7046 if (h->transtable) {
195f2c65 7047 iounmap(h->transtable);
105a3dbc
RE
7048 h->transtable = NULL;
7049 }
7050 if (h->cfgtable) {
195f2c65 7051 iounmap(h->cfgtable);
105a3dbc
RE
7052 h->cfgtable = NULL;
7053 }
195f2c65
RE
7054}
7055
7056/* Find and map CISS config table and transfer table
7057 * several items must be unmapped (freed) later
7058 */
6f039790 7059static int hpsa_find_cfgtables(struct ctlr_info *h)
edd16368 7060{
01a02ffc
SC
7061 u64 cfg_offset;
7062 u32 cfg_base_addr;
7063 u64 cfg_base_addr_index;
303932fd 7064 u32 trans_offset;
a51fd47f 7065 int rc;
77c4495c 7066
a51fd47f
SC
7067 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7068 &cfg_base_addr_index, &cfg_offset);
7069 if (rc)
7070 return rc;
77c4495c 7071 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
a51fd47f 7072 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
cd3c81c4
RE
7073 if (!h->cfgtable) {
7074 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
77c4495c 7075 return -ENOMEM;
cd3c81c4 7076 }
580ada3c
SC
7077 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7078 if (rc)
7079 return rc;
77c4495c 7080 /* Find performant mode table. */
a51fd47f 7081 trans_offset = readl(&h->cfgtable->TransMethodOffset);
77c4495c
SC
7082 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7083 cfg_base_addr_index)+cfg_offset+trans_offset,
7084 sizeof(*h->transtable));
195f2c65
RE
7085 if (!h->transtable) {
7086 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7087 hpsa_free_cfgtables(h);
77c4495c 7088 return -ENOMEM;
195f2c65 7089 }
77c4495c
SC
7090 return 0;
7091}
7092
6f039790 7093static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
cba3d38b 7094{
41ce4c35
SC
7095#define MIN_MAX_COMMANDS 16
7096 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7097
7098 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
72ceeaec
SC
7099
7100 /* Limit commands in memory limited kdump scenario. */
7101 if (reset_devices && h->max_commands > 32)
7102 h->max_commands = 32;
7103
41ce4c35
SC
7104 if (h->max_commands < MIN_MAX_COMMANDS) {
7105 dev_warn(&h->pdev->dev,
7106 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7107 h->max_commands,
7108 MIN_MAX_COMMANDS);
7109 h->max_commands = MIN_MAX_COMMANDS;
cba3d38b
SC
7110 }
7111}
7112
c7ee65b3
WS
7113/* If the controller reports that the total max sg entries is greater than 512,
7114 * then we know that chained SG blocks work. (Original smart arrays did not
7115 * support chained SG blocks and would return zero for max sg entries.)
7116 */
7117static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7118{
7119 return h->maxsgentries > 512;
7120}
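/*
 * Editorial example: a controller reporting MaxScatterGatherElements = 1280
 * supports chaining, so hpsa_find_board_params() below embeds at most 32 SG
 * entries in the command itself (one of them reserved for the chain
 * pointer) and sizes the external chain block at 1280 - 32 = 1248 entries.
 */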
7121
b93d7536
SC
7122/* Interrogate the hardware for some limits:
7123 * max commands, max SG elements without chaining, and with chaining,
7124 * SG chain block size, etc.
7125 */
6f039790 7126static void hpsa_find_board_params(struct ctlr_info *h)
b93d7536 7127{
cba3d38b 7128 hpsa_get_max_perf_mode_cmds(h);
45fcb86e 7129 h->nr_cmds = h->max_commands;
b93d7536 7130 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
283b4a9b 7131 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
c7ee65b3
WS
7132 if (hpsa_supports_chained_sg_blocks(h)) {
7133		/* Limit in-command s/g elements to 32 to save dma'able memory. */
b93d7536 7134 h->max_cmd_sg_entries = 32;
1a63ea6f 7135 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
b93d7536
SC
7136 h->maxsgentries--; /* save one for chain pointer */
7137 } else {
c7ee65b3
WS
7138 /*
7139 * Original smart arrays supported at most 31 s/g entries
7140 * embedded inline in the command (trying to use more
7141 * would lock up the controller)
7142 */
7143 h->max_cmd_sg_entries = 31;
1a63ea6f 7144 h->maxsgentries = 31; /* default to traditional values */
c7ee65b3 7145 h->chainsize = 0;
b93d7536 7146 }
75167d2c
SC
7147
7148 /* Find out what task management functions are supported and cache */
7149 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
0e7a7fce
ST
7150 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7151 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7152 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7153 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
8be986cc
SC
7154 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7155 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
b93d7536
SC
7156}
7157
76c46e49
SC
7158static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7159{
0fc9fd40 7160 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
050f7147 7161 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
76c46e49
SC
7162 return false;
7163 }
7164 return true;
7165}
7166
97a5e98c 7167static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
f7c39101 7168{
97a5e98c 7169 u32 driver_support;
f7c39101 7170
97a5e98c 7171 driver_support = readl(&(h->cfgtable->driver_support));
0b9e7b74
AB
7172 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7173#ifdef CONFIG_X86
97a5e98c 7174 driver_support |= ENABLE_SCSI_PREFETCH;
f7c39101 7175#endif
28e13446
SC
7176 driver_support |= ENABLE_UNIT_ATTN;
7177 writel(driver_support, &(h->cfgtable->driver_support));
f7c39101
SC
7178}
7179
3d0eab67
SC
7180/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7181 * in a prefetch beyond physical memory.
7182 */
7183static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7184{
7185 u32 dma_prefetch;
7186
7187 if (h->board_id != 0x3225103C)
7188 return;
7189 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7190 dma_prefetch |= 0x8000;
7191 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7192}
7193
c706a795 7194static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
76438d08
SC
7195{
7196 int i;
7197 u32 doorbell_value;
7198 unsigned long flags;
7199	/* wait until the clear_event_notify bit 6 is cleared by the controller. */
007e7aa9 7200 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
76438d08
SC
7201 spin_lock_irqsave(&h->lock, flags);
7202 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7203 spin_unlock_irqrestore(&h->lock, flags);
7204 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
c706a795 7205 goto done;
76438d08 7206 /* delay and try again */
007e7aa9 7207 msleep(CLEAR_EVENT_WAIT_INTERVAL);
76438d08 7208 }
c706a795
RE
7209 return -ENODEV;
7210done:
7211 return 0;
76438d08
SC
7212}
7213
c706a795 7214static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
eb6b2ae9
SC
7215{
7216 int i;
6eaf46fd
SC
7217 u32 doorbell_value;
7218 unsigned long flags;
eb6b2ae9
SC
7219
7220	/* under certain very rare conditions, this can take a while.
7221 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7222 * as we enter this code.)
7223 */
007e7aa9 7224 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
25163bd5
WS
7225 if (h->remove_in_progress)
7226 goto done;
6eaf46fd
SC
7227 spin_lock_irqsave(&h->lock, flags);
7228 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7229 spin_unlock_irqrestore(&h->lock, flags);
382be668 7230 if (!(doorbell_value & CFGTBL_ChangeReq))
c706a795 7231 goto done;
eb6b2ae9 7232 /* delay and try again */
007e7aa9 7233 msleep(MODE_CHANGE_WAIT_INTERVAL);
eb6b2ae9 7234 }
c706a795
RE
7235 return -ENODEV;
7236done:
7237 return 0;
3f4336f3
SC
7238}
7239
c706a795 7240/* return -ENODEV or other reason on error, 0 on success */
6f039790 7241static int hpsa_enter_simple_mode(struct ctlr_info *h)
3f4336f3
SC
7242{
7243 u32 trans_support;
7244
7245 trans_support = readl(&(h->cfgtable->TransportSupport));
7246 if (!(trans_support & SIMPLE_MODE))
7247 return -ENOTSUPP;
7248
7249 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
283b4a9b 7250
3f4336f3
SC
7251 /* Update the field, and then ring the doorbell */
7252 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
b9af4937 7253 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
3f4336f3 7254 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
7255 if (hpsa_wait_for_mode_change_ack(h))
7256 goto error;
eb6b2ae9 7257 print_cfg_table(&h->pdev->dev, h->cfgtable);
283b4a9b
SC
7258 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7259 goto error;
960a30e7 7260 h->transMethod = CFGTBL_Trans_Simple;
eb6b2ae9 7261 return 0;
283b4a9b 7262error:
050f7147 7263 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
283b4a9b 7264 return -ENODEV;
eb6b2ae9
SC
7265}
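/*
 * Doorbell handshake in brief (editorial sketch of the sequence above);
 * every transport change follows the same pattern:
 *
 *	writel(CFGTBL_Trans_Simple, &h->cfgtable->HostWrite.TransportRequest);
 *	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 *	hpsa_wait_for_mode_change_ack(h);  (poll until CFGTBL_ChangeReq clears)
 *
 * and then re-reads TransportActive to confirm the firmware honored it.
 */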
7266
195f2c65
RE
7267/* free items allocated or mapped by hpsa_pci_init */
7268static void hpsa_free_pci_init(struct ctlr_info *h)
7269{
7270 hpsa_free_cfgtables(h); /* pci_init 4 */
7271 iounmap(h->vaddr); /* pci_init 3 */
105a3dbc 7272 h->vaddr = NULL;
195f2c65 7273 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
943a7021
RE
7274 /*
7275 * call pci_disable_device before pci_release_regions per
7276 * Documentation/PCI/pci.txt
7277 */
195f2c65 7278 pci_disable_device(h->pdev); /* pci_init 1 */
943a7021 7279 pci_release_regions(h->pdev); /* pci_init 2 */
195f2c65
RE
7280}
7281
7282/* several items must be freed later */
6f039790 7283static int hpsa_pci_init(struct ctlr_info *h)
77c4495c 7284{
eb6b2ae9 7285 int prod_index, err;
edd16368 7286
e5c880d1
SC
7287 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7288 if (prod_index < 0)
60f923b9 7289 return prod_index;
e5c880d1
SC
7290 h->product_name = products[prod_index].product_name;
7291 h->access = *(products[prod_index].access);
edd16368 7292
9b5c48c2
SC
7293 h->needs_abort_tags_swizzled =
7294 ctlr_needs_abort_tags_swizzled(h->board_id);
7295
e5a44df8
MG
7296 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7297 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7298
55c06c71 7299 err = pci_enable_device(h->pdev);
edd16368 7300 if (err) {
195f2c65 7301 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
943a7021 7302 pci_disable_device(h->pdev);
edd16368
SC
7303 return err;
7304 }
7305
f79cfec6 7306 err = pci_request_regions(h->pdev, HPSA);
edd16368 7307 if (err) {
55c06c71 7308 dev_err(&h->pdev->dev,
195f2c65 7309 "failed to obtain PCI resources\n");
943a7021
RE
7310 pci_disable_device(h->pdev);
7311 return err;
edd16368 7312 }
4fa604e1
RE
7313
7314 pci_set_master(h->pdev);
7315
6b3f4c52 7316 hpsa_interrupt_mode(h);
12d2cd47 7317 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3a7774ce 7318 if (err)
195f2c65 7319 goto clean2; /* intmode+region, pci */
edd16368 7320 h->vaddr = remap_pci_mem(h->paddr, 0x250);
204892e9 7321 if (!h->vaddr) {
195f2c65 7322 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
204892e9 7323 err = -ENOMEM;
195f2c65 7324 goto clean2; /* intmode+region, pci */
204892e9 7325 }
fe5389c8 7326 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
2c4c8c8b 7327 if (err)
195f2c65 7328 goto clean3; /* vaddr, intmode+region, pci */
77c4495c
SC
7329 err = hpsa_find_cfgtables(h);
7330 if (err)
195f2c65 7331 goto clean3; /* vaddr, intmode+region, pci */
b93d7536 7332 hpsa_find_board_params(h);
edd16368 7333
76c46e49 7334 if (!hpsa_CISS_signature_present(h)) {
edd16368 7335 err = -ENODEV;
195f2c65 7336 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368 7337 }
97a5e98c 7338 hpsa_set_driver_support_bits(h);
3d0eab67 7339 hpsa_p600_dma_prefetch_quirk(h);
eb6b2ae9
SC
7340 err = hpsa_enter_simple_mode(h);
7341 if (err)
195f2c65 7342 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368
SC
7343 return 0;
7344
195f2c65
RE
7345clean4: /* cfgtables, vaddr, intmode+region, pci */
7346 hpsa_free_cfgtables(h);
7347clean3: /* vaddr, intmode+region, pci */
7348 iounmap(h->vaddr);
105a3dbc 7349 h->vaddr = NULL;
195f2c65
RE
7350clean2: /* intmode+region, pci */
7351 hpsa_disable_interrupt_mode(h);
943a7021
RE
7352 /*
7353 * call pci_disable_device before pci_release_regions per
7354 * Documentation/PCI/pci.txt
7355 */
195f2c65 7356 pci_disable_device(h->pdev);
943a7021 7357 pci_release_regions(h->pdev);
edd16368
SC
7358 return err;
7359}
7360
6f039790 7361static void hpsa_hba_inquiry(struct ctlr_info *h)
339b2b14
SC
7362{
7363 int rc;
7364
7365#define HBA_INQUIRY_BYTE_COUNT 64
7366 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7367 if (!h->hba_inquiry_data)
7368 return;
7369 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7370 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7371 if (rc != 0) {
7372 kfree(h->hba_inquiry_data);
7373 h->hba_inquiry_data = NULL;
7374 }
7375}
7376
6b6c1cd7 7377static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
4c2a8c40 7378{
1df8552a 7379 int rc, i;
3b747298 7380 void __iomem *vaddr;
4c2a8c40
SC
7381
7382 if (!reset_devices)
7383 return 0;
7384
132aa220
TH
7385	/* kdump kernel is loading, we don't know what state the
7386	 * pci interface is in. The dev->enable_cnt is equal to zero,
7387	 * so we call enable+disable, wait a while and switch it on.
7388 */
7389 rc = pci_enable_device(pdev);
7390 if (rc) {
7391 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7392 return -ENODEV;
7393 }
7394 pci_disable_device(pdev);
7395 msleep(260); /* a randomly chosen number */
7396 rc = pci_enable_device(pdev);
7397 if (rc) {
7398 dev_warn(&pdev->dev, "failed to enable device.\n");
7399 return -ENODEV;
7400 }
4fa604e1 7401
859c75ab 7402 pci_set_master(pdev);
4fa604e1 7403
3b747298
TH
7404 vaddr = pci_ioremap_bar(pdev, 0);
7405 if (vaddr == NULL) {
7406 rc = -ENOMEM;
7407 goto out_disable;
7408 }
7409 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7410 iounmap(vaddr);
7411
1df8552a 7412 /* Reset the controller with a PCI power-cycle or via doorbell */
6b6c1cd7 7413 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
4c2a8c40 7414
1df8552a
SC
7415 /* -ENOTSUPP here means we cannot reset the controller
7416 * but it's already (and still) up and running in
18867659
SC
7417 * "performant mode". Or, it might be 640x, which can't reset
7418 * due to concerns about shared bbwc between 6402/6404 pair.
1df8552a 7419 */
adf1b3a3 7420 if (rc)
132aa220 7421 goto out_disable;
4c2a8c40
SC
7422
7423 /* Now try to get the controller to respond to a no-op */
1ba66c9c 7424 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
4c2a8c40
SC
7425 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7426 if (hpsa_noop(pdev) == 0)
7427 break;
7428 else
7429 dev_warn(&pdev->dev, "no-op failed%s\n",
7430 (i < 11 ? "; re-trying" : ""));
7431 }
132aa220
TH
7432
7433out_disable:
7434
7435 pci_disable_device(pdev);
7436 return rc;
4c2a8c40
SC
7437}
7438
1fb7c98a
RE
7439static void hpsa_free_cmd_pool(struct ctlr_info *h)
7440{
7441 kfree(h->cmd_pool_bits);
105a3dbc
RE
7442 h->cmd_pool_bits = NULL;
7443 if (h->cmd_pool) {
1fb7c98a
RE
7444 pci_free_consistent(h->pdev,
7445 h->nr_cmds * sizeof(struct CommandList),
7446 h->cmd_pool,
7447 h->cmd_pool_dhandle);
105a3dbc
RE
7448 h->cmd_pool = NULL;
7449 h->cmd_pool_dhandle = 0;
7450 }
7451 if (h->errinfo_pool) {
1fb7c98a
RE
7452 pci_free_consistent(h->pdev,
7453 h->nr_cmds * sizeof(struct ErrorInfo),
7454 h->errinfo_pool,
7455 h->errinfo_pool_dhandle);
105a3dbc
RE
7456 h->errinfo_pool = NULL;
7457 h->errinfo_pool_dhandle = 0;
7458 }
1fb7c98a
RE
7459}
7460
d37ffbe4 7461static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
2e9d1b36
SC
7462{
7463 h->cmd_pool_bits = kzalloc(
7464 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7465 sizeof(unsigned long), GFP_KERNEL);
7466 h->cmd_pool = pci_alloc_consistent(h->pdev,
7467 h->nr_cmds * sizeof(*h->cmd_pool),
7468 &(h->cmd_pool_dhandle));
7469 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7470 h->nr_cmds * sizeof(*h->errinfo_pool),
7471 &(h->errinfo_pool_dhandle));
7472 if ((h->cmd_pool_bits == NULL)
7473 || (h->cmd_pool == NULL)
7474 || (h->errinfo_pool == NULL)) {
7475 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
2c143342 7476 goto clean_up;
2e9d1b36 7477 }
360c73bd 7478 hpsa_preinitialize_commands(h);
2e9d1b36 7479 return 0;
2c143342
RE
7480clean_up:
7481 hpsa_free_cmd_pool(h);
7482 return -ENOMEM;
2e9d1b36
SC
7483}
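/*
 * Editorial note on sizing: cmd_pool_bits holds one bit per command slot,
 * so for nr_cmds = 1024 on a 64-bit kernel it occupies
 * DIV_ROUND_UP(1024, 64) * sizeof(unsigned long) = 16 * 8 = 128 bytes,
 * zeroed so every slot starts out free. cmd_pool and errinfo_pool are
 * parallel nr_cmds-sized DMA-coherent arrays indexed by the same tag.
 */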
7484
41b3cf08
SC
7485static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7486{
ec429952 7487 int i, cpu;
41b3cf08
SC
7488
7489 cpu = cpumask_first(cpu_online_mask);
7490 for (i = 0; i < h->msix_vector; i++) {
ec429952 7491 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
41b3cf08
SC
7492 cpu = cpumask_next(cpu, cpu_online_mask);
7493 }
7494}
7495
ec501a18
RE
7496/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7497static void hpsa_free_irqs(struct ctlr_info *h)
7498{
7499 int i;
7500
7501 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7502 /* Single reply queue, only one irq to free */
7503 i = h->intr_mode;
7504 irq_set_affinity_hint(h->intr[i], NULL);
7505 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7506 h->q[i] = 0;
ec501a18
RE
7507 return;
7508 }
7509
7510 for (i = 0; i < h->msix_vector; i++) {
7511 irq_set_affinity_hint(h->intr[i], NULL);
7512 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7513 h->q[i] = 0;
ec501a18 7514 }
a4e17fc1
RE
7515 for (; i < MAX_REPLY_QUEUES; i++)
7516 h->q[i] = 0;
ec501a18
RE
7517}
7518
9ee61794
RE
7519/* returns 0 on success; cleans up and returns -Enn on error */
7520static int hpsa_request_irqs(struct ctlr_info *h,
0ae01a32
SC
7521 irqreturn_t (*msixhandler)(int, void *),
7522 irqreturn_t (*intxhandler)(int, void *))
7523{
254f796b 7524 int rc, i;
0ae01a32 7525
254f796b
MG
7526 /*
7527 * initialize h->q[x] = x so that interrupt handlers know which
7528 * queue to process.
7529 */
7530 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7531 h->q[i] = (u8) i;
7532
eee0f03a 7533 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
254f796b 7534 /* If performant mode and MSI-X, use multiple reply queues */
a4e17fc1 7535 for (i = 0; i < h->msix_vector; i++) {
8b47004a 7536 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
254f796b 7537 rc = request_irq(h->intr[i], msixhandler,
8b47004a 7538 0, h->intrname[i],
254f796b 7539 &h->q[i]);
a4e17fc1
RE
7540 if (rc) {
7541 int j;
7542
7543 dev_err(&h->pdev->dev,
7544 "failed to get irq %d for %s\n",
7545 h->intr[i], h->devname);
7546 for (j = 0; j < i; j++) {
7547 free_irq(h->intr[j], &h->q[j]);
7548 h->q[j] = 0;
7549 }
7550 for (; j < MAX_REPLY_QUEUES; j++)
7551 h->q[j] = 0;
7552 return rc;
7553 }
7554 }
41b3cf08 7555 hpsa_irq_affinity_hints(h);
254f796b
MG
7556 } else {
7557 /* Use single reply pool */
eee0f03a 7558 if (h->msix_vector > 0 || h->msi_vector) {
8b47004a
RE
7559 if (h->msix_vector)
7560 sprintf(h->intrname[h->intr_mode],
7561 "%s-msix", h->devname);
7562 else
7563 sprintf(h->intrname[h->intr_mode],
7564 "%s-msi", h->devname);
254f796b 7565 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7566 msixhandler, 0,
7567 h->intrname[h->intr_mode],
254f796b
MG
7568 &h->q[h->intr_mode]);
7569 } else {
8b47004a
RE
7570 sprintf(h->intrname[h->intr_mode],
7571 "%s-intx", h->devname);
254f796b 7572 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7573 intxhandler, IRQF_SHARED,
7574 h->intrname[h->intr_mode],
254f796b
MG
7575 &h->q[h->intr_mode]);
7576 }
105a3dbc 7577 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
254f796b 7578 }
0ae01a32 7579 if (rc) {
195f2c65 7580 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
0ae01a32 7581 h->intr[h->intr_mode], h->devname);
195f2c65 7582 hpsa_free_irqs(h);
0ae01a32
SC
7583 return -ENODEV;
7584 }
7585 return 0;
7586}
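/*
 * Editorial example: on a host adapter named "hpsa0" with four MSI-X
 * vectors, the registrations above show up in /proc/interrupts as
 * hpsa0-msix0 .. hpsa0-msix3; each handler receives &h->q[i], from which
 * it recovers both the queue number and, via queue_to_hba(), the hba.
 */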
7587
6f039790 7588static int hpsa_kdump_soft_reset(struct ctlr_info *h)
64670ac8 7589{
39c53f55 7590 int rc;
bf43caf3 7591 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
64670ac8
SC
7592
7593 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
39c53f55
RE
7594 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7595 if (rc) {
64670ac8 7596 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
39c53f55 7597 return rc;
64670ac8
SC
7598 }
7599
7600 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
39c53f55
RE
7601 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7602 if (rc) {
64670ac8
SC
7603 dev_warn(&h->pdev->dev, "Board failed to become ready "
7604 "after soft reset.\n");
39c53f55 7605 return rc;
64670ac8
SC
7606 }
7607
7608 return 0;
7609}
7610
072b0518
SC
7611static void hpsa_free_reply_queues(struct ctlr_info *h)
7612{
7613 int i;
7614
7615 for (i = 0; i < h->nreply_queues; i++) {
7616 if (!h->reply_queue[i].head)
7617 continue;
1fb7c98a
RE
7618 pci_free_consistent(h->pdev,
7619 h->reply_queue_size,
7620 h->reply_queue[i].head,
7621 h->reply_queue[i].busaddr);
072b0518
SC
7622 h->reply_queue[i].head = NULL;
7623 h->reply_queue[i].busaddr = 0;
7624 }
105a3dbc 7625 h->reply_queue_size = 0;
072b0518
SC
7626}
7627
0097f0f4
SC
7628static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7629{
105a3dbc
RE
7630 hpsa_free_performant_mode(h); /* init_one 7 */
7631 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7632 hpsa_free_cmd_pool(h); /* init_one 5 */
7633 hpsa_free_irqs(h); /* init_one 4 */
2946e82b
RE
7634 scsi_host_put(h->scsi_host); /* init_one 3 */
7635 h->scsi_host = NULL; /* init_one 3 */
7636 hpsa_free_pci_init(h); /* init_one 2_5 */
9ecd953a
RE
7637 free_percpu(h->lockup_detected); /* init_one 2 */
7638 h->lockup_detected = NULL; /* init_one 2 */
7639 if (h->resubmit_wq) {
7640 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7641 h->resubmit_wq = NULL;
7642 }
7643 if (h->rescan_ctlr_wq) {
7644 destroy_workqueue(h->rescan_ctlr_wq);
7645 h->rescan_ctlr_wq = NULL;
7646 }
105a3dbc 7647 kfree(h); /* init_one 1 */
64670ac8
SC
7648}
7649
a0c12413 7650/* Called when controller lockup detected. */
f2405db8 7651static void fail_all_outstanding_cmds(struct ctlr_info *h)
a0c12413 7652{
281a7fd0
WS
7653 int i, refcount;
7654 struct CommandList *c;
25163bd5 7655 int failcount = 0;
a0c12413 7656
080ef1cc 7657 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
f2405db8 7658 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 7659 c = h->cmd_pool + i;
281a7fd0
WS
7660 refcount = atomic_inc_return(&c->refcount);
7661 if (refcount > 1) {
25163bd5 7662 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
281a7fd0 7663 finish_cmd(c);
433b5f4d 7664 atomic_dec(&h->commands_outstanding);
25163bd5 7665 failcount++;
281a7fd0
WS
7666 }
7667 cmd_free(h, c);
a0c12413 7668 }
25163bd5
WS
7669 dev_warn(&h->pdev->dev,
7670 "failed %d commands in fail_all\n", failcount);
a0c12413
SC
7671}
7672
094963da
SC
7673static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7674{
c8ed0010 7675 int cpu;
094963da 7676
c8ed0010 7677 for_each_online_cpu(cpu) {
094963da
SC
7678 u32 *lockup_detected;
7679 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7680 *lockup_detected = value;
094963da
SC
7681 }
7682 wmb(); /* be sure the per-cpu variables are out to memory */
7683}
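/*
 * Editorial note: the lockup flag is per-cpu so the I/O fast path can test
 * it without bouncing a shared cache line between CPUs. A minimal reader
 * sketch (an assumption, not necessarily the driver's exact helper):
 *
 *	u32 *ld = per_cpu_ptr(h->lockup_detected, get_cpu());
 *	u32 detected = *ld;
 *	put_cpu();
 */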
7684
a0c12413
SC
7685static void controller_lockup_detected(struct ctlr_info *h)
7686{
7687 unsigned long flags;
094963da 7688 u32 lockup_detected;
a0c12413 7689
a0c12413
SC
7690 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7691 spin_lock_irqsave(&h->lock, flags);
094963da
SC
7692 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7693 if (!lockup_detected) {
7694 /* no heartbeat, but controller gave us a zero. */
7695 dev_warn(&h->pdev->dev,
25163bd5
WS
7696 "lockup detected after %d but scratchpad register is zero\n",
7697 h->heartbeat_sample_interval / HZ);
094963da
SC
7698 lockup_detected = 0xffffffff;
7699 }
7700 set_lockup_detected_for_all_cpus(h, lockup_detected);
a0c12413 7701 spin_unlock_irqrestore(&h->lock, flags);
25163bd5
WS
7702 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
7703 lockup_detected, h->heartbeat_sample_interval / HZ);
a0c12413 7704 pci_disable_device(h->pdev);
f2405db8 7705 fail_all_outstanding_cmds(h);
a0c12413
SC
7706}
7707
25163bd5 7708static int detect_controller_lockup(struct ctlr_info *h)
a0c12413
SC
7709{
7710 u64 now;
7711 u32 heartbeat;
7712 unsigned long flags;
7713
a0c12413
SC
7714 now = get_jiffies_64();
7715 /* If we've received an interrupt recently, we're ok. */
7716 if (time_after64(h->last_intr_timestamp +
e85c5974 7717 (h->heartbeat_sample_interval), now))
25163bd5 7718 return false;
a0c12413
SC
7719
7720 /*
7721 * If we've already checked the heartbeat recently, we're ok.
7722 * This could happen if someone sends us a signal. We
7723 * otherwise don't care about signals in this thread.
7724 */
7725 if (time_after64(h->last_heartbeat_timestamp +
e85c5974 7726 (h->heartbeat_sample_interval), now))
25163bd5 7727 return false;
a0c12413
SC
7728
7729 /* If heartbeat has not changed since we last looked, we're not ok. */
7730 spin_lock_irqsave(&h->lock, flags);
7731 heartbeat = readl(&h->cfgtable->HeartBeat);
7732 spin_unlock_irqrestore(&h->lock, flags);
7733 if (h->last_heartbeat == heartbeat) {
7734 controller_lockup_detected(h);
25163bd5 7735 return true;
a0c12413
SC
7736 }
7737
7738 /* We're ok. */
7739 h->last_heartbeat = heartbeat;
7740 h->last_heartbeat_timestamp = now;
25163bd5 7741 return false;
a0c12413
SC
7742}
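/*
 * Worked example (editorial; assumes the driver's default heartbeat sample
 * interval of 30 * HZ from hpsa.h): if no interrupt has arrived for 30
 * seconds and the cfgtable HeartBeat counter reads the same value as the
 * previous sample, the firmware made no progress for a full interval and
 * controller_lockup_detected() fails every outstanding command with
 * CMD_CTLR_LOCKUP.
 */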
7743
9846590e 7744static void hpsa_ack_ctlr_events(struct ctlr_info *h)
76438d08
SC
7745{
7746 int i;
7747 char *event_type;
7748
e4aa3e6a
SC
7749 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7750 return;
7751
76438d08 7752 /* Ask the controller to clear the events we're handling. */
1f7cee8c
SC
7753 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7754 | CFGTBL_Trans_io_accel2)) &&
76438d08
SC
7755 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7756 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7757
7758 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7759 event_type = "state change";
7760 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7761 event_type = "configuration change";
7762 /* Stop sending new RAID offload reqs via the IO accelerator */
7763 scsi_block_requests(h->scsi_host);
7764 for (i = 0; i < h->ndevices; i++)
7765 h->dev[i]->offload_enabled = 0;
23100dd9 7766 hpsa_drain_accel_commands(h);
76438d08
SC
7767 /* Set 'accelerator path config change' bit */
7768 dev_warn(&h->pdev->dev,
7769 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7770 h->events, event_type);
7771 writel(h->events, &(h->cfgtable->clear_event_notify));
7772 /* Set the "clear event notify field update" bit 6 */
7773 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7774 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7775 hpsa_wait_for_clear_event_notify_ack(h);
7776 scsi_unblock_requests(h->scsi_host);
7777 } else {
7778 /* Acknowledge controller notification events. */
7779 writel(h->events, &(h->cfgtable->clear_event_notify));
7780 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7781 hpsa_wait_for_clear_event_notify_ack(h);
7782#if 0
7783 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7784 hpsa_wait_for_mode_change_ack(h);
7785#endif
7786 }
9846590e 7787 return;
76438d08
SC
7788}
7789
7790/* Check a register on the controller to see if there are configuration
7791 * changes (added/changed/removed logical drives, etc.) which mean that
e863d68e
ST
7792 * we should rescan the controller for devices.
7793 * Also check flag for driver-initiated rescan.
76438d08 7794 */
9846590e 7795static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
76438d08
SC
7796{
7797 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
9846590e 7798 return 0;
76438d08
SC
7799
7800 h->events = readl(&(h->cfgtable->event_notify));
9846590e
SC
7801 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7802}
76438d08 7803
9846590e
SC
7804/*
7805 * Check if any of the offline devices have become ready
7806 */
7807static int hpsa_offline_devices_ready(struct ctlr_info *h)
7808{
7809 unsigned long flags;
7810 struct offline_device_entry *d;
7811 struct list_head *this, *tmp;
7812
7813 spin_lock_irqsave(&h->offline_device_lock, flags);
7814 list_for_each_safe(this, tmp, &h->offline_device_list) {
7815 d = list_entry(this, struct offline_device_entry,
7816 offline_list);
7817 spin_unlock_irqrestore(&h->offline_device_lock, flags);
d1fea47c
SC
7818 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7819 spin_lock_irqsave(&h->offline_device_lock, flags);
7820 list_del(&d->offline_list);
7821 spin_unlock_irqrestore(&h->offline_device_lock, flags);
9846590e 7822 return 1;
d1fea47c 7823 }
9846590e
SC
7824 spin_lock_irqsave(&h->offline_device_lock, flags);
7825 }
7826 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7827 return 0;
76438d08
SC
7828}
7829
6636e7f4 7830static void hpsa_rescan_ctlr_worker(struct work_struct *work)
a0c12413
SC
7831{
7832 unsigned long flags;
8a98db73 7833 struct ctlr_info *h = container_of(to_delayed_work(work),
6636e7f4
DB
7834 struct ctlr_info, rescan_ctlr_work);
7835
7836
7837 if (h->remove_in_progress)
8a98db73 7838 return;
9846590e
SC
7839
7840 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7841 scsi_host_get(h->scsi_host);
9846590e
SC
7842 hpsa_ack_ctlr_events(h);
7843 hpsa_scan_start(h->scsi_host);
7844 scsi_host_put(h->scsi_host);
7845 }
8a98db73 7846 spin_lock_irqsave(&h->lock, flags);
6636e7f4
DB
7847 if (!h->remove_in_progress)
7848 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7849 h->heartbeat_sample_interval);
7850 spin_unlock_irqrestore(&h->lock, flags);
7851}
7852
7853static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7854{
7855 unsigned long flags;
7856 struct ctlr_info *h = container_of(to_delayed_work(work),
7857 struct ctlr_info, monitor_ctlr_work);
7858
7859 detect_controller_lockup(h);
7860 if (lockup_detected(h))
a0c12413 7861 return;
6636e7f4
DB
7862
7863 spin_lock_irqsave(&h->lock, flags);
7864 if (!h->remove_in_progress)
7865 schedule_delayed_work(&h->monitor_ctlr_work,
8a98db73
SC
7866 h->heartbeat_sample_interval);
7867 spin_unlock_irqrestore(&h->lock, flags);
a0c12413
SC
7868}
7869
6636e7f4
DB
7870static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7871 char *name)
7872{
7873 struct workqueue_struct *wq = NULL;
6636e7f4 7874
397ea9cb 7875 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
6636e7f4
DB
7876 if (!wq)
7877 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7878
7879 return wq;
7880}
7881
6f039790 7882static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
edd16368 7883{
4c2a8c40 7884 int dac, rc;
edd16368 7885 struct ctlr_info *h;
64670ac8
SC
7886 int try_soft_reset = 0;
7887 unsigned long flags;
6b6c1cd7 7888 u32 board_id;
edd16368
SC
7889
7890 if (number_of_controllers == 0)
7891 printk(KERN_INFO DRIVER_NAME "\n");
edd16368 7892
6b6c1cd7
TH
7893 rc = hpsa_lookup_board_id(pdev, &board_id);
7894 if (rc < 0) {
7895 dev_warn(&pdev->dev, "Board ID not found\n");
7896 return rc;
7897 }
7898
7899 rc = hpsa_init_reset_devices(pdev, board_id);
64670ac8
SC
7900 if (rc) {
7901 if (rc != -ENOTSUPP)
7902 return rc;
7903 /* If the reset fails in a particular way (it has no way to do
7904 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7905 * a soft reset once we get the controller configured up to the
7906 * point that it can accept a command.
7907 */
7908 try_soft_reset = 1;
7909 rc = 0;
7910 }
7911
7912reinit_after_soft_reset:
edd16368 7913
303932fd
DB
7914 /* Command structures must be aligned on a 32-byte boundary because
7915 * the 5 lower bits of the address are used by the hardware. and by
7916 * the driver. See comments in hpsa.h for more info.
7917 */
303932fd 7918 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
edd16368 7919 h = kzalloc(sizeof(*h), GFP_KERNEL);
105a3dbc
RE
7920 if (!h) {
7921 dev_err(&pdev->dev, "Failed to allocate controller head\n");
ecd9aad4 7922 return -ENOMEM;
105a3dbc 7923 }
edd16368 7924
55c06c71 7925 h->pdev = pdev;
105a3dbc 7926
a9a3a273 7927 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
9846590e 7928 INIT_LIST_HEAD(&h->offline_device_list);
6eaf46fd 7929 spin_lock_init(&h->lock);
9846590e 7930 spin_lock_init(&h->offline_device_lock);
6eaf46fd 7931 spin_lock_init(&h->scan_lock);
34f0c627 7932 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
9b5c48c2 7933 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
094963da
SC
7934
7935 /* Allocate and clear per-cpu variable lockup_detected */
7936 h->lockup_detected = alloc_percpu(u32);
2a5ac326 7937 if (!h->lockup_detected) {
105a3dbc 7938 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
2a5ac326 7939 rc = -ENOMEM;
2efa5929 7940 goto clean1; /* aer/h */
2a5ac326 7941 }
094963da
SC
7942 set_lockup_detected_for_all_cpus(h, 0);
7943
55c06c71 7944 rc = hpsa_pci_init(h);
105a3dbc 7945 if (rc)
2946e82b
RE
7946 goto clean2; /* lu, aer/h */
7947
7948 /* relies on h-> settings made by hpsa_pci_init, including
7949 * interrupt_mode h->intr */
7950 rc = hpsa_scsi_host_alloc(h);
7951 if (rc)
7952 goto clean2_5; /* pci, lu, aer/h */
edd16368 7953
2946e82b 7954 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
edd16368
SC
7955 h->ctlr = number_of_controllers;
7956 number_of_controllers++;
edd16368
SC
7957
7958 /* configure PCI DMA stuff */
ecd9aad4
SC
7959 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7960 if (rc == 0) {
edd16368 7961 dac = 1;
ecd9aad4
SC
7962 } else {
7963 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7964 if (rc == 0) {
7965 dac = 0;
7966 } else {
7967 dev_err(&pdev->dev, "no suitable DMA available\n");
2946e82b 7968 goto clean3; /* shost, pci, lu, aer/h */
ecd9aad4 7969 }
edd16368
SC
7970 }
7971
7972 /* make sure the board interrupts are off */
7973 h->access.set_intr_mask(h, HPSA_INTR_OFF);
10f66018 7974
105a3dbc
RE
7975 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
7976 if (rc)
2946e82b 7977 goto clean3; /* shost, pci, lu, aer/h */
d37ffbe4 7978 rc = hpsa_alloc_cmd_pool(h);
8947fd10 7979 if (rc)
2946e82b 7980 goto clean4; /* irq, shost, pci, lu, aer/h */
105a3dbc
RE
7981 rc = hpsa_alloc_sg_chain_blocks(h);
7982 if (rc)
2946e82b 7983 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
a08a8471 7984 init_waitqueue_head(&h->scan_wait_queue);
9b5c48c2 7985 init_waitqueue_head(&h->abort_cmd_wait_queue);
d604f533
WS
7986 init_waitqueue_head(&h->event_sync_wait_queue);
7987 mutex_init(&h->reset_mutex);
a08a8471 7988 h->scan_finished = 1; /* no scan currently in progress */
edd16368
SC
7989
7990 pci_set_drvdata(pdev, h);
9a41338e 7991 h->ndevices = 0;
316b221a 7992 h->hba_mode_enabled = 0;
2946e82b 7993
9a41338e 7994 spin_lock_init(&h->devlock);
105a3dbc
RE
7995 rc = hpsa_put_ctlr_into_performant_mode(h);
7996 if (rc)
2946e82b
RE
7997 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7998
7999 /* hook into SCSI subsystem */
8000 rc = hpsa_scsi_add_host(h);
8001 if (rc)
8002 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
2efa5929
RE
8003
8004 /* create the resubmit workqueue */
8005 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8006 if (!h->rescan_ctlr_wq) {
8007 rc = -ENOMEM;
8008 goto clean7;
8009 }
8010
8011 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8012 if (!h->resubmit_wq) {
8013 rc = -ENOMEM;
8014 goto clean7; /* aer/h */
8015 }
64670ac8 8016
105a3dbc
RE
8017 /*
8018 * At this point, the controller is ready to take commands.
64670ac8
SC
8019 * Now, if reset_devices and the hard reset didn't work, try
8020 * the soft reset and see if that works.
8021 */
8022 if (try_soft_reset) {
8023
8024 /* This is kind of gross. We may or may not get a completion
8025 * from the soft reset command, and if we do, then the value
8026 * from the fifo may or may not be valid. So, we wait 10 secs
8027 * after the reset throwing away any completions we get during
8028 * that time. Unregister the interrupt handler and register
8029 * fake ones to scoop up any residual completions.
8030 */
8031 spin_lock_irqsave(&h->lock, flags);
8032 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8033 spin_unlock_irqrestore(&h->lock, flags);
ec501a18 8034 hpsa_free_irqs(h);
9ee61794 8035 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
64670ac8
SC
8036 hpsa_intx_discard_completions);
8037 if (rc) {
9ee61794
RE
8038 dev_warn(&h->pdev->dev,
8039 "Failed to request_irq after soft reset.\n");
d498757c 8040 /*
b2ef480c
RE
8041 * cannot goto clean7 or free_irqs will be called
8042 * again. Instead, do its work
8043 */
8044 hpsa_free_performant_mode(h); /* clean7 */
8045 hpsa_free_sg_chain_blocks(h); /* clean6 */
8046 hpsa_free_cmd_pool(h); /* clean5 */
8047 /*
8048 * skip hpsa_free_irqs(h) clean4 since that
8049 * was just called before request_irqs failed
d498757c
RE
8050 */
8051 goto clean3;
64670ac8
SC
8052 }
8053
8054 rc = hpsa_kdump_soft_reset(h);
8055 if (rc)
8056 /* Neither hard nor soft reset worked, we're hosed. */
2946e82b 8057 goto clean9;
64670ac8
SC
8058
8059 dev_info(&h->pdev->dev, "Board READY.\n");
8060 dev_info(&h->pdev->dev,
8061 "Waiting for stale completions to drain.\n");
8062 h->access.set_intr_mask(h, HPSA_INTR_ON);
8063 msleep(10000);
8064 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8065
8066 rc = controller_reset_failed(h->cfgtable);
8067 if (rc)
8068 dev_info(&h->pdev->dev,
8069 "Soft reset appears to have failed.\n");
8070
8071		/* Since the controller has been reset, we have to go back and
8072		 * re-initialize everything. It's easiest to just forget what
8073		 * we've done and do it all over again.
8074		 */
8075 hpsa_undo_allocations_after_kdump_soft_reset(h);
8076 try_soft_reset = 0;
8077 if (rc)
b2ef480c 8078 /* don't goto clean, we already unallocated */
64670ac8
SC
8079 return -ENODEV;
8080
8081 goto reinit_after_soft_reset;
8082 }
edd16368 8083
105a3dbc
RE
8084 /* Enable Accelerated IO path at driver layer */
8085 h->acciopath_status = 1;
da0697bd 8086
e863d68e 8087
edd16368
SC
8088 /* Turn the interrupts on so we can service requests */
8089 h->access.set_intr_mask(h, HPSA_INTR_ON);
8090
339b2b14 8091 hpsa_hba_inquiry(h);
8a98db73
SC
8092
8093 /* Monitor the controller for firmware lockups */
8094 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8095 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8096 schedule_delayed_work(&h->monitor_ctlr_work,
8097 h->heartbeat_sample_interval);
6636e7f4
DB
8098 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8099 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8100 h->heartbeat_sample_interval);
88bf6d62 8101 return 0;
edd16368 8102
2946e82b 8103clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
105a3dbc 8104 kfree(h->hba_inquiry_data);
2946e82b 8105clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
105a3dbc
RE
8106 hpsa_free_performant_mode(h);
8107 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8108clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
33a2ffce 8109 hpsa_free_sg_chain_blocks(h);
2946e82b 8110clean5: /* cmd, irq, shost, pci, lu, aer/h */
2e9d1b36 8111 hpsa_free_cmd_pool(h);
2946e82b 8112clean4: /* irq, shost, pci, lu, aer/h */
ec501a18 8113 hpsa_free_irqs(h);
2946e82b
RE
8114clean3: /* shost, pci, lu, aer/h */
8115 scsi_host_put(h->scsi_host);
8116 h->scsi_host = NULL;
8117clean2_5: /* pci, lu, aer/h */
195f2c65 8118 hpsa_free_pci_init(h);
2946e82b 8119clean2: /* lu, aer/h */
105a3dbc
RE
8120 if (h->lockup_detected) {
8121 free_percpu(h->lockup_detected);
8122 h->lockup_detected = NULL;
8123 }
8124clean1: /* wq/aer/h */
8125 if (h->resubmit_wq) {
080ef1cc 8126 destroy_workqueue(h->resubmit_wq);
105a3dbc
RE
8127 h->resubmit_wq = NULL;
8128 }
8129 if (h->rescan_ctlr_wq) {
6636e7f4 8130 destroy_workqueue(h->rescan_ctlr_wq);
105a3dbc
RE
8131 h->rescan_ctlr_wq = NULL;
8132 }
edd16368 8133 kfree(h);
ecd9aad4 8134 return rc;
edd16368
SC
8135}
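/*
 * For reference, the cleanup ladder above unwinds in reverse order of
 * allocation: clean9 frees hba_inquiry_data, clean7 the performant-mode
 * structures, clean6 the SG chain blocks, clean5 the command pool,
 * clean4 the IRQs, clean3 the SCSI host, clean2_5 the PCI setup,
 * clean2 the lockup detector, and clean1 the workqueues and h itself.
 */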
8136
8137static void hpsa_flush_cache(struct ctlr_info *h)
8138{
8139 char *flush_buf;
8140 struct CommandList *c;
25163bd5 8141 int rc;
702890e3 8142
094963da 8143 if (unlikely(lockup_detected(h)))
702890e3 8144 return;
edd16368
SC
8145 flush_buf = kzalloc(4, GFP_KERNEL);
8146 if (!flush_buf)
8147 return;
8148
45fcb86e 8149 c = cmd_alloc(h);
bf43caf3 8150
a2dac136
SC
8151 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8152 RAID_CTLR_LUNID, TYPE_CMD)) {
8153 goto out;
8154 }
25163bd5
WS
8155 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8156 PCI_DMA_TODEVICE, NO_TIMEOUT);
8157 if (rc)
8158 goto out;
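	/*
	 * Note: the out: label below sits between the if and its statement,
	 * so the dev_warn() is reached both when an earlier step jumps to
	 * out: and when the command completes with a non-zero CommandStatus.
	 */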
edd16368 8159 if (c->err_info->CommandStatus != 0)
a2dac136 8160out:
edd16368
SC
8161 dev_warn(&h->pdev->dev,
8162 "error flushing cache on controller\n");
45fcb86e 8163 cmd_free(h, c);
edd16368
SC
8164 kfree(flush_buf);
8165}
8166
8167static void hpsa_shutdown(struct pci_dev *pdev)
8168{
8169 struct ctlr_info *h;
8170
8171 h = pci_get_drvdata(pdev);
8172	/* Send the flush cache command so that all data in the
8173	 * battery-backed cache is written out to disk, then turn
8174	 * the board interrupts off and release the IRQs.
8175	 */
8176 hpsa_flush_cache(h);
8177 h->access.set_intr_mask(h, HPSA_INTR_OFF);
105a3dbc 8178 hpsa_free_irqs(h); /* init_one 4 */
cc64c817 8179 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
edd16368
SC
8180}
8181
6f039790 8182static void hpsa_free_device_info(struct ctlr_info *h)
55e14e76
SC
8183{
8184 int i;
8185
105a3dbc 8186 for (i = 0; i < h->ndevices; i++) {
55e14e76 8187 kfree(h->dev[i]);
105a3dbc
RE
8188 h->dev[i] = NULL;
8189 }
55e14e76
SC
8190}
8191
6f039790 8192static void hpsa_remove_one(struct pci_dev *pdev)
edd16368
SC
8193{
8194 struct ctlr_info *h;
8a98db73 8195 unsigned long flags;
edd16368
SC
8196
8197 if (pci_get_drvdata(pdev) == NULL) {
a0c12413 8198 dev_err(&pdev->dev, "unable to remove device\n");
edd16368
SC
8199 return;
8200 }
8201 h = pci_get_drvdata(pdev);
8a98db73
SC
8202
8203 /* Get rid of any controller monitoring work items */
8204 spin_lock_irqsave(&h->lock, flags);
8205 h->remove_in_progress = 1;
8a98db73 8206 spin_unlock_irqrestore(&h->lock, flags);
6636e7f4
DB
8207 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8208 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8209 destroy_workqueue(h->rescan_ctlr_wq);
8210 destroy_workqueue(h->resubmit_wq);
cc64c817 8211
105a3dbc 8212 /* includes hpsa_free_irqs - init_one 4 */
195f2c65 8213 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
edd16368 8214 hpsa_shutdown(pdev);
cc64c817 8215
105a3dbc
RE
8216 hpsa_free_device_info(h); /* scan */
8217
2946e82b
RE
8218 kfree(h->hba_inquiry_data); /* init_one 10 */
8219 h->hba_inquiry_data = NULL; /* init_one 10 */
8220 if (h->scsi_host)
8221 scsi_remove_host(h->scsi_host); /* init_one 8 */
8222 hpsa_free_ioaccel2_sg_chain_blocks(h);
105a3dbc
RE
8223 hpsa_free_performant_mode(h); /* init_one 7 */
8224 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8225 hpsa_free_cmd_pool(h); /* init_one 5 */
8226
8227 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
195f2c65 8228
2946e82b
RE
8229 scsi_host_put(h->scsi_host); /* init_one 3 */
8230 h->scsi_host = NULL; /* init_one 3 */
8231
195f2c65 8232 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
2946e82b 8233 hpsa_free_pci_init(h); /* init_one 2.5 */
195f2c65 8234
105a3dbc
RE
8235 free_percpu(h->lockup_detected); /* init_one 2 */
8236 h->lockup_detected = NULL; /* init_one 2 */
8237 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8238 kfree(h); /* init_one 1 */
edd16368
SC
8239}
8240
8241static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8242 __attribute__((unused)) pm_message_t state)
8243{
8244 return -ENOSYS;
8245}
8246
8247static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8248{
8249 return -ENOSYS;
8250}
8251
8252static struct pci_driver hpsa_pci_driver = {
f79cfec6 8253 .name = HPSA,
edd16368 8254 .probe = hpsa_init_one,
6f039790 8255 .remove = hpsa_remove_one,
edd16368
SC
8256	.id_table = hpsa_pci_device_id,
8257 .shutdown = hpsa_shutdown,
8258 .suspend = hpsa_suspend,
8259 .resume = hpsa_resume,
8260};
8261
303932fd
DB
8262/* Fill in bucket_map[], given nsgs (the max number of
8263 * scatter gather elements supported) and bucket[],
8264 * which is an array of 8 integers. The bucket[] array
8265 * contains 8 different DMA transfer sizes (in 16
8266 * byte increments) which the controller uses to fetch
8267 * commands. This function fills in bucket_map[], which
8268 * maps a given number of scatter gather elements to one of
8269 * the 8 DMA transfer sizes. The point of it is to allow the
8270 * controller to only do as much DMA as needed to fetch the
8271 * command, with the DMA transfer size encoded in the lower
8272 * bits of the command address.
8273 */
8274static void calc_bucket_map(int bucket[], int num_buckets,
2b08b3e9 8275 int nsgs, int min_blocks, u32 *bucket_map)
303932fd
DB
8276{
8277 int i, j, b, size;
8278
303932fd
DB
8279 /* Note, bucket_map must have nsgs+1 entries. */
8280 for (i = 0; i <= nsgs; i++) {
8281 /* Compute size of a command with i SG entries */
e1f7de0c 8282 size = i + min_blocks;
303932fd
DB
8283 b = num_buckets; /* Assume the biggest bucket */
8284 /* Find the bucket that is just big enough */
e1f7de0c 8285 for (j = 0; j < num_buckets; j++) {
303932fd
DB
8286 if (bucket[j] >= size) {
8287 b = j;
8288 break;
8289 }
8290 }
8291 /* for a command with i SG entries, use bucket b. */
8292 bucket_map[i] = b;
8293 }
8294}
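/*
 * Worked example, using the bft[] values set up below: with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 blocks, so bucket_map[3] = 2: bucket[2] = 8 is the
 * smallest bucket that holds 7 blocks, and the controller fetches
 * 8 * 16 = 128 bytes for that command.
 */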
8295
105a3dbc
RE
8296/*
8297 * Return -ENODEV on error, 0 on success (or no action).
8298 * Allocates numerous items that must be freed later.
8299 */
c706a795 8300static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
303932fd 8301{
6c311b57
SC
8302 int i;
8303 unsigned long register_value;
e1f7de0c
MG
8304 unsigned long transMethod = CFGTBL_Trans_Performant |
8305 (trans_support & CFGTBL_Trans_use_short_tags) |
b9af4937
SC
8306 CFGTBL_Trans_enable_directed_msix |
8307 (trans_support & (CFGTBL_Trans_io_accel1 |
8308 CFGTBL_Trans_io_accel2));
e1f7de0c 8309 struct access_method access = SA5_performant_access;
def342bd
SC
8310
8311	/* This is a bit complicated. There are 8 registers on
8312	 * the controller to which we write the 8 different command
8313	 * sizes that can occur. It's a way of
8314 * reducing the DMA done to fetch each command. Encoded into
8315 * each command's tag are 3 bits which communicate to the controller
8316 * which of the eight sizes that command fits within. The size of
8317 * each command depends on how many scatter gather entries there are.
8318 * Each SG entry requires 16 bytes. The eight registers are programmed
8319 * with the number of 16-byte blocks a command of that size requires.
8320	 * The smallest command possible requires 5 such 16-byte blocks (80 bytes);
d66ae08b 8321 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd
SC
8322 * blocks. Note, this only extends to the SG entries contained
8323 * within the command block, and does not extend to chained blocks
8324 * of SG elements. bft[] contains the eight values we write to
8325 * the registers. They are not evenly distributed, but have more
8326 * sizes for small commands, and fewer sizes for larger commands.
8327 */
d66ae08b 8328 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
b9af4937
SC
8329#define MIN_IOACCEL2_BFT_ENTRY 5
8330#define HPSA_IOACCEL2_HEADER_SZ 4
8331 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8332 13, 14, 15, 16, 17, 18, 19,
8333 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8334 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8335 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8336 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8337 16 * MIN_IOACCEL2_BFT_ENTRY);
8338 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
d66ae08b 8339 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
303932fd
DB
8340	/* 5 blocks = 1 s/g entry, up to a 4k transfer
8341	 * 6 blocks = 2 s/g entries, 8k
8342	 * 8 blocks = 4 s/g entries, 16k
8343	 * 10 blocks = 6 s/g entries, 24k
8344	 */
303932fd 8345
b3a52e79
SC
8346 /* If the controller supports either ioaccel method then
8347 * we can also use the RAID stack submit path that does not
8348 * perform the superfluous readl() after each command submission.
8349 */
8350 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8351 access = SA5_performant_access_no_read;
8352
303932fd 8353 /* Controller spec: zero out this buffer. */
072b0518
SC
8354 for (i = 0; i < h->nreply_queues; i++)
8355 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
303932fd 8356
d66ae08b
SC
8357 bft[7] = SG_ENTRIES_IN_CMD + 4;
8358 calc_bucket_map(bft, ARRAY_SIZE(bft),
e1f7de0c 8359 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
303932fd
DB
8360 for (i = 0; i < 8; i++)
8361 writel(bft[i], &h->transtable->BlockFetch[i]);
8362
8363 /* size of controller ring buffer */
8364 writel(h->max_commands, &h->transtable->RepQSize);
254f796b 8365 writel(h->nreply_queues, &h->transtable->RepQCount);
303932fd
DB
8366 writel(0, &h->transtable->RepQCtrAddrLow32);
8367 writel(0, &h->transtable->RepQCtrAddrHigh32);
254f796b
MG
8368
8369 for (i = 0; i < h->nreply_queues; i++) {
8370 writel(0, &h->transtable->RepQAddr[i].upper);
072b0518 8371 writel(h->reply_queue[i].busaddr,
254f796b
MG
8372 &h->transtable->RepQAddr[i].lower);
8373 }
8374
b9af4937 8375 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
e1f7de0c
MG
8376 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8377 /*
8378	 * Enable outbound interrupt coalescing in accelerator mode.
8379 */
8380 if (trans_support & CFGTBL_Trans_io_accel1) {
8381 access = SA5_ioaccel_mode1_access;
8382 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8383 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
c349775e
ST
8384 } else {
8385 if (trans_support & CFGTBL_Trans_io_accel2) {
8386 access = SA5_ioaccel_mode2_access;
8387 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8388 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8389 }
e1f7de0c 8390 }
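	/*
	 * Ring the doorbell so the controller picks up the new transport
	 * request from the config table, then wait for it to acknowledge
	 * the mode change.
	 */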
303932fd 8391 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8392 if (hpsa_wait_for_mode_change_ack(h)) {
8393 dev_err(&h->pdev->dev,
8394 "performant mode problem - doorbell timeout\n");
8395 return -ENODEV;
8396 }
303932fd
DB
8397 register_value = readl(&(h->cfgtable->TransportActive));
8398 if (!(register_value & CFGTBL_Trans_Performant)) {
050f7147
SC
8399 dev_err(&h->pdev->dev,
8400 "performant mode problem - transport not active\n");
c706a795 8401 return -ENODEV;
303932fd 8402 }
960a30e7 8403 /* Change the access methods to the performant access methods */
e1f7de0c
MG
8404 h->access = access;
8405 h->transMethod = transMethod;
8406
b9af4937
SC
8407 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8408 (trans_support & CFGTBL_Trans_io_accel2)))
c706a795 8409 return 0;
e1f7de0c 8410
b9af4937
SC
8411 if (trans_support & CFGTBL_Trans_io_accel1) {
8412 /* Set up I/O accelerator mode */
8413 for (i = 0; i < h->nreply_queues; i++) {
8414 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8415 h->reply_queue[i].current_entry =
8416 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8417 }
8418 bft[7] = h->ioaccel_maxsg + 8;
8419 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8420 h->ioaccel1_blockFetchTable);
e1f7de0c 8421
b9af4937 8422 /* initialize all reply queue entries to unused */
072b0518
SC
8423 for (i = 0; i < h->nreply_queues; i++)
8424 memset(h->reply_queue[i].head,
8425 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8426 h->reply_queue_size);
e1f7de0c 8427
b9af4937
SC
8428 /* set all the constant fields in the accelerator command
8429 * frames once at init time to save CPU cycles later.
8430 */
8431 for (i = 0; i < h->nr_cmds; i++) {
8432 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8433
8434 cp->function = IOACCEL1_FUNCTION_SCSIIO;
8435 cp->err_info = (u32) (h->errinfo_pool_dhandle +
8436 (i * sizeof(struct ErrorInfo)));
8437 cp->err_info_len = sizeof(struct ErrorInfo);
8438 cp->sgl_offset = IOACCEL1_SGLOFFSET;
2b08b3e9
DB
8439 cp->host_context_flags =
8440 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
b9af4937
SC
8441 cp->timeout_sec = 0;
8442 cp->ReplyQueue = 0;
50a0decf 8443 cp->tag =
f2405db8 8444 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
50a0decf
SC
8445 cp->host_addr =
8446 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
b9af4937 8447 (i * sizeof(struct io_accel1_cmd)));
b9af4937
SC
8448 }
8449 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8450 u64 cfg_offset, cfg_base_addr_index;
8451 u32 bft2_offset, cfg_base_addr;
8452 int rc;
8453
8454 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8455 &cfg_base_addr_index, &cfg_offset);
8456 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8457 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8458 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8459 4, h->ioaccel2_blockFetchTable);
8460 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8461 BUILD_BUG_ON(offsetof(struct CfgTable,
8462 io_accel_request_size_offset) != 0xb8);
8463 h->ioaccel2_bft2_regs =
8464 remap_pci_mem(pci_resource_start(h->pdev,
8465 cfg_base_addr_index) +
8466 cfg_offset + bft2_offset,
8467 ARRAY_SIZE(bft2) *
8468 sizeof(*h->ioaccel2_bft2_regs));
8469 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8470 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
e1f7de0c 8471 }
b9af4937 8472 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8473 if (hpsa_wait_for_mode_change_ack(h)) {
8474 dev_err(&h->pdev->dev,
8475 "performant mode problem - enabling ioaccel mode\n");
8476 return -ENODEV;
8477 }
8478 return 0;
e1f7de0c
MG
8479}
8480
1fb7c98a
RE
8481/* Free ioaccel1 mode command blocks and block fetch table */
8482static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8483{
105a3dbc 8484 if (h->ioaccel_cmd_pool) {
1fb7c98a
RE
8485 pci_free_consistent(h->pdev,
8486 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8487 h->ioaccel_cmd_pool,
8488 h->ioaccel_cmd_pool_dhandle);
105a3dbc
RE
8489 h->ioaccel_cmd_pool = NULL;
8490 h->ioaccel_cmd_pool_dhandle = 0;
8491 }
1fb7c98a 8492 kfree(h->ioaccel1_blockFetchTable);
105a3dbc 8493 h->ioaccel1_blockFetchTable = NULL;
1fb7c98a
RE
8494}
8495
d37ffbe4
RE
8496/* Allocate ioaccel1 mode command blocks and block fetch table */
8497static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
e1f7de0c 8498{
283b4a9b
SC
8499 h->ioaccel_maxsg =
8500 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8501 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8502 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8503
e1f7de0c
MG
8504 /* Command structures must be aligned on a 128-byte boundary
8505 * because the 7 lower bits of the address are used by the
8506 * hardware.
8507 */
e1f7de0c
MG
8508 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8509 IOACCEL1_COMMANDLIST_ALIGNMENT);
8510 h->ioaccel_cmd_pool =
8511 pci_alloc_consistent(h->pdev,
8512 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8513 &(h->ioaccel_cmd_pool_dhandle));
8514
8515 h->ioaccel1_blockFetchTable =
283b4a9b 8516 kmalloc(((h->ioaccel_maxsg + 1) *
e1f7de0c
MG
8517 sizeof(u32)), GFP_KERNEL);
8518
8519 if ((h->ioaccel_cmd_pool == NULL) ||
8520 (h->ioaccel1_blockFetchTable == NULL))
8521 goto clean_up;
8522
8523 memset(h->ioaccel_cmd_pool, 0,
8524 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8525 return 0;
8526
8527clean_up:
1fb7c98a 8528 hpsa_free_ioaccel1_cmd_and_bft(h);
2dd02d74 8529 return -ENOMEM;
6c311b57
SC
8530}
8531
1fb7c98a
RE
8532/* Free ioaccel2 mode command blocks and block fetch table */
8533static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8534{
d9a729f3
WS
8535 hpsa_free_ioaccel2_sg_chain_blocks(h);
8536
105a3dbc 8537 if (h->ioaccel2_cmd_pool) {
1fb7c98a
RE
8538 pci_free_consistent(h->pdev,
8539 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8540 h->ioaccel2_cmd_pool,
8541 h->ioaccel2_cmd_pool_dhandle);
105a3dbc
RE
8542 h->ioaccel2_cmd_pool = NULL;
8543 h->ioaccel2_cmd_pool_dhandle = 0;
8544 }
1fb7c98a 8545 kfree(h->ioaccel2_blockFetchTable);
105a3dbc 8546 h->ioaccel2_blockFetchTable = NULL;
1fb7c98a
RE
8547}
8548
d37ffbe4
RE
8549/* Allocate ioaccel2 mode command blocks and block fetch table */
8550static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
aca9012a 8551{
d9a729f3
WS
8552 int rc;
8553
aca9012a
SC
8555
8556 h->ioaccel_maxsg =
8557 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8558 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8559 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8560
aca9012a
SC
8561 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8562 IOACCEL2_COMMANDLIST_ALIGNMENT);
8563 h->ioaccel2_cmd_pool =
8564 pci_alloc_consistent(h->pdev,
8565 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8566 &(h->ioaccel2_cmd_pool_dhandle));
8567
8568 h->ioaccel2_blockFetchTable =
8569 kmalloc(((h->ioaccel_maxsg + 1) *
8570 sizeof(u32)), GFP_KERNEL);
8571
8572 if ((h->ioaccel2_cmd_pool == NULL) ||
d9a729f3
WS
8573 (h->ioaccel2_blockFetchTable == NULL)) {
8574 rc = -ENOMEM;
8575 goto clean_up;
8576 }
8577
8578 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8579 if (rc)
aca9012a
SC
8580 goto clean_up;
8581
8582 memset(h->ioaccel2_cmd_pool, 0,
8583 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8584 return 0;
8585
8586clean_up:
1fb7c98a 8587 hpsa_free_ioaccel2_cmd_and_bft(h);
d9a729f3 8588 return rc;
aca9012a
SC
8589}
8590
105a3dbc
RE
8591/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8592static void hpsa_free_performant_mode(struct ctlr_info *h)
8593{
8594 kfree(h->blockFetchTable);
8595 h->blockFetchTable = NULL;
8596 hpsa_free_reply_queues(h);
8597 hpsa_free_ioaccel1_cmd_and_bft(h);
8598 hpsa_free_ioaccel2_cmd_and_bft(h);
8599}
8600
8601/* Return -ENODEV on error, 0 on success (or no action).
8602 * Allocates numerous items that must be freed later.
8603 */
8604static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
6c311b57
SC
8605{
8606 u32 trans_support;
e1f7de0c
MG
8607 unsigned long transMethod = CFGTBL_Trans_Performant |
8608 CFGTBL_Trans_use_short_tags;
105a3dbc 8609 int i, rc;
6c311b57 8610
02ec19c8 8611 if (hpsa_simple_mode)
105a3dbc 8612 return 0;
02ec19c8 8613
67c99a72 8614 trans_support = readl(&(h->cfgtable->TransportSupport));
8615 if (!(trans_support & PERFORMANT_MODE))
105a3dbc 8616 return 0;
67c99a72 8617
e1f7de0c
MG
8618 /* Check for I/O accelerator mode support */
8619 if (trans_support & CFGTBL_Trans_io_accel1) {
8620 transMethod |= CFGTBL_Trans_io_accel1 |
8621 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8622 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8623 if (rc)
8624 return rc;
8625 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8626 transMethod |= CFGTBL_Trans_io_accel2 |
aca9012a 8627 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8628 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8629 if (rc)
8630 return rc;
e1f7de0c
MG
8631 }
8632
eee0f03a 8633 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
cba3d38b 8634 hpsa_get_max_perf_mode_cmds(h);
6c311b57 8635 /* Performant mode ring buffer and supporting data structures */
072b0518 8636 h->reply_queue_size = h->max_commands * sizeof(u64);
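	/* (each reply queue element is a single 64-bit completion tag) */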
6c311b57 8637
254f796b 8638 for (i = 0; i < h->nreply_queues; i++) {
072b0518
SC
8639 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8640 h->reply_queue_size,
8641 &(h->reply_queue[i].busaddr));
105a3dbc
RE
8642 if (!h->reply_queue[i].head) {
8643 rc = -ENOMEM;
8644 goto clean1; /* rq, ioaccel */
8645 }
254f796b
MG
8646 h->reply_queue[i].size = h->max_commands;
8647 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
8648 h->reply_queue[i].current_entry = 0;
8649 }
8650
6c311b57 8651 /* Need a block fetch table for performant mode */
d66ae08b 8652 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
6c311b57 8653 sizeof(u32)), GFP_KERNEL);
105a3dbc
RE
8654 if (!h->blockFetchTable) {
8655 rc = -ENOMEM;
8656 goto clean1; /* rq, ioaccel */
8657 }
6c311b57 8658
105a3dbc
RE
8659 rc = hpsa_enter_performant_mode(h, trans_support);
8660 if (rc)
8661 goto clean2; /* bft, rq, ioaccel */
8662 return 0;
303932fd 8663
105a3dbc 8664clean2: /* bft, rq, ioaccel */
303932fd 8665 kfree(h->blockFetchTable);
105a3dbc
RE
8666 h->blockFetchTable = NULL;
8667clean1: /* rq, ioaccel */
8668 hpsa_free_reply_queues(h);
8669 hpsa_free_ioaccel1_cmd_and_bft(h);
8670 hpsa_free_ioaccel2_cmd_and_bft(h);
8671 return rc;
303932fd
DB
8672}
8673
23100dd9 8674static int is_accelerated_cmd(struct CommandList *c)
76438d08 8675{
23100dd9
SC
8676 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8677}
8678
8679static void hpsa_drain_accel_commands(struct ctlr_info *h)
8680{
8681 struct CommandList *c = NULL;
f2405db8 8682 int i, accel_cmds_out;
281a7fd0 8683 int refcount;
76438d08 8684
f2405db8 8685 do { /* wait for all outstanding ioaccel commands to drain out */
23100dd9 8686 accel_cmds_out = 0;
f2405db8 8687 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 8688 c = h->cmd_pool + i;
281a7fd0
WS
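			/*
			 * Probe whether this slot is in use: an allocated
			 * command already holds a reference, so our increment
			 * returns > 1 for live commands; cmd_free() below
			 * just drops the probe reference.
			 */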
8689 refcount = atomic_inc_return(&c->refcount);
8690 if (refcount > 1) /* Command is allocated */
8691 accel_cmds_out += is_accelerated_cmd(c);
8692 cmd_free(h, c);
f2405db8 8693 }
23100dd9 8694 if (accel_cmds_out <= 0)
281a7fd0 8695 break;
76438d08
SC
8696 msleep(100);
8697 } while (1);
8698}
8699
edd16368
SC
8700/*
8701 * This is it. Register the PCI driver information for the cards we control;
8702 * the OS will call our registered routines when it finds one of our cards.
8703 */
8704static int __init hpsa_init(void)
8705{
31468401 8706 return pci_register_driver(&hpsa_pci_driver);
edd16368
SC
8707}
8708
8709static void __exit hpsa_cleanup(void)
8710{
8711 pci_unregister_driver(&hpsa_pci_driver);
edd16368
SC
8712}
8713
e1f7de0c
MG
8714static void __attribute__((unused)) verify_offsets(void)
8715{
dd0e19f3
ST
8716#define VERIFY_OFFSET(member, offset) \
8717 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8718
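	/*
	 * BUILD_BUG_ON() fails the compile when its condition is true, so
	 * merely building this never-called function verifies that these
	 * structures match the layout the controller firmware expects.
	 */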
8719 VERIFY_OFFSET(structure_size, 0);
8720 VERIFY_OFFSET(volume_blk_size, 4);
8721 VERIFY_OFFSET(volume_blk_cnt, 8);
8722 VERIFY_OFFSET(phys_blk_shift, 16);
8723 VERIFY_OFFSET(parity_rotation_shift, 17);
8724 VERIFY_OFFSET(strip_size, 18);
8725 VERIFY_OFFSET(disk_starting_blk, 20);
8726 VERIFY_OFFSET(disk_blk_cnt, 28);
8727 VERIFY_OFFSET(data_disks_per_row, 36);
8728 VERIFY_OFFSET(metadata_disks_per_row, 38);
8729 VERIFY_OFFSET(row_cnt, 40);
8730 VERIFY_OFFSET(layout_map_count, 42);
8731 VERIFY_OFFSET(flags, 44);
8732 VERIFY_OFFSET(dekindex, 46);
8733 /* VERIFY_OFFSET(reserved, 48 */
8734 VERIFY_OFFSET(data, 64);
8735
8736#undef VERIFY_OFFSET
8737
b66cc250
MM
8738#define VERIFY_OFFSET(member, offset) \
8739 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8740
8741 VERIFY_OFFSET(IU_type, 0);
8742 VERIFY_OFFSET(direction, 1);
8743 VERIFY_OFFSET(reply_queue, 2);
8744 /* VERIFY_OFFSET(reserved1, 3); */
8745 VERIFY_OFFSET(scsi_nexus, 4);
8746 VERIFY_OFFSET(Tag, 8);
8747 VERIFY_OFFSET(cdb, 16);
8748 VERIFY_OFFSET(cciss_lun, 32);
8749 VERIFY_OFFSET(data_len, 40);
8750 VERIFY_OFFSET(cmd_priority_task_attr, 44);
8751 VERIFY_OFFSET(sg_count, 45);
8752 /* VERIFY_OFFSET(reserved3 */
8753 VERIFY_OFFSET(err_ptr, 48);
8754 VERIFY_OFFSET(err_len, 56);
8755 /* VERIFY_OFFSET(reserved4 */
8756 VERIFY_OFFSET(sg, 64);
8757
8758#undef VERIFY_OFFSET
8759
e1f7de0c
MG
8760#define VERIFY_OFFSET(member, offset) \
8761 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8762
8763 VERIFY_OFFSET(dev_handle, 0x00);
8764 VERIFY_OFFSET(reserved1, 0x02);
8765 VERIFY_OFFSET(function, 0x03);
8766 VERIFY_OFFSET(reserved2, 0x04);
8767 VERIFY_OFFSET(err_info, 0x0C);
8768 VERIFY_OFFSET(reserved3, 0x10);
8769 VERIFY_OFFSET(err_info_len, 0x12);
8770 VERIFY_OFFSET(reserved4, 0x13);
8771 VERIFY_OFFSET(sgl_offset, 0x14);
8772 VERIFY_OFFSET(reserved5, 0x15);
8773 VERIFY_OFFSET(transfer_len, 0x1C);
8774 VERIFY_OFFSET(reserved6, 0x20);
8775 VERIFY_OFFSET(io_flags, 0x24);
8776 VERIFY_OFFSET(reserved7, 0x26);
8777 VERIFY_OFFSET(LUN, 0x34);
8778 VERIFY_OFFSET(control, 0x3C);
8779 VERIFY_OFFSET(CDB, 0x40);
8780 VERIFY_OFFSET(reserved8, 0x50);
8781 VERIFY_OFFSET(host_context_flags, 0x60);
8782 VERIFY_OFFSET(timeout_sec, 0x62);
8783 VERIFY_OFFSET(ReplyQueue, 0x64);
8784 VERIFY_OFFSET(reserved9, 0x65);
50a0decf 8785 VERIFY_OFFSET(tag, 0x68);
e1f7de0c
MG
8786 VERIFY_OFFSET(host_addr, 0x70);
8787 VERIFY_OFFSET(CISS_LUN, 0x78);
8788 VERIFY_OFFSET(SG, 0x78 + 8);
8789#undef VERIFY_OFFSET
8790}
8791
edd16368
SC
8792module_init(hpsa_init);
8793module_exit(hpsa_cleanup);