]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/hpsa.c
hpsa: add PMC to copyright
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / hpsa.c
CommitLineData
edd16368
SC
1/*
2 * Disk Array driver for HP Smart Array SAS controllers
1358f6dc
DB
3 * Copyright 2014-2015 PMC-Sierra, Inc.
4 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
edd16368
SC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
1358f6dc 15 * Questions/Comments/Bugfixes to storagedev@pmcs.com
edd16368
SC
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/interrupt.h>
21#include <linux/types.h>
22#include <linux/pci.h>
e5a44df8 23#include <linux/pci-aspm.h>
edd16368
SC
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/fs.h>
28#include <linux/timer.h>
edd16368
SC
29#include <linux/init.h>
30#include <linux/spinlock.h>
edd16368
SC
31#include <linux/compat.h>
32#include <linux/blktrace_api.h>
33#include <linux/uaccess.h>
34#include <linux/io.h>
35#include <linux/dma-mapping.h>
36#include <linux/completion.h>
37#include <linux/moduleparam.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_cmnd.h>
40#include <scsi/scsi_device.h>
41#include <scsi/scsi_host.h>
667e23d4 42#include <scsi/scsi_tcq.h>
9437ac43 43#include <scsi/scsi_eh.h>
73153fe5 44#include <scsi/scsi_dbg.h>
edd16368
SC
45#include <linux/cciss_ioctl.h>
46#include <linux/string.h>
47#include <linux/bitmap.h>
60063497 48#include <linux/atomic.h>
a0c12413 49#include <linux/jiffies.h>
42a91641 50#include <linux/percpu-defs.h>
094963da 51#include <linux/percpu.h>
2b08b3e9 52#include <asm/unaligned.h>
283b4a9b 53#include <asm/div64.h>
edd16368
SC
54#include "hpsa_cmd.h"
55#include "hpsa.h"
56
57/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
f532a3f9 58#define HPSA_DRIVER_VERSION "3.4.10-0"
edd16368 59#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
f79cfec6 60#define HPSA "hpsa"
edd16368 61
007e7aa9
RE
62/* How long to wait for CISS doorbell communication */
63#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
64#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
65#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
66#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
edd16368
SC
67#define MAX_IOCTL_CONFIG_WAIT 1000
68
69/*define how many times we will try a command because of bus resets */
70#define MAX_CMD_RETRIES 3
71
72/* Embedded module documentation macros - see modules.h */
73MODULE_AUTHOR("Hewlett-Packard Company");
74MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
75 HPSA_DRIVER_VERSION);
76MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
77MODULE_VERSION(HPSA_DRIVER_VERSION);
78MODULE_LICENSE("GPL");
79
80static int hpsa_allow_any;
81module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
82MODULE_PARM_DESC(hpsa_allow_any,
83 "Allow hpsa driver to access unknown HP Smart Array hardware");
02ec19c8
SC
84static int hpsa_simple_mode;
85module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
86MODULE_PARM_DESC(hpsa_simple_mode,
87 "Use 'simple mode' rather than 'performant mode'");
edd16368
SC
88
89/* define the PCI info for the cards we can control */
90static const struct pci_device_id hpsa_pci_device_id[] = {
edd16368
SC
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
163dbcd8
MM
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
f8b01eb9 98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
9143a961 99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
fe0c9610
MM
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
fe0c9610
MM
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
97b9f53d
MM
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
3b7a45e5 122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
97b9f53d
MM
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
3b7a45e5
JH
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
fdfa4b6d 131 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
8e616a5e
SC
132 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
133 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
134 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
135 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
136 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
7c03b870 137 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
6798cc0a 138 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
edd16368
SC
139 {0,}
140};
141
142MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
143
144/* board_id = Subsystem Device ID & Vendor ID
145 * product = Marketing Name for the board
146 * access = Address of the struct of function pointers
147 */
148static struct board_type products[] = {
edd16368
SC
149 {0x3241103C, "Smart Array P212", &SA5_access},
150 {0x3243103C, "Smart Array P410", &SA5_access},
151 {0x3245103C, "Smart Array P410i", &SA5_access},
152 {0x3247103C, "Smart Array P411", &SA5_access},
153 {0x3249103C, "Smart Array P812", &SA5_access},
163dbcd8
MM
154 {0x324A103C, "Smart Array P712m", &SA5_access},
155 {0x324B103C, "Smart Array P711m", &SA5_access},
7d2cce58 156 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
fe0c9610
MM
157 {0x3350103C, "Smart Array P222", &SA5_access},
158 {0x3351103C, "Smart Array P420", &SA5_access},
159 {0x3352103C, "Smart Array P421", &SA5_access},
160 {0x3353103C, "Smart Array P822", &SA5_access},
161 {0x3354103C, "Smart Array P420i", &SA5_access},
162 {0x3355103C, "Smart Array P220i", &SA5_access},
163 {0x3356103C, "Smart Array P721m", &SA5_access},
1fd6c8e3
MM
164 {0x1921103C, "Smart Array P830i", &SA5_access},
165 {0x1922103C, "Smart Array P430", &SA5_access},
166 {0x1923103C, "Smart Array P431", &SA5_access},
167 {0x1924103C, "Smart Array P830", &SA5_access},
168 {0x1926103C, "Smart Array P731m", &SA5_access},
169 {0x1928103C, "Smart Array P230i", &SA5_access},
170 {0x1929103C, "Smart Array P530", &SA5_access},
27fb8137
DB
171 {0x21BD103C, "Smart Array P244br", &SA5_access},
172 {0x21BE103C, "Smart Array P741m", &SA5_access},
173 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
174 {0x21C0103C, "Smart Array P440ar", &SA5_access},
c8ae0ab1 175 {0x21C1103C, "Smart Array P840ar", &SA5_access},
27fb8137
DB
176 {0x21C2103C, "Smart Array P440", &SA5_access},
177 {0x21C3103C, "Smart Array P441", &SA5_access},
97b9f53d 178 {0x21C4103C, "Smart Array", &SA5_access},
27fb8137
DB
179 {0x21C5103C, "Smart Array P841", &SA5_access},
180 {0x21C6103C, "Smart HBA H244br", &SA5_access},
181 {0x21C7103C, "Smart HBA H240", &SA5_access},
182 {0x21C8103C, "Smart HBA H241", &SA5_access},
97b9f53d 183 {0x21C9103C, "Smart Array", &SA5_access},
27fb8137
DB
184 {0x21CA103C, "Smart Array P246br", &SA5_access},
185 {0x21CB103C, "Smart Array P840", &SA5_access},
3b7a45e5
JH
186 {0x21CC103C, "Smart Array", &SA5_access},
187 {0x21CD103C, "Smart Array", &SA5_access},
27fb8137 188 {0x21CE103C, "Smart HBA", &SA5_access},
fdfa4b6d 189 {0x05809005, "SmartHBA-SA", &SA5_access},
8e616a5e
SC
190 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
191 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
192 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
193 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
194 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
edd16368
SC
195 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
196};
197
a58e7e53
WS
198#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
199static const struct scsi_cmnd hpsa_cmd_busy;
200#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
201static const struct scsi_cmnd hpsa_cmd_idle;
edd16368
SC
202static int number_of_controllers;
203
10f66018
SC
204static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
205static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
42a91641 206static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
edd16368
SC
207
208#ifdef CONFIG_COMPAT
42a91641
DB
209static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
210 void __user *arg);
edd16368
SC
211#endif
212
213static void cmd_free(struct ctlr_info *h, struct CommandList *c);
edd16368 214static struct CommandList *cmd_alloc(struct ctlr_info *h);
73153fe5
WS
215static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
216static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
217 struct scsi_cmnd *scmd);
a2dac136 218static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
b7bb24eb 219 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
edd16368 220 int cmd_type);
2c143342 221static void hpsa_free_cmd_pool(struct ctlr_info *h);
b7bb24eb 222#define VPD_PAGE (1 << 8)
edd16368 223
f281233d 224static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
a08a8471
SC
225static void hpsa_scan_start(struct Scsi_Host *);
226static int hpsa_scan_finished(struct Scsi_Host *sh,
227 unsigned long elapsed_time);
7c0a0229 228static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
edd16368
SC
229
230static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
75167d2c 231static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
edd16368 232static int hpsa_slave_alloc(struct scsi_device *sdev);
41ce4c35 233static int hpsa_slave_configure(struct scsi_device *sdev);
edd16368
SC
234static void hpsa_slave_destroy(struct scsi_device *sdev);
235
edd16368 236static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
edd16368
SC
237static int check_for_unit_attention(struct ctlr_info *h,
238 struct CommandList *c);
239static void check_ioctl_unit_attention(struct ctlr_info *h,
240 struct CommandList *c);
303932fd
DB
241/* performant mode helper functions */
242static void calc_bucket_map(int *bucket, int num_buckets,
2b08b3e9 243 int nsgs, int min_blocks, u32 *bucket_map);
105a3dbc
RE
244static void hpsa_free_performant_mode(struct ctlr_info *h);
245static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
254f796b 246static inline u32 next_command(struct ctlr_info *h, u8 q);
6f039790
GKH
247static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
248 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
249 u64 *cfg_offset);
250static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
251 unsigned long *memory_bar);
252static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
253static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
254 int wait_for_ready);
75167d2c 255static inline void finish_cmd(struct CommandList *c);
c706a795 256static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
fe5389c8
SC
257#define BOARD_NOT_READY 0
258#define BOARD_READY 1
23100dd9 259static void hpsa_drain_accel_commands(struct ctlr_info *h);
76438d08 260static void hpsa_flush_cache(struct ctlr_info *h);
c349775e
ST
261static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
262 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
03383736 263 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
080ef1cc 264static void hpsa_command_resubmit_worker(struct work_struct *work);
25163bd5
WS
265static u32 lockup_detected(struct ctlr_info *h);
266static int detect_controller_lockup(struct ctlr_info *h);
edd16368 267
edd16368
SC
268static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
269{
270 unsigned long *priv = shost_priv(sdev->host);
271 return (struct ctlr_info *) *priv;
272}
273
a23513e8
SC
274static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
275{
276 unsigned long *priv = shost_priv(sh);
277 return (struct ctlr_info *) *priv;
278}
279
a58e7e53
WS
280static inline bool hpsa_is_cmd_idle(struct CommandList *c)
281{
282 return c->scsi_cmd == SCSI_CMD_IDLE;
283}
284
d604f533
WS
285static inline bool hpsa_is_pending_event(struct CommandList *c)
286{
287 return c->abort_pending || c->reset_pending;
288}
289
9437ac43
SC
290/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
291static void decode_sense_data(const u8 *sense_data, int sense_data_len,
292 u8 *sense_key, u8 *asc, u8 *ascq)
293{
294 struct scsi_sense_hdr sshdr;
295 bool rc;
296
297 *sense_key = -1;
298 *asc = -1;
299 *ascq = -1;
300
301 if (sense_data_len < 1)
302 return;
303
304 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
305 if (rc) {
306 *sense_key = sshdr.sense_key;
307 *asc = sshdr.asc;
308 *ascq = sshdr.ascq;
309 }
310}
311
edd16368
SC
312static int check_for_unit_attention(struct ctlr_info *h,
313 struct CommandList *c)
314{
9437ac43
SC
315 u8 sense_key, asc, ascq;
316 int sense_len;
317
318 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
319 sense_len = sizeof(c->err_info->SenseInfo);
320 else
321 sense_len = c->err_info->SenseLen;
322
323 decode_sense_data(c->err_info->SenseInfo, sense_len,
324 &sense_key, &asc, &ascq);
81c27557 325 if (sense_key != UNIT_ATTENTION || asc == 0xff)
edd16368
SC
326 return 0;
327
9437ac43 328 switch (asc) {
edd16368 329 case STATE_CHANGED:
9437ac43 330 dev_warn(&h->pdev->dev,
2946e82b
RE
331 "%s: a state change detected, command retried\n",
332 h->devname);
edd16368
SC
333 break;
334 case LUN_FAILED:
7f73695a 335 dev_warn(&h->pdev->dev,
2946e82b 336 "%s: LUN failure detected\n", h->devname);
edd16368
SC
337 break;
338 case REPORT_LUNS_CHANGED:
7f73695a 339 dev_warn(&h->pdev->dev,
2946e82b 340 "%s: report LUN data changed\n", h->devname);
edd16368 341 /*
4f4eb9f1
ST
342 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
343 * target (array) devices.
edd16368
SC
344 */
345 break;
346 case POWER_OR_RESET:
2946e82b
RE
347 dev_warn(&h->pdev->dev,
348 "%s: a power on or device reset detected\n",
349 h->devname);
edd16368
SC
350 break;
351 case UNIT_ATTENTION_CLEARED:
2946e82b
RE
352 dev_warn(&h->pdev->dev,
353 "%s: unit attention cleared by another initiator\n",
354 h->devname);
edd16368
SC
355 break;
356 default:
2946e82b
RE
357 dev_warn(&h->pdev->dev,
358 "%s: unknown unit attention detected\n",
359 h->devname);
edd16368
SC
360 break;
361 }
362 return 1;
363}
364
852af20a
MB
365static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
366{
367 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
368 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
369 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
370 return 0;
371 dev_warn(&h->pdev->dev, HPSA "device busy");
372 return 1;
373}
374
e985c58f
SC
375static u32 lockup_detected(struct ctlr_info *h);
376static ssize_t host_show_lockup_detected(struct device *dev,
377 struct device_attribute *attr, char *buf)
378{
379 int ld;
380 struct ctlr_info *h;
381 struct Scsi_Host *shost = class_to_shost(dev);
382
383 h = shost_to_hba(shost);
384 ld = lockup_detected(h);
385
386 return sprintf(buf, "ld=%d\n", ld);
387}
388
da0697bd
ST
389static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
390 struct device_attribute *attr,
391 const char *buf, size_t count)
392{
393 int status, len;
394 struct ctlr_info *h;
395 struct Scsi_Host *shost = class_to_shost(dev);
396 char tmpbuf[10];
397
398 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
399 return -EACCES;
400 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
401 strncpy(tmpbuf, buf, len);
402 tmpbuf[len] = '\0';
403 if (sscanf(tmpbuf, "%d", &status) != 1)
404 return -EINVAL;
405 h = shost_to_hba(shost);
406 h->acciopath_status = !!status;
407 dev_warn(&h->pdev->dev,
408 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
409 h->acciopath_status ? "enabled" : "disabled");
410 return count;
411}
412
2ba8bfc8
SC
413static ssize_t host_store_raid_offload_debug(struct device *dev,
414 struct device_attribute *attr,
415 const char *buf, size_t count)
416{
417 int debug_level, len;
418 struct ctlr_info *h;
419 struct Scsi_Host *shost = class_to_shost(dev);
420 char tmpbuf[10];
421
422 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
423 return -EACCES;
424 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
425 strncpy(tmpbuf, buf, len);
426 tmpbuf[len] = '\0';
427 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
428 return -EINVAL;
429 if (debug_level < 0)
430 debug_level = 0;
431 h = shost_to_hba(shost);
432 h->raid_offload_debug = debug_level;
433 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
434 h->raid_offload_debug);
435 return count;
436}
437
edd16368
SC
438static ssize_t host_store_rescan(struct device *dev,
439 struct device_attribute *attr,
440 const char *buf, size_t count)
441{
442 struct ctlr_info *h;
443 struct Scsi_Host *shost = class_to_shost(dev);
a23513e8 444 h = shost_to_hba(shost);
31468401 445 hpsa_scan_start(h->scsi_host);
edd16368
SC
446 return count;
447}
448
d28ce020
SC
449static ssize_t host_show_firmware_revision(struct device *dev,
450 struct device_attribute *attr, char *buf)
451{
452 struct ctlr_info *h;
453 struct Scsi_Host *shost = class_to_shost(dev);
454 unsigned char *fwrev;
455
456 h = shost_to_hba(shost);
457 if (!h->hba_inquiry_data)
458 return 0;
459 fwrev = &h->hba_inquiry_data[32];
460 return snprintf(buf, 20, "%c%c%c%c\n",
461 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
462}
463
94a13649
SC
464static ssize_t host_show_commands_outstanding(struct device *dev,
465 struct device_attribute *attr, char *buf)
466{
467 struct Scsi_Host *shost = class_to_shost(dev);
468 struct ctlr_info *h = shost_to_hba(shost);
469
0cbf768e
SC
470 return snprintf(buf, 20, "%d\n",
471 atomic_read(&h->commands_outstanding));
94a13649
SC
472}
473
745a7a25
SC
474static ssize_t host_show_transport_mode(struct device *dev,
475 struct device_attribute *attr, char *buf)
476{
477 struct ctlr_info *h;
478 struct Scsi_Host *shost = class_to_shost(dev);
479
480 h = shost_to_hba(shost);
481 return snprintf(buf, 20, "%s\n",
960a30e7 482 h->transMethod & CFGTBL_Trans_Performant ?
745a7a25
SC
483 "performant" : "simple");
484}
485
da0697bd
ST
486static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
487 struct device_attribute *attr, char *buf)
488{
489 struct ctlr_info *h;
490 struct Scsi_Host *shost = class_to_shost(dev);
491
492 h = shost_to_hba(shost);
493 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
494 (h->acciopath_status == 1) ? "enabled" : "disabled");
495}
496
46380786 497/* List of controllers which cannot be hard reset on kexec with reset_devices */
941b1cda
SC
498static u32 unresettable_controller[] = {
499 0x324a103C, /* Smart Array P712m */
9b5c48c2 500 0x324b103C, /* Smart Array P711m */
941b1cda
SC
501 0x3223103C, /* Smart Array P800 */
502 0x3234103C, /* Smart Array P400 */
503 0x3235103C, /* Smart Array P400i */
504 0x3211103C, /* Smart Array E200i */
505 0x3212103C, /* Smart Array E200 */
506 0x3213103C, /* Smart Array E200i */
507 0x3214103C, /* Smart Array E200i */
508 0x3215103C, /* Smart Array E200i */
509 0x3237103C, /* Smart Array E500 */
510 0x323D103C, /* Smart Array P700m */
7af0abbc 511 0x40800E11, /* Smart Array 5i */
941b1cda
SC
512 0x409C0E11, /* Smart Array 6400 */
513 0x409D0E11, /* Smart Array 6400 EM */
5a4f934e
TH
514 0x40700E11, /* Smart Array 5300 */
515 0x40820E11, /* Smart Array 532 */
516 0x40830E11, /* Smart Array 5312 */
517 0x409A0E11, /* Smart Array 641 */
518 0x409B0E11, /* Smart Array 642 */
519 0x40910E11, /* Smart Array 6i */
941b1cda
SC
520};
521
46380786
SC
522/* List of controllers which cannot even be soft reset */
523static u32 soft_unresettable_controller[] = {
7af0abbc 524 0x40800E11, /* Smart Array 5i */
5a4f934e
TH
525 0x40700E11, /* Smart Array 5300 */
526 0x40820E11, /* Smart Array 532 */
527 0x40830E11, /* Smart Array 5312 */
528 0x409A0E11, /* Smart Array 641 */
529 0x409B0E11, /* Smart Array 642 */
530 0x40910E11, /* Smart Array 6i */
46380786
SC
531 /* Exclude 640x boards. These are two pci devices in one slot
532 * which share a battery backed cache module. One controls the
533 * cache, the other accesses the cache through the one that controls
534 * it. If we reset the one controlling the cache, the other will
535 * likely not be happy. Just forbid resetting this conjoined mess.
536 * The 640x isn't really supported by hpsa anyway.
537 */
538 0x409C0E11, /* Smart Array 6400 */
539 0x409D0E11, /* Smart Array 6400 EM */
540};
541
9b5c48c2
SC
542static u32 needs_abort_tags_swizzled[] = {
543 0x323D103C, /* Smart Array P700m */
544 0x324a103C, /* Smart Array P712m */
545 0x324b103C, /* SmartArray P711m */
546};
547
548static int board_id_in_array(u32 a[], int nelems, u32 board_id)
941b1cda
SC
549{
550 int i;
551
9b5c48c2
SC
552 for (i = 0; i < nelems; i++)
553 if (a[i] == board_id)
554 return 1;
555 return 0;
46380786
SC
556}
557
9b5c48c2 558static int ctlr_is_hard_resettable(u32 board_id)
46380786 559{
9b5c48c2
SC
560 return !board_id_in_array(unresettable_controller,
561 ARRAY_SIZE(unresettable_controller), board_id);
562}
46380786 563
9b5c48c2
SC
564static int ctlr_is_soft_resettable(u32 board_id)
565{
566 return !board_id_in_array(soft_unresettable_controller,
567 ARRAY_SIZE(soft_unresettable_controller), board_id);
941b1cda
SC
568}
569
46380786
SC
570static int ctlr_is_resettable(u32 board_id)
571{
572 return ctlr_is_hard_resettable(board_id) ||
573 ctlr_is_soft_resettable(board_id);
574}
575
9b5c48c2
SC
576static int ctlr_needs_abort_tags_swizzled(u32 board_id)
577{
578 return board_id_in_array(needs_abort_tags_swizzled,
579 ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
580}
581
941b1cda
SC
582static ssize_t host_show_resettable(struct device *dev,
583 struct device_attribute *attr, char *buf)
584{
585 struct ctlr_info *h;
586 struct Scsi_Host *shost = class_to_shost(dev);
587
588 h = shost_to_hba(shost);
46380786 589 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
941b1cda
SC
590}
591
edd16368
SC
592static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
593{
594 return (scsi3addr[3] & 0xC0) == 0x40;
595}
596
f2ef0ce7
RE
597static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
598 "1(+0)ADM", "UNKNOWN"
edd16368 599};
6b80b18f
ST
600#define HPSA_RAID_0 0
601#define HPSA_RAID_4 1
602#define HPSA_RAID_1 2 /* also used for RAID 10 */
603#define HPSA_RAID_5 3 /* also used for RAID 50 */
604#define HPSA_RAID_51 4
605#define HPSA_RAID_6 5 /* also used for RAID 60 */
606#define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
edd16368
SC
607#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
608
609static ssize_t raid_level_show(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 ssize_t l = 0;
82a72c0a 613 unsigned char rlevel;
edd16368
SC
614 struct ctlr_info *h;
615 struct scsi_device *sdev;
616 struct hpsa_scsi_dev_t *hdev;
617 unsigned long flags;
618
619 sdev = to_scsi_device(dev);
620 h = sdev_to_hba(sdev);
621 spin_lock_irqsave(&h->lock, flags);
622 hdev = sdev->hostdata;
623 if (!hdev) {
624 spin_unlock_irqrestore(&h->lock, flags);
625 return -ENODEV;
626 }
627
628 /* Is this even a logical drive? */
629 if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
630 spin_unlock_irqrestore(&h->lock, flags);
631 l = snprintf(buf, PAGE_SIZE, "N/A\n");
632 return l;
633 }
634
635 rlevel = hdev->raid_level;
636 spin_unlock_irqrestore(&h->lock, flags);
82a72c0a 637 if (rlevel > RAID_UNKNOWN)
edd16368
SC
638 rlevel = RAID_UNKNOWN;
639 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
640 return l;
641}
642
643static ssize_t lunid_show(struct device *dev,
644 struct device_attribute *attr, char *buf)
645{
646 struct ctlr_info *h;
647 struct scsi_device *sdev;
648 struct hpsa_scsi_dev_t *hdev;
649 unsigned long flags;
650 unsigned char lunid[8];
651
652 sdev = to_scsi_device(dev);
653 h = sdev_to_hba(sdev);
654 spin_lock_irqsave(&h->lock, flags);
655 hdev = sdev->hostdata;
656 if (!hdev) {
657 spin_unlock_irqrestore(&h->lock, flags);
658 return -ENODEV;
659 }
660 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
661 spin_unlock_irqrestore(&h->lock, flags);
662 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
663 lunid[0], lunid[1], lunid[2], lunid[3],
664 lunid[4], lunid[5], lunid[6], lunid[7]);
665}
666
667static ssize_t unique_id_show(struct device *dev,
668 struct device_attribute *attr, char *buf)
669{
670 struct ctlr_info *h;
671 struct scsi_device *sdev;
672 struct hpsa_scsi_dev_t *hdev;
673 unsigned long flags;
674 unsigned char sn[16];
675
676 sdev = to_scsi_device(dev);
677 h = sdev_to_hba(sdev);
678 spin_lock_irqsave(&h->lock, flags);
679 hdev = sdev->hostdata;
680 if (!hdev) {
681 spin_unlock_irqrestore(&h->lock, flags);
682 return -ENODEV;
683 }
684 memcpy(sn, hdev->device_id, sizeof(sn));
685 spin_unlock_irqrestore(&h->lock, flags);
686 return snprintf(buf, 16 * 2 + 2,
687 "%02X%02X%02X%02X%02X%02X%02X%02X"
688 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
689 sn[0], sn[1], sn[2], sn[3],
690 sn[4], sn[5], sn[6], sn[7],
691 sn[8], sn[9], sn[10], sn[11],
692 sn[12], sn[13], sn[14], sn[15]);
693}
694
c1988684
ST
695static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
696 struct device_attribute *attr, char *buf)
697{
698 struct ctlr_info *h;
699 struct scsi_device *sdev;
700 struct hpsa_scsi_dev_t *hdev;
701 unsigned long flags;
702 int offload_enabled;
703
704 sdev = to_scsi_device(dev);
705 h = sdev_to_hba(sdev);
706 spin_lock_irqsave(&h->lock, flags);
707 hdev = sdev->hostdata;
708 if (!hdev) {
709 spin_unlock_irqrestore(&h->lock, flags);
710 return -ENODEV;
711 }
712 offload_enabled = hdev->offload_enabled;
713 spin_unlock_irqrestore(&h->lock, flags);
714 return snprintf(buf, 20, "%d\n", offload_enabled);
715}
716
3f5eac3a
SC
717static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
718static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
719static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
720static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
c1988684
ST
721static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
722 host_show_hp_ssd_smart_path_enabled, NULL);
da0697bd
ST
723static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
724 host_show_hp_ssd_smart_path_status,
725 host_store_hp_ssd_smart_path_status);
2ba8bfc8
SC
726static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
727 host_store_raid_offload_debug);
3f5eac3a
SC
728static DEVICE_ATTR(firmware_revision, S_IRUGO,
729 host_show_firmware_revision, NULL);
730static DEVICE_ATTR(commands_outstanding, S_IRUGO,
731 host_show_commands_outstanding, NULL);
732static DEVICE_ATTR(transport_mode, S_IRUGO,
733 host_show_transport_mode, NULL);
941b1cda
SC
734static DEVICE_ATTR(resettable, S_IRUGO,
735 host_show_resettable, NULL);
e985c58f
SC
736static DEVICE_ATTR(lockup_detected, S_IRUGO,
737 host_show_lockup_detected, NULL);
3f5eac3a
SC
738
739static struct device_attribute *hpsa_sdev_attrs[] = {
740 &dev_attr_raid_level,
741 &dev_attr_lunid,
742 &dev_attr_unique_id,
c1988684 743 &dev_attr_hp_ssd_smart_path_enabled,
e985c58f 744 &dev_attr_lockup_detected,
3f5eac3a
SC
745 NULL,
746};
747
748static struct device_attribute *hpsa_shost_attrs[] = {
749 &dev_attr_rescan,
750 &dev_attr_firmware_revision,
751 &dev_attr_commands_outstanding,
752 &dev_attr_transport_mode,
941b1cda 753 &dev_attr_resettable,
da0697bd 754 &dev_attr_hp_ssd_smart_path_status,
2ba8bfc8 755 &dev_attr_raid_offload_debug,
3f5eac3a
SC
756 NULL,
757};
758
41ce4c35
SC
759#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
760 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
761
3f5eac3a
SC
762static struct scsi_host_template hpsa_driver_template = {
763 .module = THIS_MODULE,
f79cfec6
SC
764 .name = HPSA,
765 .proc_name = HPSA,
3f5eac3a
SC
766 .queuecommand = hpsa_scsi_queue_command,
767 .scan_start = hpsa_scan_start,
768 .scan_finished = hpsa_scan_finished,
7c0a0229 769 .change_queue_depth = hpsa_change_queue_depth,
3f5eac3a
SC
770 .this_id = -1,
771 .use_clustering = ENABLE_CLUSTERING,
75167d2c 772 .eh_abort_handler = hpsa_eh_abort_handler,
3f5eac3a
SC
773 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
774 .ioctl = hpsa_ioctl,
775 .slave_alloc = hpsa_slave_alloc,
41ce4c35 776 .slave_configure = hpsa_slave_configure,
3f5eac3a
SC
777 .slave_destroy = hpsa_slave_destroy,
778#ifdef CONFIG_COMPAT
779 .compat_ioctl = hpsa_compat_ioctl,
780#endif
781 .sdev_attrs = hpsa_sdev_attrs,
782 .shost_attrs = hpsa_shost_attrs,
c0d6a4d1 783 .max_sectors = 8192,
54b2b50c 784 .no_write_same = 1,
3f5eac3a
SC
785};
786
254f796b 787static inline u32 next_command(struct ctlr_info *h, u8 q)
3f5eac3a
SC
788{
789 u32 a;
072b0518 790 struct reply_queue_buffer *rq = &h->reply_queue[q];
3f5eac3a 791
e1f7de0c
MG
792 if (h->transMethod & CFGTBL_Trans_io_accel1)
793 return h->access.command_completed(h, q);
794
3f5eac3a 795 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
254f796b 796 return h->access.command_completed(h, q);
3f5eac3a 797
254f796b
MG
798 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
799 a = rq->head[rq->current_entry];
800 rq->current_entry++;
0cbf768e 801 atomic_dec(&h->commands_outstanding);
3f5eac3a
SC
802 } else {
803 a = FIFO_EMPTY;
804 }
805 /* Check for wraparound */
254f796b
MG
806 if (rq->current_entry == h->max_commands) {
807 rq->current_entry = 0;
808 rq->wraparound ^= 1;
3f5eac3a
SC
809 }
810 return a;
811}
812
c349775e
ST
813/*
814 * There are some special bits in the bus address of the
815 * command that we have to set for the controller to know
816 * how to process the command:
817 *
818 * Normal performant mode:
819 * bit 0: 1 means performant mode, 0 means simple mode.
820 * bits 1-3 = block fetch table entry
821 * bits 4-6 = command type (== 0)
822 *
823 * ioaccel1 mode:
824 * bit 0 = "performant mode" bit.
825 * bits 1-3 = block fetch table entry
826 * bits 4-6 = command type (== 110)
827 * (command type is needed because ioaccel1 mode
828 * commands are submitted through the same register as normal
829 * mode commands, so this is how the controller knows whether
830 * the command is normal mode or ioaccel1 mode.)
831 *
832 * ioaccel2 mode:
833 * bit 0 = "performant mode" bit.
834 * bits 1-4 = block fetch table entry (note extra bit)
835 * bits 4-6 = not needed, because ioaccel2 mode has
836 * a separate special register for submitting commands.
837 */
838
25163bd5
WS
839/*
840 * set_performant_mode: Modify the tag for cciss performant
3f5eac3a
SC
841 * set bit 0 for pull model, bits 3-1 for block fetch
842 * register number
843 */
25163bd5
WS
844#define DEFAULT_REPLY_QUEUE (-1)
845static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
846 int reply_queue)
3f5eac3a 847{
254f796b 848 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
3f5eac3a 849 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
25163bd5
WS
850 if (unlikely(!h->msix_vector))
851 return;
852 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
254f796b 853 c->Header.ReplyQueue =
804a5cb5 854 raw_smp_processor_id() % h->nreply_queues;
25163bd5
WS
855 else
856 c->Header.ReplyQueue = reply_queue % h->nreply_queues;
254f796b 857 }
3f5eac3a
SC
858}
859
c349775e 860static void set_ioaccel1_performant_mode(struct ctlr_info *h,
25163bd5
WS
861 struct CommandList *c,
862 int reply_queue)
c349775e
ST
863{
864 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
865
25163bd5
WS
866 /*
867 * Tell the controller to post the reply to the queue for this
c349775e
ST
868 * processor. This seems to give the best I/O throughput.
869 */
25163bd5
WS
870 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
871 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
872 else
873 cp->ReplyQueue = reply_queue % h->nreply_queues;
874 /*
875 * Set the bits in the address sent down to include:
c349775e
ST
876 * - performant mode bit (bit 0)
877 * - pull count (bits 1-3)
878 * - command type (bits 4-6)
879 */
880 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
881 IOACCEL1_BUSADDR_CMDTYPE;
882}
883
8be986cc
SC
884static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
885 struct CommandList *c,
886 int reply_queue)
887{
888 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
889 &h->ioaccel2_cmd_pool[c->cmdindex];
890
891 /* Tell the controller to post the reply to the queue for this
892 * processor. This seems to give the best I/O throughput.
893 */
894 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
895 cp->reply_queue = smp_processor_id() % h->nreply_queues;
896 else
897 cp->reply_queue = reply_queue % h->nreply_queues;
898 /* Set the bits in the address sent down to include:
899 * - performant mode bit not used in ioaccel mode 2
900 * - pull count (bits 0-3)
901 * - command type isn't needed for ioaccel2
902 */
903 c->busaddr |= h->ioaccel2_blockFetchTable[0];
904}
905
c349775e 906static void set_ioaccel2_performant_mode(struct ctlr_info *h,
25163bd5
WS
907 struct CommandList *c,
908 int reply_queue)
c349775e
ST
909{
910 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
911
25163bd5
WS
912 /*
913 * Tell the controller to post the reply to the queue for this
c349775e
ST
914 * processor. This seems to give the best I/O throughput.
915 */
25163bd5
WS
916 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
917 cp->reply_queue = smp_processor_id() % h->nreply_queues;
918 else
919 cp->reply_queue = reply_queue % h->nreply_queues;
920 /*
921 * Set the bits in the address sent down to include:
c349775e
ST
922 * - performant mode bit not used in ioaccel mode 2
923 * - pull count (bits 0-3)
924 * - command type isn't needed for ioaccel2
925 */
926 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
927}
928
e85c5974
SC
929static int is_firmware_flash_cmd(u8 *cdb)
930{
931 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
932}
933
934/*
935 * During firmware flash, the heartbeat register may not update as frequently
936 * as it should. So we dial down lockup detection during firmware flash. and
937 * dial it back up when firmware flash completes.
938 */
939#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
940#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
941static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
942 struct CommandList *c)
943{
944 if (!is_firmware_flash_cmd(c->Request.CDB))
945 return;
946 atomic_inc(&h->firmware_flash_in_progress);
947 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
948}
949
950static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
951 struct CommandList *c)
952{
953 if (is_firmware_flash_cmd(c->Request.CDB) &&
954 atomic_dec_and_test(&h->firmware_flash_in_progress))
955 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
956}
957
25163bd5
WS
958static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
959 struct CommandList *c, int reply_queue)
3f5eac3a 960{
c05e8866
SC
961 dial_down_lockup_detection_during_fw_flash(h, c);
962 atomic_inc(&h->commands_outstanding);
c349775e
ST
963 switch (c->cmd_type) {
964 case CMD_IOACCEL1:
25163bd5 965 set_ioaccel1_performant_mode(h, c, reply_queue);
c05e8866 966 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
c349775e
ST
967 break;
968 case CMD_IOACCEL2:
25163bd5 969 set_ioaccel2_performant_mode(h, c, reply_queue);
c05e8866 970 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
c349775e 971 break;
8be986cc
SC
972 case IOACCEL2_TMF:
973 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
974 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
975 break;
c349775e 976 default:
25163bd5 977 set_performant_mode(h, c, reply_queue);
c05e8866 978 h->access.submit_command(h, c);
c349775e 979 }
3f5eac3a
SC
980}
981
a58e7e53 982static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
25163bd5 983{
d604f533 984 if (unlikely(hpsa_is_pending_event(c)))
a58e7e53
WS
985 return finish_cmd(c);
986
25163bd5
WS
987 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
988}
989
3f5eac3a
SC
990static inline int is_hba_lunid(unsigned char scsi3addr[])
991{
992 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
993}
994
995static inline int is_scsi_rev_5(struct ctlr_info *h)
996{
997 if (!h->hba_inquiry_data)
998 return 0;
999 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1000 return 1;
1001 return 0;
1002}
1003
edd16368
SC
1004static int hpsa_find_target_lun(struct ctlr_info *h,
1005 unsigned char scsi3addr[], int bus, int *target, int *lun)
1006{
1007 /* finds an unused bus, target, lun for a new physical device
1008 * assumes h->devlock is held
1009 */
1010 int i, found = 0;
cfe5badc 1011 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
edd16368 1012
263d9401 1013 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
edd16368
SC
1014
1015 for (i = 0; i < h->ndevices; i++) {
1016 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
263d9401 1017 __set_bit(h->dev[i]->target, lun_taken);
edd16368
SC
1018 }
1019
263d9401
AM
1020 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1021 if (i < HPSA_MAX_DEVICES) {
1022 /* *bus = 1; */
1023 *target = i;
1024 *lun = 0;
1025 found = 1;
edd16368
SC
1026 }
1027 return !found;
1028}
1029
0d96ef5f
WS
1030static inline void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1031 struct hpsa_scsi_dev_t *dev, char *description)
1032{
1033 dev_printk(level, &h->pdev->dev,
1034 "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
1035 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1036 description,
1037 scsi_device_type(dev->devtype),
1038 dev->vendor,
1039 dev->model,
1040 dev->raid_level > RAID_UNKNOWN ?
1041 "RAID-?" : raid_label[dev->raid_level],
1042 dev->offload_config ? '+' : '-',
1043 dev->offload_enabled ? '+' : '-',
1044 dev->expose_state);
1045}
1046
edd16368
SC
1047/* Add an entry into h->dev[] array. */
1048static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
1049 struct hpsa_scsi_dev_t *device,
1050 struct hpsa_scsi_dev_t *added[], int *nadded)
1051{
1052 /* assumes h->devlock is held */
1053 int n = h->ndevices;
1054 int i;
1055 unsigned char addr1[8], addr2[8];
1056 struct hpsa_scsi_dev_t *sd;
1057
cfe5badc 1058 if (n >= HPSA_MAX_DEVICES) {
edd16368
SC
1059 dev_err(&h->pdev->dev, "too many devices, some will be "
1060 "inaccessible.\n");
1061 return -1;
1062 }
1063
1064 /* physical devices do not have lun or target assigned until now. */
1065 if (device->lun != -1)
1066 /* Logical device, lun is already assigned. */
1067 goto lun_assigned;
1068
1069 /* If this device a non-zero lun of a multi-lun device
1070 * byte 4 of the 8-byte LUN addr will contain the logical
2b08b3e9 1071 * unit no, zero otherwise.
edd16368
SC
1072 */
1073 if (device->scsi3addr[4] == 0) {
1074 /* This is not a non-zero lun of a multi-lun device */
1075 if (hpsa_find_target_lun(h, device->scsi3addr,
1076 device->bus, &device->target, &device->lun) != 0)
1077 return -1;
1078 goto lun_assigned;
1079 }
1080
1081 /* This is a non-zero lun of a multi-lun device.
1082 * Search through our list and find the device which
1083 * has the same 8 byte LUN address, excepting byte 4.
1084 * Assign the same bus and target for this new LUN.
1085 * Use the logical unit number from the firmware.
1086 */
1087 memcpy(addr1, device->scsi3addr, 8);
1088 addr1[4] = 0;
1089 for (i = 0; i < n; i++) {
1090 sd = h->dev[i];
1091 memcpy(addr2, sd->scsi3addr, 8);
1092 addr2[4] = 0;
1093 /* differ only in byte 4? */
1094 if (memcmp(addr1, addr2, 8) == 0) {
1095 device->bus = sd->bus;
1096 device->target = sd->target;
1097 device->lun = device->scsi3addr[4];
1098 break;
1099 }
1100 }
1101 if (device->lun == -1) {
1102 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1103 " suspect firmware bug or unsupported hardware "
1104 "configuration.\n");
1105 return -1;
1106 }
1107
1108lun_assigned:
1109
1110 h->dev[n] = device;
1111 h->ndevices++;
1112 added[*nadded] = device;
1113 (*nadded)++;
0d96ef5f
WS
1114 hpsa_show_dev_msg(KERN_INFO, h, device,
1115 device->expose_state & HPSA_SCSI_ADD ? "added" : "masked");
a473d86c
RE
1116 device->offload_to_be_enabled = device->offload_enabled;
1117 device->offload_enabled = 0;
edd16368
SC
1118 return 0;
1119}
1120
bd9244f7
ST
1121/* Update an entry in h->dev[] array. */
1122static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
1123 int entry, struct hpsa_scsi_dev_t *new_entry)
1124{
a473d86c 1125 int offload_enabled;
bd9244f7
ST
1126 /* assumes h->devlock is held */
1127 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1128
1129 /* Raid level changed. */
1130 h->dev[entry]->raid_level = new_entry->raid_level;
250fb125 1131
03383736
DB
1132 /* Raid offload parameters changed. Careful about the ordering. */
1133 if (new_entry->offload_config && new_entry->offload_enabled) {
1134 /*
1135 * if drive is newly offload_enabled, we want to copy the
1136 * raid map data first. If previously offload_enabled and
1137 * offload_config were set, raid map data had better be
1138 * the same as it was before. if raid map data is changed
1139 * then it had better be the case that
1140 * h->dev[entry]->offload_enabled is currently 0.
1141 */
1142 h->dev[entry]->raid_map = new_entry->raid_map;
1143 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
03383736 1144 }
a3144e0b
JH
1145 if (new_entry->hba_ioaccel_enabled) {
1146 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1147 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1148 }
1149 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
250fb125 1150 h->dev[entry]->offload_config = new_entry->offload_config;
9fb0de2d 1151 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
03383736 1152 h->dev[entry]->queue_depth = new_entry->queue_depth;
250fb125 1153
41ce4c35
SC
1154 /*
1155 * We can turn off ioaccel offload now, but need to delay turning
1156 * it on until we can update h->dev[entry]->phys_disk[], but we
1157 * can't do that until all the devices are updated.
1158 */
1159 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1160 if (!new_entry->offload_enabled)
1161 h->dev[entry]->offload_enabled = 0;
1162
a473d86c
RE
1163 offload_enabled = h->dev[entry]->offload_enabled;
1164 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
0d96ef5f 1165 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
a473d86c 1166 h->dev[entry]->offload_enabled = offload_enabled;
bd9244f7
ST
1167}
1168
2a8ccf31
SC
1169/* Replace an entry from h->dev[] array. */
1170static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
1171 int entry, struct hpsa_scsi_dev_t *new_entry,
1172 struct hpsa_scsi_dev_t *added[], int *nadded,
1173 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1174{
1175 /* assumes h->devlock is held */
cfe5badc 1176 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
2a8ccf31
SC
1177 removed[*nremoved] = h->dev[entry];
1178 (*nremoved)++;
01350d05
SC
1179
1180 /*
1181 * New physical devices won't have target/lun assigned yet
1182 * so we need to preserve the values in the slot we are replacing.
1183 */
1184 if (new_entry->target == -1) {
1185 new_entry->target = h->dev[entry]->target;
1186 new_entry->lun = h->dev[entry]->lun;
1187 }
1188
2a8ccf31
SC
1189 h->dev[entry] = new_entry;
1190 added[*nadded] = new_entry;
1191 (*nadded)++;
0d96ef5f 1192 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
a473d86c
RE
1193 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1194 new_entry->offload_enabled = 0;
2a8ccf31
SC
1195}
1196
edd16368
SC
1197/* Remove an entry from h->dev[] array. */
1198static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
1199 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1200{
1201 /* assumes h->devlock is held */
1202 int i;
1203 struct hpsa_scsi_dev_t *sd;
1204
cfe5badc 1205 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
edd16368
SC
1206
1207 sd = h->dev[entry];
1208 removed[*nremoved] = h->dev[entry];
1209 (*nremoved)++;
1210
1211 for (i = entry; i < h->ndevices-1; i++)
1212 h->dev[i] = h->dev[i+1];
1213 h->ndevices--;
0d96ef5f 1214 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
edd16368
SC
1215}
1216
1217#define SCSI3ADDR_EQ(a, b) ( \
1218 (a)[7] == (b)[7] && \
1219 (a)[6] == (b)[6] && \
1220 (a)[5] == (b)[5] && \
1221 (a)[4] == (b)[4] && \
1222 (a)[3] == (b)[3] && \
1223 (a)[2] == (b)[2] && \
1224 (a)[1] == (b)[1] && \
1225 (a)[0] == (b)[0])
1226
1227static void fixup_botched_add(struct ctlr_info *h,
1228 struct hpsa_scsi_dev_t *added)
1229{
1230 /* called when scsi_add_device fails in order to re-adjust
1231 * h->dev[] to match the mid layer's view.
1232 */
1233 unsigned long flags;
1234 int i, j;
1235
1236 spin_lock_irqsave(&h->lock, flags);
1237 for (i = 0; i < h->ndevices; i++) {
1238 if (h->dev[i] == added) {
1239 for (j = i; j < h->ndevices-1; j++)
1240 h->dev[j] = h->dev[j+1];
1241 h->ndevices--;
1242 break;
1243 }
1244 }
1245 spin_unlock_irqrestore(&h->lock, flags);
1246 kfree(added);
1247}
1248
1249static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1250 struct hpsa_scsi_dev_t *dev2)
1251{
edd16368
SC
1252 /* we compare everything except lun and target as these
1253 * are not yet assigned. Compare parts likely
1254 * to differ first
1255 */
1256 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1257 sizeof(dev1->scsi3addr)) != 0)
1258 return 0;
1259 if (memcmp(dev1->device_id, dev2->device_id,
1260 sizeof(dev1->device_id)) != 0)
1261 return 0;
1262 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1263 return 0;
1264 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1265 return 0;
edd16368
SC
1266 if (dev1->devtype != dev2->devtype)
1267 return 0;
edd16368
SC
1268 if (dev1->bus != dev2->bus)
1269 return 0;
1270 return 1;
1271}
1272
bd9244f7
ST
1273static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1274 struct hpsa_scsi_dev_t *dev2)
1275{
1276 /* Device attributes that can change, but don't mean
1277 * that the device is a different device, nor that the OS
1278 * needs to be told anything about the change.
1279 */
1280 if (dev1->raid_level != dev2->raid_level)
1281 return 1;
250fb125
SC
1282 if (dev1->offload_config != dev2->offload_config)
1283 return 1;
1284 if (dev1->offload_enabled != dev2->offload_enabled)
1285 return 1;
03383736
DB
1286 if (dev1->queue_depth != dev2->queue_depth)
1287 return 1;
bd9244f7
ST
1288 return 0;
1289}
1290
edd16368
SC
1291/* Find needle in haystack. If exact match found, return DEVICE_SAME,
1292 * and return needle location in *index. If scsi3addr matches, but not
1293 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
bd9244f7
ST
1294 * location in *index.
1295 * In the case of a minor device attribute change, such as RAID level, just
1296 * return DEVICE_UPDATED, along with the updated device's location in index.
1297 * If needle not found, return DEVICE_NOT_FOUND.
edd16368
SC
1298 */
1299static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1300 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1301 int *index)
1302{
1303 int i;
1304#define DEVICE_NOT_FOUND 0
1305#define DEVICE_CHANGED 1
1306#define DEVICE_SAME 2
bd9244f7 1307#define DEVICE_UPDATED 3
edd16368 1308 for (i = 0; i < haystack_size; i++) {
23231048
SC
1309 if (haystack[i] == NULL) /* previously removed. */
1310 continue;
edd16368
SC
1311 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1312 *index = i;
bd9244f7
ST
1313 if (device_is_the_same(needle, haystack[i])) {
1314 if (device_updated(needle, haystack[i]))
1315 return DEVICE_UPDATED;
edd16368 1316 return DEVICE_SAME;
bd9244f7 1317 } else {
9846590e
SC
1318 /* Keep offline devices offline */
1319 if (needle->volume_offline)
1320 return DEVICE_NOT_FOUND;
edd16368 1321 return DEVICE_CHANGED;
bd9244f7 1322 }
edd16368
SC
1323 }
1324 }
1325 *index = -1;
1326 return DEVICE_NOT_FOUND;
1327}
1328
9846590e
SC
1329static void hpsa_monitor_offline_device(struct ctlr_info *h,
1330 unsigned char scsi3addr[])
1331{
1332 struct offline_device_entry *device;
1333 unsigned long flags;
1334
1335 /* Check to see if device is already on the list */
1336 spin_lock_irqsave(&h->offline_device_lock, flags);
1337 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1338 if (memcmp(device->scsi3addr, scsi3addr,
1339 sizeof(device->scsi3addr)) == 0) {
1340 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1341 return;
1342 }
1343 }
1344 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1345
1346 /* Device is not on the list, add it. */
1347 device = kmalloc(sizeof(*device), GFP_KERNEL);
1348 if (!device) {
1349 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1350 return;
1351 }
1352 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1353 spin_lock_irqsave(&h->offline_device_lock, flags);
1354 list_add_tail(&device->offline_list, &h->offline_device_list);
1355 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1356}
1357
1358/* Print a message explaining various offline volume states */
1359static void hpsa_show_volume_status(struct ctlr_info *h,
1360 struct hpsa_scsi_dev_t *sd)
1361{
1362 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1363 dev_info(&h->pdev->dev,
1364 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1365 h->scsi_host->host_no,
1366 sd->bus, sd->target, sd->lun);
1367 switch (sd->volume_offline) {
1368 case HPSA_LV_OK:
1369 break;
1370 case HPSA_LV_UNDERGOING_ERASE:
1371 dev_info(&h->pdev->dev,
1372 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1373 h->scsi_host->host_no,
1374 sd->bus, sd->target, sd->lun);
1375 break;
1376 case HPSA_LV_UNDERGOING_RPI:
1377 dev_info(&h->pdev->dev,
1378 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1379 h->scsi_host->host_no,
1380 sd->bus, sd->target, sd->lun);
1381 break;
1382 case HPSA_LV_PENDING_RPI:
1383 dev_info(&h->pdev->dev,
1384 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1385 h->scsi_host->host_no,
1386 sd->bus, sd->target, sd->lun);
1387 break;
1388 case HPSA_LV_ENCRYPTED_NO_KEY:
1389 dev_info(&h->pdev->dev,
1390 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1391 h->scsi_host->host_no,
1392 sd->bus, sd->target, sd->lun);
1393 break;
1394 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1395 dev_info(&h->pdev->dev,
1396 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1397 h->scsi_host->host_no,
1398 sd->bus, sd->target, sd->lun);
1399 break;
1400 case HPSA_LV_UNDERGOING_ENCRYPTION:
1401 dev_info(&h->pdev->dev,
1402 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1403 h->scsi_host->host_no,
1404 sd->bus, sd->target, sd->lun);
1405 break;
1406 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1407 dev_info(&h->pdev->dev,
1408 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1409 h->scsi_host->host_no,
1410 sd->bus, sd->target, sd->lun);
1411 break;
1412 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1413 dev_info(&h->pdev->dev,
1414 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1415 h->scsi_host->host_no,
1416 sd->bus, sd->target, sd->lun);
1417 break;
1418 case HPSA_LV_PENDING_ENCRYPTION:
1419 dev_info(&h->pdev->dev,
1420 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1421 h->scsi_host->host_no,
1422 sd->bus, sd->target, sd->lun);
1423 break;
1424 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1425 dev_info(&h->pdev->dev,
1426 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1427 h->scsi_host->host_no,
1428 sd->bus, sd->target, sd->lun);
1429 break;
1430 }
1431}
1432
03383736
DB
1433/*
1434 * Figure the list of physical drive pointers for a logical drive with
1435 * raid offload configured.
1436 */
1437static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1438 struct hpsa_scsi_dev_t *dev[], int ndevices,
1439 struct hpsa_scsi_dev_t *logical_drive)
1440{
1441 struct raid_map_data *map = &logical_drive->raid_map;
1442 struct raid_map_disk_data *dd = &map->data[0];
1443 int i, j;
1444 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1445 le16_to_cpu(map->metadata_disks_per_row);
1446 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1447 le16_to_cpu(map->layout_map_count) *
1448 total_disks_per_row;
1449 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1450 total_disks_per_row;
1451 int qdepth;
1452
1453 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1454 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1455
d604f533
WS
1456 logical_drive->nphysical_disks = nraid_map_entries;
1457
03383736
DB
1458 qdepth = 0;
1459 for (i = 0; i < nraid_map_entries; i++) {
1460 logical_drive->phys_disk[i] = NULL;
1461 if (!logical_drive->offload_config)
1462 continue;
1463 for (j = 0; j < ndevices; j++) {
1464 if (dev[j]->devtype != TYPE_DISK)
1465 continue;
1466 if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1467 continue;
1468 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1469 continue;
1470
1471 logical_drive->phys_disk[i] = dev[j];
1472 if (i < nphys_disk)
1473 qdepth = min(h->nr_cmds, qdepth +
1474 logical_drive->phys_disk[i]->queue_depth);
1475 break;
1476 }
1477
1478 /*
1479 * This can happen if a physical drive is removed and
1480 * the logical drive is degraded. In that case, the RAID
1481 * map data will refer to a physical disk which isn't actually
1482 * present. And in that case offload_enabled should already
1483 * be 0, but we'll turn it off here just in case
1484 */
1485 if (!logical_drive->phys_disk[i]) {
1486 logical_drive->offload_enabled = 0;
41ce4c35
SC
1487 logical_drive->offload_to_be_enabled = 0;
1488 logical_drive->queue_depth = 8;
03383736
DB
1489 }
1490 }
1491 if (nraid_map_entries)
1492 /*
1493 * This is correct for reads, too high for full stripe writes,
1494 * way too high for partial stripe writes
1495 */
1496 logical_drive->queue_depth = qdepth;
1497 else
1498 logical_drive->queue_depth = h->nr_cmds;
1499}
1500
1501static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1502 struct hpsa_scsi_dev_t *dev[], int ndevices)
1503{
1504 int i;
1505
1506 for (i = 0; i < ndevices; i++) {
1507 if (dev[i]->devtype != TYPE_DISK)
1508 continue;
1509 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1510 continue;
41ce4c35
SC
1511
1512 /*
1513 * If offload is currently enabled, the RAID map and
1514 * phys_disk[] assignment *better* not be changing
1515 * and since it isn't changing, we do not need to
1516 * update it.
1517 */
1518 if (dev[i]->offload_enabled)
1519 continue;
1520
03383736
DB
1521 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1522 }
1523}
1524
4967bd3e 1525static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
edd16368
SC
1526 struct hpsa_scsi_dev_t *sd[], int nsds)
1527{
1528 /* sd contains scsi3 addresses and devtypes, and inquiry
1529 * data. This function takes what's in sd to be the current
1530 * reality and updates h->dev[] to reflect that reality.
1531 */
1532 int i, entry, device_change, changes = 0;
1533 struct hpsa_scsi_dev_t *csd;
1534 unsigned long flags;
1535 struct hpsa_scsi_dev_t **added, **removed;
1536 int nadded, nremoved;
1537 struct Scsi_Host *sh = NULL;
1538
cfe5badc
ST
1539 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1540 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
edd16368
SC
1541
1542 if (!added || !removed) {
1543 dev_warn(&h->pdev->dev, "out of memory in "
1544 "adjust_hpsa_scsi_table\n");
1545 goto free_and_out;
1546 }
1547
1548 spin_lock_irqsave(&h->devlock, flags);
1549
1550 /* find any devices in h->dev[] that are not in
1551 * sd[] and remove them from h->dev[], and for any
1552 * devices which have changed, remove the old device
1553 * info and add the new device info.
bd9244f7
ST
1554 * If minor device attributes change, just update
1555 * the existing device structure.
edd16368
SC
1556 */
1557 i = 0;
1558 nremoved = 0;
1559 nadded = 0;
1560 while (i < h->ndevices) {
1561 csd = h->dev[i];
1562 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1563 if (device_change == DEVICE_NOT_FOUND) {
1564 changes++;
1565 hpsa_scsi_remove_entry(h, hostno, i,
1566 removed, &nremoved);
1567 continue; /* remove ^^^, hence i not incremented */
1568 } else if (device_change == DEVICE_CHANGED) {
1569 changes++;
2a8ccf31
SC
1570 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1571 added, &nadded, removed, &nremoved);
c7f172dc
SC
1572 /* Set it to NULL to prevent it from being freed
1573 * at the bottom of hpsa_update_scsi_devices()
1574 */
1575 sd[entry] = NULL;
bd9244f7
ST
1576 } else if (device_change == DEVICE_UPDATED) {
1577 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
edd16368
SC
1578 }
1579 i++;
1580 }
1581
1582 /* Now, make sure every device listed in sd[] is also
1583 * listed in h->dev[], adding them if they aren't found
1584 */
1585
1586 for (i = 0; i < nsds; i++) {
1587 if (!sd[i]) /* if already added above. */
1588 continue;
9846590e
SC
1589
1590 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1591 * as the SCSI mid-layer does not handle such devices well.
1592 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1593 * at 160Hz, and prevents the system from coming up.
1594 */
1595 if (sd[i]->volume_offline) {
1596 hpsa_show_volume_status(h, sd[i]);
0d96ef5f 1597 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
9846590e
SC
1598 continue;
1599 }
1600
edd16368
SC
1601 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1602 h->ndevices, &entry);
1603 if (device_change == DEVICE_NOT_FOUND) {
1604 changes++;
1605 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1606 added, &nadded) != 0)
1607 break;
1608 sd[i] = NULL; /* prevent from being freed later. */
1609 } else if (device_change == DEVICE_CHANGED) {
1610 /* should never happen... */
1611 changes++;
1612 dev_warn(&h->pdev->dev,
1613 "device unexpectedly changed.\n");
1614 /* but if it does happen, we just ignore that device */
1615 }
1616 }
41ce4c35
SC
1617 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1618
1619 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1620 * any logical drives that need it enabled.
1621 */
1622 for (i = 0; i < h->ndevices; i++)
1623 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1624
edd16368
SC
1625 spin_unlock_irqrestore(&h->devlock, flags);
1626
9846590e
SC
1627 /* Monitor devices which are in one of several NOT READY states to be
1628 * brought online later. This must be done without holding h->devlock,
1629 * so don't touch h->dev[]
1630 */
1631 for (i = 0; i < nsds; i++) {
1632 if (!sd[i]) /* if already added above. */
1633 continue;
1634 if (sd[i]->volume_offline)
1635 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1636 }
1637
edd16368
SC
1638 /* Don't notify scsi mid layer of any changes the first time through
1639 * (or if there are no changes) scsi_scan_host will do it later the
1640 * first time through.
1641 */
1642 if (hostno == -1 || !changes)
1643 goto free_and_out;
1644
1645 sh = h->scsi_host;
1646 /* Notify scsi mid layer of any removed devices */
1647 for (i = 0; i < nremoved; i++) {
41ce4c35
SC
1648 if (removed[i]->expose_state & HPSA_SCSI_ADD) {
1649 struct scsi_device *sdev =
1650 scsi_device_lookup(sh, removed[i]->bus,
1651 removed[i]->target, removed[i]->lun);
1652 if (sdev != NULL) {
1653 scsi_remove_device(sdev);
1654 scsi_device_put(sdev);
1655 } else {
1656 /*
1657 * We don't expect to get here.
1658 * future cmds to this device will get selection
1659 * timeout as if the device was gone.
1660 */
0d96ef5f
WS
1661 hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
1662 "didn't find device for removal.");
41ce4c35 1663 }
edd16368
SC
1664 }
1665 kfree(removed[i]);
1666 removed[i] = NULL;
1667 }
1668
1669 /* Notify scsi mid layer of any added devices */
1670 for (i = 0; i < nadded; i++) {
41ce4c35
SC
1671 if (!(added[i]->expose_state & HPSA_SCSI_ADD))
1672 continue;
edd16368
SC
1673 if (scsi_add_device(sh, added[i]->bus,
1674 added[i]->target, added[i]->lun) == 0)
1675 continue;
0d96ef5f
WS
1676 hpsa_show_dev_msg(KERN_WARNING, h, added[i],
1677 "addition failed, device not added.");
edd16368
SC
1678 /* now we have to remove it from h->dev,
1679 * since it didn't get added to scsi mid layer
1680 */
1681 fixup_botched_add(h, added[i]);
105a3dbc 1682 added[i] = NULL;
edd16368
SC
1683 }
1684
1685free_and_out:
1686 kfree(added);
1687 kfree(removed);
edd16368
SC
1688}
1689
1690/*
9e03aa2f 1691 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
edd16368
SC
1692 * Assume's h->devlock is held.
1693 */
1694static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1695 int bus, int target, int lun)
1696{
1697 int i;
1698 struct hpsa_scsi_dev_t *sd;
1699
1700 for (i = 0; i < h->ndevices; i++) {
1701 sd = h->dev[i];
1702 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1703 return sd;
1704 }
1705 return NULL;
1706}
1707
edd16368
SC
1708static int hpsa_slave_alloc(struct scsi_device *sdev)
1709{
1710 struct hpsa_scsi_dev_t *sd;
1711 unsigned long flags;
1712 struct ctlr_info *h;
1713
1714 h = sdev_to_hba(sdev);
1715 spin_lock_irqsave(&h->devlock, flags);
1716 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1717 sdev_id(sdev), sdev->lun);
41ce4c35 1718 if (likely(sd)) {
03383736 1719 atomic_set(&sd->ioaccel_cmds_out, 0);
41ce4c35
SC
1720 sdev->hostdata = (sd->expose_state & HPSA_SCSI_ADD) ? sd : NULL;
1721 } else
1722 sdev->hostdata = NULL;
edd16368
SC
1723 spin_unlock_irqrestore(&h->devlock, flags);
1724 return 0;
1725}
1726
41ce4c35
SC
1727/* configure scsi device based on internal per-device structure */
1728static int hpsa_slave_configure(struct scsi_device *sdev)
1729{
1730 struct hpsa_scsi_dev_t *sd;
1731 int queue_depth;
1732
1733 sd = sdev->hostdata;
1734 sdev->no_uld_attach = !sd || !(sd->expose_state & HPSA_ULD_ATTACH);
1735
1736 if (sd)
1737 queue_depth = sd->queue_depth != 0 ?
1738 sd->queue_depth : sdev->host->can_queue;
1739 else
1740 queue_depth = sdev->host->can_queue;
1741
1742 scsi_change_queue_depth(sdev, queue_depth);
1743
1744 return 0;
1745}
1746
edd16368
SC
1747static void hpsa_slave_destroy(struct scsi_device *sdev)
1748{
bcc44255 1749 /* nothing to do. */
edd16368
SC
1750}
1751
d9a729f3
WS
1752static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1753{
1754 int i;
1755
1756 if (!h->ioaccel2_cmd_sg_list)
1757 return;
1758 for (i = 0; i < h->nr_cmds; i++) {
1759 kfree(h->ioaccel2_cmd_sg_list[i]);
1760 h->ioaccel2_cmd_sg_list[i] = NULL;
1761 }
1762 kfree(h->ioaccel2_cmd_sg_list);
1763 h->ioaccel2_cmd_sg_list = NULL;
1764}
1765
1766static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1767{
1768 int i;
1769
1770 if (h->chainsize <= 0)
1771 return 0;
1772
1773 h->ioaccel2_cmd_sg_list =
1774 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1775 GFP_KERNEL);
1776 if (!h->ioaccel2_cmd_sg_list)
1777 return -ENOMEM;
1778 for (i = 0; i < h->nr_cmds; i++) {
1779 h->ioaccel2_cmd_sg_list[i] =
1780 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1781 h->maxsgentries, GFP_KERNEL);
1782 if (!h->ioaccel2_cmd_sg_list[i])
1783 goto clean;
1784 }
1785 return 0;
1786
1787clean:
1788 hpsa_free_ioaccel2_sg_chain_blocks(h);
1789 return -ENOMEM;
1790}
1791
33a2ffce
SC
1792static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1793{
1794 int i;
1795
1796 if (!h->cmd_sg_list)
1797 return;
1798 for (i = 0; i < h->nr_cmds; i++) {
1799 kfree(h->cmd_sg_list[i]);
1800 h->cmd_sg_list[i] = NULL;
1801 }
1802 kfree(h->cmd_sg_list);
1803 h->cmd_sg_list = NULL;
1804}
1805
105a3dbc 1806static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
33a2ffce
SC
1807{
1808 int i;
1809
1810 if (h->chainsize <= 0)
1811 return 0;
1812
1813 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1814 GFP_KERNEL);
3d4e6af8
RE
1815 if (!h->cmd_sg_list) {
1816 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
33a2ffce 1817 return -ENOMEM;
3d4e6af8 1818 }
33a2ffce
SC
1819 for (i = 0; i < h->nr_cmds; i++) {
1820 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1821 h->chainsize, GFP_KERNEL);
3d4e6af8
RE
1822 if (!h->cmd_sg_list[i]) {
1823 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
33a2ffce 1824 goto clean;
3d4e6af8 1825 }
33a2ffce
SC
1826 }
1827 return 0;
1828
1829clean:
1830 hpsa_free_sg_chain_blocks(h);
1831 return -ENOMEM;
1832}
1833
d9a729f3
WS
1834static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
1835 struct io_accel2_cmd *cp, struct CommandList *c)
1836{
1837 struct ioaccel2_sg_element *chain_block;
1838 u64 temp64;
1839 u32 chain_size;
1840
1841 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
1842 chain_size = le32_to_cpu(cp->data_len);
1843 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
1844 PCI_DMA_TODEVICE);
1845 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1846 /* prevent subsequent unmapping */
1847 cp->sg->address = 0;
1848 return -1;
1849 }
1850 cp->sg->address = cpu_to_le64(temp64);
1851 return 0;
1852}
1853
1854static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
1855 struct io_accel2_cmd *cp)
1856{
1857 struct ioaccel2_sg_element *chain_sg;
1858 u64 temp64;
1859 u32 chain_size;
1860
1861 chain_sg = cp->sg;
1862 temp64 = le64_to_cpu(chain_sg->address);
1863 chain_size = le32_to_cpu(cp->data_len);
1864 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
1865}
1866
e2bea6df 1867static int hpsa_map_sg_chain_block(struct ctlr_info *h,
33a2ffce
SC
1868 struct CommandList *c)
1869{
1870 struct SGDescriptor *chain_sg, *chain_block;
1871 u64 temp64;
50a0decf 1872 u32 chain_len;
33a2ffce
SC
1873
1874 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1875 chain_block = h->cmd_sg_list[c->cmdindex];
50a0decf
SC
1876 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1877 chain_len = sizeof(*chain_sg) *
2b08b3e9 1878 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
50a0decf
SC
1879 chain_sg->Len = cpu_to_le32(chain_len);
1880 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
33a2ffce 1881 PCI_DMA_TODEVICE);
e2bea6df
SC
1882 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1883 /* prevent subsequent unmapping */
50a0decf 1884 chain_sg->Addr = cpu_to_le64(0);
e2bea6df
SC
1885 return -1;
1886 }
50a0decf 1887 chain_sg->Addr = cpu_to_le64(temp64);
e2bea6df 1888 return 0;
33a2ffce
SC
1889}
1890
1891static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1892 struct CommandList *c)
1893{
1894 struct SGDescriptor *chain_sg;
33a2ffce 1895
50a0decf 1896 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
33a2ffce
SC
1897 return;
1898
1899 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
50a0decf
SC
1900 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1901 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
33a2ffce
SC
1902}
1903
a09c1441
ST
1904
1905/* Decode the various types of errors on ioaccel2 path.
1906 * Return 1 for any error that should generate a RAID path retry.
1907 * Return 0 for errors that don't require a RAID path retry.
1908 */
1909static int handle_ioaccel_mode2_error(struct ctlr_info *h,
c349775e
ST
1910 struct CommandList *c,
1911 struct scsi_cmnd *cmd,
1912 struct io_accel2_cmd *c2)
1913{
1914 int data_len;
a09c1441 1915 int retry = 0;
c40820d5 1916 u32 ioaccel2_resid = 0;
c349775e
ST
1917
1918 switch (c2->error_data.serv_response) {
1919 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1920 switch (c2->error_data.status) {
1921 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1922 break;
1923 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
ee6b1889 1924 cmd->result |= SAM_STAT_CHECK_CONDITION;
c349775e 1925 if (c2->error_data.data_present !=
ee6b1889
SC
1926 IOACCEL2_SENSE_DATA_PRESENT) {
1927 memset(cmd->sense_buffer, 0,
1928 SCSI_SENSE_BUFFERSIZE);
c349775e 1929 break;
ee6b1889 1930 }
c349775e
ST
1931 /* copy the sense data */
1932 data_len = c2->error_data.sense_data_len;
1933 if (data_len > SCSI_SENSE_BUFFERSIZE)
1934 data_len = SCSI_SENSE_BUFFERSIZE;
1935 if (data_len > sizeof(c2->error_data.sense_data_buff))
1936 data_len =
1937 sizeof(c2->error_data.sense_data_buff);
1938 memcpy(cmd->sense_buffer,
1939 c2->error_data.sense_data_buff, data_len);
a09c1441 1940 retry = 1;
c349775e
ST
1941 break;
1942 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
a09c1441 1943 retry = 1;
c349775e
ST
1944 break;
1945 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
a09c1441 1946 retry = 1;
c349775e
ST
1947 break;
1948 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
4a8da22b 1949 retry = 1;
c349775e
ST
1950 break;
1951 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
a09c1441 1952 retry = 1;
c349775e
ST
1953 break;
1954 default:
a09c1441 1955 retry = 1;
c349775e
ST
1956 break;
1957 }
1958 break;
1959 case IOACCEL2_SERV_RESPONSE_FAILURE:
c40820d5
JH
1960 switch (c2->error_data.status) {
1961 case IOACCEL2_STATUS_SR_IO_ERROR:
1962 case IOACCEL2_STATUS_SR_IO_ABORTED:
1963 case IOACCEL2_STATUS_SR_OVERRUN:
1964 retry = 1;
1965 break;
1966 case IOACCEL2_STATUS_SR_UNDERRUN:
1967 cmd->result = (DID_OK << 16); /* host byte */
1968 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1969 ioaccel2_resid = get_unaligned_le32(
1970 &c2->error_data.resid_cnt[0]);
1971 scsi_set_resid(cmd, ioaccel2_resid);
1972 break;
1973 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
1974 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
1975 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
1976 /* We will get an event from ctlr to trigger rescan */
1977 retry = 1;
1978 break;
1979 default:
1980 retry = 1;
c40820d5 1981 }
c349775e
ST
1982 break;
1983 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1984 break;
1985 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1986 break;
1987 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
a09c1441 1988 retry = 1;
c349775e
ST
1989 break;
1990 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
c349775e
ST
1991 break;
1992 default:
a09c1441 1993 retry = 1;
c349775e
ST
1994 break;
1995 }
a09c1441
ST
1996
1997 return retry; /* retry on raid path? */
c349775e
ST
1998}
1999
a58e7e53
WS
2000static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2001 struct CommandList *c)
2002{
d604f533
WS
2003 bool do_wake = false;
2004
a58e7e53
WS
2005 /*
2006 * Prevent the following race in the abort handler:
2007 *
2008 * 1. LLD is requested to abort a SCSI command
2009 * 2. The SCSI command completes
2010 * 3. The struct CommandList associated with step 2 is made available
2011 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2012 * 5. Abort handler follows scsi_cmnd->host_scribble and
2013 * finds struct CommandList and tries to aborts it
2014 * Now we have aborted the wrong command.
2015 *
d604f533
WS
2016 * Reset c->scsi_cmd here so that the abort or reset handler will know
2017 * this command has completed. Then, check to see if the handler is
a58e7e53
WS
2018 * waiting for this command, and, if so, wake it.
2019 */
2020 c->scsi_cmd = SCSI_CMD_IDLE;
d604f533 2021 mb(); /* Declare command idle before checking for pending events. */
a58e7e53 2022 if (c->abort_pending) {
d604f533 2023 do_wake = true;
a58e7e53 2024 c->abort_pending = false;
a58e7e53 2025 }
d604f533
WS
2026 if (c->reset_pending) {
2027 unsigned long flags;
2028 struct hpsa_scsi_dev_t *dev;
2029
2030 /*
2031 * There appears to be a reset pending; lock the lock and
2032 * reconfirm. If so, then decrement the count of outstanding
2033 * commands and wake the reset command if this is the last one.
2034 */
2035 spin_lock_irqsave(&h->lock, flags);
2036 dev = c->reset_pending; /* Re-fetch under the lock. */
2037 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2038 do_wake = true;
2039 c->reset_pending = NULL;
2040 spin_unlock_irqrestore(&h->lock, flags);
2041 }
2042
2043 if (do_wake)
2044 wake_up_all(&h->event_sync_wait_queue);
a58e7e53
WS
2045}
2046
73153fe5
WS
2047static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2048 struct CommandList *c)
2049{
2050 hpsa_cmd_resolve_events(h, c);
2051 cmd_tagged_free(h, c);
2052}
2053
8a0ff92c
WS
2054static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2055 struct CommandList *c, struct scsi_cmnd *cmd)
2056{
73153fe5 2057 hpsa_cmd_resolve_and_free(h, c);
8a0ff92c
WS
2058 cmd->scsi_done(cmd);
2059}
2060
2061static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2062{
2063 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2064 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2065}
2066
a58e7e53
WS
2067static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2068{
2069 cmd->result = DID_ABORT << 16;
2070}
2071
2072static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2073 struct scsi_cmnd *cmd)
2074{
2075 hpsa_set_scsi_cmd_aborted(cmd);
2076 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2077 c->Request.CDB, c->err_info->ScsiStatus);
73153fe5 2078 hpsa_cmd_resolve_and_free(h, c);
a58e7e53
WS
2079}
2080
c349775e
ST
2081static void process_ioaccel2_completion(struct ctlr_info *h,
2082 struct CommandList *c, struct scsi_cmnd *cmd,
2083 struct hpsa_scsi_dev_t *dev)
2084{
2085 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2086
2087 /* check for good status */
2088 if (likely(c2->error_data.serv_response == 0 &&
8a0ff92c
WS
2089 c2->error_data.status == 0))
2090 return hpsa_cmd_free_and_done(h, c, cmd);
c349775e 2091
8a0ff92c
WS
2092 /*
2093 * Any RAID offload error results in retry which will use
c349775e
ST
2094 * the normal I/O path so the controller can handle whatever's
2095 * wrong.
2096 */
2097 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
2098 c2->error_data.serv_response ==
2099 IOACCEL2_SERV_RESPONSE_FAILURE) {
080ef1cc
DB
2100 if (c2->error_data.status ==
2101 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2102 dev->offload_enabled = 0;
8a0ff92c
WS
2103
2104 return hpsa_retry_cmd(h, c);
a09c1441 2105 }
080ef1cc
DB
2106
2107 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
8a0ff92c 2108 return hpsa_retry_cmd(h, c);
080ef1cc 2109
8a0ff92c 2110 return hpsa_cmd_free_and_done(h, c, cmd);
c349775e
ST
2111}
2112
9437ac43
SC
2113/* Returns 0 on success, < 0 otherwise. */
2114static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2115 struct CommandList *cp)
2116{
2117 u8 tmf_status = cp->err_info->ScsiStatus;
2118
2119 switch (tmf_status) {
2120 case CISS_TMF_COMPLETE:
2121 /*
2122 * CISS_TMF_COMPLETE never happens, instead,
2123 * ei->CommandStatus == 0 for this case.
2124 */
2125 case CISS_TMF_SUCCESS:
2126 return 0;
2127 case CISS_TMF_INVALID_FRAME:
2128 case CISS_TMF_NOT_SUPPORTED:
2129 case CISS_TMF_FAILED:
2130 case CISS_TMF_WRONG_LUN:
2131 case CISS_TMF_OVERLAPPED_TAG:
2132 break;
2133 default:
2134 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2135 tmf_status);
2136 break;
2137 }
2138 return -tmf_status;
2139}
2140
1fb011fb 2141static void complete_scsi_command(struct CommandList *cp)
edd16368
SC
2142{
2143 struct scsi_cmnd *cmd;
2144 struct ctlr_info *h;
2145 struct ErrorInfo *ei;
283b4a9b 2146 struct hpsa_scsi_dev_t *dev;
d9a729f3 2147 struct io_accel2_cmd *c2;
edd16368 2148
9437ac43
SC
2149 u8 sense_key;
2150 u8 asc; /* additional sense code */
2151 u8 ascq; /* additional sense code qualifier */
db111e18 2152 unsigned long sense_data_size;
edd16368
SC
2153
2154 ei = cp->err_info;
7fa3030c 2155 cmd = cp->scsi_cmd;
edd16368 2156 h = cp->h;
283b4a9b 2157 dev = cmd->device->hostdata;
d9a729f3 2158 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
edd16368
SC
2159
2160 scsi_dma_unmap(cmd); /* undo the DMA mappings */
e1f7de0c 2161 if ((cp->cmd_type == CMD_SCSI) &&
2b08b3e9 2162 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
33a2ffce 2163 hpsa_unmap_sg_chain_block(h, cp);
edd16368 2164
d9a729f3
WS
2165 if ((cp->cmd_type == CMD_IOACCEL2) &&
2166 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2167 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2168
edd16368
SC
2169 cmd->result = (DID_OK << 16); /* host byte */
2170 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
c349775e 2171
03383736
DB
2172 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2173 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2174
25163bd5
WS
2175 /*
2176 * We check for lockup status here as it may be set for
2177 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2178 * fail_all_oustanding_cmds()
2179 */
2180 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2181 /* DID_NO_CONNECT will prevent a retry */
2182 cmd->result = DID_NO_CONNECT << 16;
8a0ff92c 2183 return hpsa_cmd_free_and_done(h, cp, cmd);
25163bd5
WS
2184 }
2185
d604f533
WS
2186 if ((unlikely(hpsa_is_pending_event(cp)))) {
2187 if (cp->reset_pending)
2188 return hpsa_cmd_resolve_and_free(h, cp);
2189 if (cp->abort_pending)
2190 return hpsa_cmd_abort_and_free(h, cp, cmd);
2191 }
2192
c349775e
ST
2193 if (cp->cmd_type == CMD_IOACCEL2)
2194 return process_ioaccel2_completion(h, cp, cmd, dev);
2195
6aa4c361 2196 scsi_set_resid(cmd, ei->ResidualCnt);
8a0ff92c
WS
2197 if (ei->CommandStatus == 0)
2198 return hpsa_cmd_free_and_done(h, cp, cmd);
6aa4c361 2199
e1f7de0c
MG
2200 /* For I/O accelerator commands, copy over some fields to the normal
2201 * CISS header used below for error handling.
2202 */
2203 if (cp->cmd_type == CMD_IOACCEL1) {
2204 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2b08b3e9
DB
2205 cp->Header.SGList = scsi_sg_count(cmd);
2206 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2207 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2208 IOACCEL1_IOFLAGS_CDBLEN_MASK;
50a0decf 2209 cp->Header.tag = c->tag;
e1f7de0c
MG
2210 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2211 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
283b4a9b
SC
2212
2213 /* Any RAID offload error results in retry which will use
2214 * the normal I/O path so the controller can handle whatever's
2215 * wrong.
2216 */
2217 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
2218 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2219 dev->offload_enabled = 0;
d604f533 2220 return hpsa_retry_cmd(h, cp);
283b4a9b 2221 }
e1f7de0c
MG
2222 }
2223
edd16368
SC
2224 /* an error has occurred */
2225 switch (ei->CommandStatus) {
2226
2227 case CMD_TARGET_STATUS:
9437ac43
SC
2228 cmd->result |= ei->ScsiStatus;
2229 /* copy the sense data */
2230 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2231 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2232 else
2233 sense_data_size = sizeof(ei->SenseInfo);
2234 if (ei->SenseLen < sense_data_size)
2235 sense_data_size = ei->SenseLen;
2236 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2237 if (ei->ScsiStatus)
2238 decode_sense_data(ei->SenseInfo, sense_data_size,
2239 &sense_key, &asc, &ascq);
edd16368 2240 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1d3b3609 2241 if (sense_key == ABORTED_COMMAND) {
2e311fba 2242 cmd->result |= DID_SOFT_ERROR << 16;
1d3b3609
MG
2243 break;
2244 }
edd16368
SC
2245 break;
2246 }
edd16368
SC
2247 /* Problem was not a check condition
2248 * Pass it up to the upper layers...
2249 */
2250 if (ei->ScsiStatus) {
2251 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2252 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2253 "Returning result: 0x%x\n",
2254 cp, ei->ScsiStatus,
2255 sense_key, asc, ascq,
2256 cmd->result);
2257 } else { /* scsi status is zero??? How??? */
2258 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2259 "Returning no connection.\n", cp),
2260
2261 /* Ordinarily, this case should never happen,
2262 * but there is a bug in some released firmware
2263 * revisions that allows it to happen if, for
2264 * example, a 4100 backplane loses power and
2265 * the tape drive is in it. We assume that
2266 * it's a fatal error of some kind because we
2267 * can't show that it wasn't. We will make it
2268 * look like selection timeout since that is
2269 * the most common reason for this to occur,
2270 * and it's severe enough.
2271 */
2272
2273 cmd->result = DID_NO_CONNECT << 16;
2274 }
2275 break;
2276
2277 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2278 break;
2279 case CMD_DATA_OVERRUN:
f42e81e1
SC
2280 dev_warn(&h->pdev->dev,
2281 "CDB %16phN data overrun\n", cp->Request.CDB);
edd16368
SC
2282 break;
2283 case CMD_INVALID: {
2284 /* print_bytes(cp, sizeof(*cp), 1, 0);
2285 print_cmd(cp); */
2286 /* We get CMD_INVALID if you address a non-existent device
2287 * instead of a selection timeout (no response). You will
2288 * see this if you yank out a drive, then try to access it.
2289 * This is kind of a shame because it means that any other
2290 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2291 * missing target. */
2292 cmd->result = DID_NO_CONNECT << 16;
2293 }
2294 break;
2295 case CMD_PROTOCOL_ERR:
256d0eaa 2296 cmd->result = DID_ERROR << 16;
f42e81e1
SC
2297 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2298 cp->Request.CDB);
edd16368
SC
2299 break;
2300 case CMD_HARDWARE_ERR:
2301 cmd->result = DID_ERROR << 16;
f42e81e1
SC
2302 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2303 cp->Request.CDB);
edd16368
SC
2304 break;
2305 case CMD_CONNECTION_LOST:
2306 cmd->result = DID_ERROR << 16;
f42e81e1
SC
2307 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2308 cp->Request.CDB);
edd16368
SC
2309 break;
2310 case CMD_ABORTED:
a58e7e53
WS
2311 /* Return now to avoid calling scsi_done(). */
2312 return hpsa_cmd_abort_and_free(h, cp, cmd);
edd16368
SC
2313 case CMD_ABORT_FAILED:
2314 cmd->result = DID_ERROR << 16;
f42e81e1
SC
2315 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2316 cp->Request.CDB);
edd16368
SC
2317 break;
2318 case CMD_UNSOLICITED_ABORT:
f6e76055 2319 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
f42e81e1
SC
2320 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2321 cp->Request.CDB);
edd16368
SC
2322 break;
2323 case CMD_TIMEOUT:
2324 cmd->result = DID_TIME_OUT << 16;
f42e81e1
SC
2325 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2326 cp->Request.CDB);
edd16368 2327 break;
1d5e2ed0
SC
2328 case CMD_UNABORTABLE:
2329 cmd->result = DID_ERROR << 16;
2330 dev_warn(&h->pdev->dev, "Command unabortable\n");
2331 break;
9437ac43
SC
2332 case CMD_TMF_STATUS:
2333 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2334 cmd->result = DID_ERROR << 16;
2335 break;
283b4a9b
SC
2336 case CMD_IOACCEL_DISABLED:
2337 /* This only handles the direct pass-through case since RAID
2338 * offload is handled above. Just attempt a retry.
2339 */
2340 cmd->result = DID_SOFT_ERROR << 16;
2341 dev_warn(&h->pdev->dev,
2342 "cp %p had HP SSD Smart Path error\n", cp);
2343 break;
edd16368
SC
2344 default:
2345 cmd->result = DID_ERROR << 16;
2346 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2347 cp, ei->CommandStatus);
2348 }
8a0ff92c
WS
2349
2350 return hpsa_cmd_free_and_done(h, cp, cmd);
edd16368
SC
2351}
2352
edd16368
SC
2353static void hpsa_pci_unmap(struct pci_dev *pdev,
2354 struct CommandList *c, int sg_used, int data_direction)
2355{
2356 int i;
edd16368 2357
50a0decf
SC
2358 for (i = 0; i < sg_used; i++)
2359 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2360 le32_to_cpu(c->SG[i].Len),
2361 data_direction);
edd16368
SC
2362}
2363
a2dac136 2364static int hpsa_map_one(struct pci_dev *pdev,
edd16368
SC
2365 struct CommandList *cp,
2366 unsigned char *buf,
2367 size_t buflen,
2368 int data_direction)
2369{
01a02ffc 2370 u64 addr64;
edd16368
SC
2371
2372 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2373 cp->Header.SGList = 0;
50a0decf 2374 cp->Header.SGTotal = cpu_to_le16(0);
a2dac136 2375 return 0;
edd16368
SC
2376 }
2377
50a0decf 2378 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
eceaae18 2379 if (dma_mapping_error(&pdev->dev, addr64)) {
a2dac136 2380 /* Prevent subsequent unmap of something never mapped */
eceaae18 2381 cp->Header.SGList = 0;
50a0decf 2382 cp->Header.SGTotal = cpu_to_le16(0);
a2dac136 2383 return -1;
eceaae18 2384 }
50a0decf
SC
2385 cp->SG[0].Addr = cpu_to_le64(addr64);
2386 cp->SG[0].Len = cpu_to_le32(buflen);
2387 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2388 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2389 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
a2dac136 2390 return 0;
edd16368
SC
2391}
2392
25163bd5
WS
2393#define NO_TIMEOUT ((unsigned long) -1)
2394#define DEFAULT_TIMEOUT 30000 /* milliseconds */
2395static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2396 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
edd16368
SC
2397{
2398 DECLARE_COMPLETION_ONSTACK(wait);
2399
2400 c->waiting = &wait;
25163bd5
WS
2401 __enqueue_cmd_and_start_io(h, c, reply_queue);
2402 if (timeout_msecs == NO_TIMEOUT) {
2403 /* TODO: get rid of this no-timeout thing */
2404 wait_for_completion_io(&wait);
2405 return IO_OK;
2406 }
2407 if (!wait_for_completion_io_timeout(&wait,
2408 msecs_to_jiffies(timeout_msecs))) {
2409 dev_warn(&h->pdev->dev, "Command timed out.\n");
2410 return -ETIMEDOUT;
2411 }
2412 return IO_OK;
2413}
2414
2415static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2416 int reply_queue, unsigned long timeout_msecs)
2417{
2418 if (unlikely(lockup_detected(h))) {
2419 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2420 return IO_OK;
2421 }
2422 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
edd16368
SC
2423}
2424
094963da
SC
2425static u32 lockup_detected(struct ctlr_info *h)
2426{
2427 int cpu;
2428 u32 rc, *lockup_detected;
2429
2430 cpu = get_cpu();
2431 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2432 rc = *lockup_detected;
2433 put_cpu();
2434 return rc;
2435}
2436
9c2fc160 2437#define MAX_DRIVER_CMD_RETRIES 25
25163bd5
WS
2438static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2439 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
edd16368 2440{
9c2fc160 2441 int backoff_time = 10, retry_count = 0;
25163bd5 2442 int rc;
edd16368
SC
2443
2444 do {
7630abd0 2445 memset(c->err_info, 0, sizeof(*c->err_info));
25163bd5
WS
2446 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2447 timeout_msecs);
2448 if (rc)
2449 break;
edd16368 2450 retry_count++;
9c2fc160
SC
2451 if (retry_count > 3) {
2452 msleep(backoff_time);
2453 if (backoff_time < 1000)
2454 backoff_time *= 2;
2455 }
852af20a 2456 } while ((check_for_unit_attention(h, c) ||
9c2fc160
SC
2457 check_for_busy(h, c)) &&
2458 retry_count <= MAX_DRIVER_CMD_RETRIES);
edd16368 2459 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
25163bd5
WS
2460 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2461 rc = -EIO;
2462 return rc;
edd16368
SC
2463}
2464
d1e8beac
SC
2465static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2466 struct CommandList *c)
edd16368 2467{
d1e8beac
SC
2468 const u8 *cdb = c->Request.CDB;
2469 const u8 *lun = c->Header.LUN.LunAddrBytes;
2470
2471 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2472 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2473 txt, lun[0], lun[1], lun[2], lun[3],
2474 lun[4], lun[5], lun[6], lun[7],
2475 cdb[0], cdb[1], cdb[2], cdb[3],
2476 cdb[4], cdb[5], cdb[6], cdb[7],
2477 cdb[8], cdb[9], cdb[10], cdb[11],
2478 cdb[12], cdb[13], cdb[14], cdb[15]);
2479}
2480
2481static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2482 struct CommandList *cp)
2483{
2484 const struct ErrorInfo *ei = cp->err_info;
edd16368 2485 struct device *d = &cp->h->pdev->dev;
9437ac43
SC
2486 u8 sense_key, asc, ascq;
2487 int sense_len;
edd16368 2488
edd16368
SC
2489 switch (ei->CommandStatus) {
2490 case CMD_TARGET_STATUS:
9437ac43
SC
2491 if (ei->SenseLen > sizeof(ei->SenseInfo))
2492 sense_len = sizeof(ei->SenseInfo);
2493 else
2494 sense_len = ei->SenseLen;
2495 decode_sense_data(ei->SenseInfo, sense_len,
2496 &sense_key, &asc, &ascq);
d1e8beac
SC
2497 hpsa_print_cmd(h, "SCSI status", cp);
2498 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
9437ac43
SC
2499 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2500 sense_key, asc, ascq);
d1e8beac 2501 else
9437ac43 2502 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
edd16368
SC
2503 if (ei->ScsiStatus == 0)
2504 dev_warn(d, "SCSI status is abnormally zero. "
2505 "(probably indicates selection timeout "
2506 "reported incorrectly due to a known "
2507 "firmware bug, circa July, 2001.)\n");
2508 break;
2509 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
edd16368
SC
2510 break;
2511 case CMD_DATA_OVERRUN:
d1e8beac 2512 hpsa_print_cmd(h, "overrun condition", cp);
edd16368
SC
2513 break;
2514 case CMD_INVALID: {
2515 /* controller unfortunately reports SCSI passthru's
2516 * to non-existent targets as invalid commands.
2517 */
d1e8beac
SC
2518 hpsa_print_cmd(h, "invalid command", cp);
2519 dev_warn(d, "probably means device no longer present\n");
edd16368
SC
2520 }
2521 break;
2522 case CMD_PROTOCOL_ERR:
d1e8beac 2523 hpsa_print_cmd(h, "protocol error", cp);
edd16368
SC
2524 break;
2525 case CMD_HARDWARE_ERR:
d1e8beac 2526 hpsa_print_cmd(h, "hardware error", cp);
edd16368
SC
2527 break;
2528 case CMD_CONNECTION_LOST:
d1e8beac 2529 hpsa_print_cmd(h, "connection lost", cp);
edd16368
SC
2530 break;
2531 case CMD_ABORTED:
d1e8beac 2532 hpsa_print_cmd(h, "aborted", cp);
edd16368
SC
2533 break;
2534 case CMD_ABORT_FAILED:
d1e8beac 2535 hpsa_print_cmd(h, "abort failed", cp);
edd16368
SC
2536 break;
2537 case CMD_UNSOLICITED_ABORT:
d1e8beac 2538 hpsa_print_cmd(h, "unsolicited abort", cp);
edd16368
SC
2539 break;
2540 case CMD_TIMEOUT:
d1e8beac 2541 hpsa_print_cmd(h, "timed out", cp);
edd16368 2542 break;
1d5e2ed0 2543 case CMD_UNABORTABLE:
d1e8beac 2544 hpsa_print_cmd(h, "unabortable", cp);
1d5e2ed0 2545 break;
25163bd5
WS
2546 case CMD_CTLR_LOCKUP:
2547 hpsa_print_cmd(h, "controller lockup detected", cp);
2548 break;
edd16368 2549 default:
d1e8beac
SC
2550 hpsa_print_cmd(h, "unknown status", cp);
2551 dev_warn(d, "Unknown command status %x\n",
edd16368
SC
2552 ei->CommandStatus);
2553 }
2554}
2555
2556static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
b7bb24eb 2557 u16 page, unsigned char *buf,
edd16368
SC
2558 unsigned char bufsize)
2559{
2560 int rc = IO_OK;
2561 struct CommandList *c;
2562 struct ErrorInfo *ei;
2563
45fcb86e 2564 c = cmd_alloc(h);
edd16368 2565
a2dac136
SC
2566 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2567 page, scsi3addr, TYPE_CMD)) {
2568 rc = -1;
2569 goto out;
2570 }
25163bd5
WS
2571 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2572 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2573 if (rc)
2574 goto out;
edd16368
SC
2575 ei = c->err_info;
2576 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 2577 hpsa_scsi_interpret_error(h, c);
edd16368
SC
2578 rc = -1;
2579 }
a2dac136 2580out:
45fcb86e 2581 cmd_free(h, c);
edd16368
SC
2582 return rc;
2583}
2584
316b221a
SC
2585static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2586 unsigned char *scsi3addr, unsigned char page,
2587 struct bmic_controller_parameters *buf, size_t bufsize)
2588{
2589 int rc = IO_OK;
2590 struct CommandList *c;
2591 struct ErrorInfo *ei;
2592
45fcb86e 2593 c = cmd_alloc(h);
316b221a
SC
2594 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2595 page, scsi3addr, TYPE_CMD)) {
2596 rc = -1;
2597 goto out;
2598 }
25163bd5
WS
2599 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2600 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2601 if (rc)
2602 goto out;
316b221a
SC
2603 ei = c->err_info;
2604 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2605 hpsa_scsi_interpret_error(h, c);
2606 rc = -1;
2607 }
2608out:
45fcb86e 2609 cmd_free(h, c);
316b221a 2610 return rc;
bf43caf3 2611}
316b221a 2612
bf711ac6 2613static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
25163bd5 2614 u8 reset_type, int reply_queue)
edd16368
SC
2615{
2616 int rc = IO_OK;
2617 struct CommandList *c;
2618 struct ErrorInfo *ei;
2619
45fcb86e 2620 c = cmd_alloc(h);
edd16368 2621
edd16368 2622
a2dac136 2623 /* fill_cmd can't fail here, no data buffer to map. */
bf711ac6
ST
2624 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2625 scsi3addr, TYPE_MSG);
2626 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
25163bd5
WS
2627 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2628 if (rc) {
2629 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2630 goto out;
2631 }
edd16368
SC
2632 /* no unmap needed here because no data xfer. */
2633
2634 ei = c->err_info;
2635 if (ei->CommandStatus != 0) {
d1e8beac 2636 hpsa_scsi_interpret_error(h, c);
edd16368
SC
2637 rc = -1;
2638 }
25163bd5 2639out:
45fcb86e 2640 cmd_free(h, c);
edd16368
SC
2641 return rc;
2642}
2643
d604f533
WS
2644static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2645 struct hpsa_scsi_dev_t *dev,
2646 unsigned char *scsi3addr)
2647{
2648 int i;
2649 bool match = false;
2650 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2651 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2652
2653 if (hpsa_is_cmd_idle(c))
2654 return false;
2655
2656 switch (c->cmd_type) {
2657 case CMD_SCSI:
2658 case CMD_IOCTL_PEND:
2659 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2660 sizeof(c->Header.LUN.LunAddrBytes));
2661 break;
2662
2663 case CMD_IOACCEL1:
2664 case CMD_IOACCEL2:
2665 if (c->phys_disk == dev) {
2666 /* HBA mode match */
2667 match = true;
2668 } else {
2669 /* Possible RAID mode -- check each phys dev. */
2670 /* FIXME: Do we need to take out a lock here? If
2671 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2672 * instead. */
2673 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2674 /* FIXME: an alternate test might be
2675 *
2676 * match = dev->phys_disk[i]->ioaccel_handle
2677 * == c2->scsi_nexus; */
2678 match = dev->phys_disk[i] == c->phys_disk;
2679 }
2680 }
2681 break;
2682
2683 case IOACCEL2_TMF:
2684 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2685 match = dev->phys_disk[i]->ioaccel_handle ==
2686 le32_to_cpu(ac->it_nexus);
2687 }
2688 break;
2689
2690 case 0: /* The command is in the middle of being initialized. */
2691 match = false;
2692 break;
2693
2694 default:
2695 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2696 c->cmd_type);
2697 BUG();
2698 }
2699
2700 return match;
2701}
2702
2703static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2704 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2705{
2706 int i;
2707 int rc = 0;
2708
2709 /* We can really only handle one reset at a time */
2710 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2711 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2712 return -EINTR;
2713 }
2714
2715 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2716
2717 for (i = 0; i < h->nr_cmds; i++) {
2718 struct CommandList *c = h->cmd_pool + i;
2719 int refcount = atomic_inc_return(&c->refcount);
2720
2721 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2722 unsigned long flags;
2723
2724 /*
2725 * Mark the target command as having a reset pending,
2726 * then lock a lock so that the command cannot complete
2727 * while we're considering it. If the command is not
2728 * idle then count it; otherwise revoke the event.
2729 */
2730 c->reset_pending = dev;
2731 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
2732 if (!hpsa_is_cmd_idle(c))
2733 atomic_inc(&dev->reset_cmds_out);
2734 else
2735 c->reset_pending = NULL;
2736 spin_unlock_irqrestore(&h->lock, flags);
2737 }
2738
2739 cmd_free(h, c);
2740 }
2741
2742 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2743 if (!rc)
2744 wait_event(h->event_sync_wait_queue,
2745 atomic_read(&dev->reset_cmds_out) == 0 ||
2746 lockup_detected(h));
2747
2748 if (unlikely(lockup_detected(h))) {
77678d3a
DB
2749 dev_warn(&h->pdev->dev,
2750 "Controller lockup detected during reset wait\n");
2751 rc = -ENODEV;
2752 }
d604f533
WS
2753
2754 if (unlikely(rc))
2755 atomic_set(&dev->reset_cmds_out, 0);
2756
2757 mutex_unlock(&h->reset_mutex);
2758 return rc;
2759}
2760
edd16368
SC
2761static void hpsa_get_raid_level(struct ctlr_info *h,
2762 unsigned char *scsi3addr, unsigned char *raid_level)
2763{
2764 int rc;
2765 unsigned char *buf;
2766
2767 *raid_level = RAID_UNKNOWN;
2768 buf = kzalloc(64, GFP_KERNEL);
2769 if (!buf)
2770 return;
b7bb24eb 2771 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
edd16368
SC
2772 if (rc == 0)
2773 *raid_level = buf[8];
2774 if (*raid_level > RAID_UNKNOWN)
2775 *raid_level = RAID_UNKNOWN;
2776 kfree(buf);
2777 return;
2778}
2779
283b4a9b
SC
2780#define HPSA_MAP_DEBUG
2781#ifdef HPSA_MAP_DEBUG
2782static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2783 struct raid_map_data *map_buff)
2784{
2785 struct raid_map_disk_data *dd = &map_buff->data[0];
2786 int map, row, col;
2787 u16 map_cnt, row_cnt, disks_per_row;
2788
2789 if (rc != 0)
2790 return;
2791
2ba8bfc8
SC
2792 /* Show details only if debugging has been activated. */
2793 if (h->raid_offload_debug < 2)
2794 return;
2795
283b4a9b
SC
2796 dev_info(&h->pdev->dev, "structure_size = %u\n",
2797 le32_to_cpu(map_buff->structure_size));
2798 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2799 le32_to_cpu(map_buff->volume_blk_size));
2800 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2801 le64_to_cpu(map_buff->volume_blk_cnt));
2802 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2803 map_buff->phys_blk_shift);
2804 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2805 map_buff->parity_rotation_shift);
2806 dev_info(&h->pdev->dev, "strip_size = %u\n",
2807 le16_to_cpu(map_buff->strip_size));
2808 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2809 le64_to_cpu(map_buff->disk_starting_blk));
2810 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2811 le64_to_cpu(map_buff->disk_blk_cnt));
2812 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2813 le16_to_cpu(map_buff->data_disks_per_row));
2814 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2815 le16_to_cpu(map_buff->metadata_disks_per_row));
2816 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2817 le16_to_cpu(map_buff->row_cnt));
2818 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2819 le16_to_cpu(map_buff->layout_map_count));
2b08b3e9 2820 dev_info(&h->pdev->dev, "flags = 0x%x\n",
dd0e19f3 2821 le16_to_cpu(map_buff->flags));
2b08b3e9
DB
2822 dev_info(&h->pdev->dev, "encrypytion = %s\n",
2823 le16_to_cpu(map_buff->flags) &
2824 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
dd0e19f3
ST
2825 dev_info(&h->pdev->dev, "dekindex = %u\n",
2826 le16_to_cpu(map_buff->dekindex));
283b4a9b
SC
2827 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2828 for (map = 0; map < map_cnt; map++) {
2829 dev_info(&h->pdev->dev, "Map%u:\n", map);
2830 row_cnt = le16_to_cpu(map_buff->row_cnt);
2831 for (row = 0; row < row_cnt; row++) {
2832 dev_info(&h->pdev->dev, " Row%u:\n", row);
2833 disks_per_row =
2834 le16_to_cpu(map_buff->data_disks_per_row);
2835 for (col = 0; col < disks_per_row; col++, dd++)
2836 dev_info(&h->pdev->dev,
2837 " D%02u: h=0x%04x xor=%u,%u\n",
2838 col, dd->ioaccel_handle,
2839 dd->xor_mult[0], dd->xor_mult[1]);
2840 disks_per_row =
2841 le16_to_cpu(map_buff->metadata_disks_per_row);
2842 for (col = 0; col < disks_per_row; col++, dd++)
2843 dev_info(&h->pdev->dev,
2844 " M%02u: h=0x%04x xor=%u,%u\n",
2845 col, dd->ioaccel_handle,
2846 dd->xor_mult[0], dd->xor_mult[1]);
2847 }
2848 }
2849}
2850#else
2851static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2852 __attribute__((unused)) int rc,
2853 __attribute__((unused)) struct raid_map_data *map_buff)
2854{
2855}
2856#endif
2857
2858static int hpsa_get_raid_map(struct ctlr_info *h,
2859 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2860{
2861 int rc = 0;
2862 struct CommandList *c;
2863 struct ErrorInfo *ei;
2864
45fcb86e 2865 c = cmd_alloc(h);
bf43caf3 2866
283b4a9b
SC
2867 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2868 sizeof(this_device->raid_map), 0,
2869 scsi3addr, TYPE_CMD)) {
2dd02d74
RE
2870 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
2871 cmd_free(h, c);
2872 return -1;
283b4a9b 2873 }
25163bd5
WS
2874 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2875 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2876 if (rc)
2877 goto out;
283b4a9b
SC
2878 ei = c->err_info;
2879 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 2880 hpsa_scsi_interpret_error(h, c);
25163bd5
WS
2881 rc = -1;
2882 goto out;
283b4a9b 2883 }
45fcb86e 2884 cmd_free(h, c);
283b4a9b
SC
2885
2886 /* @todo in the future, dynamically allocate RAID map memory */
2887 if (le32_to_cpu(this_device->raid_map.structure_size) >
2888 sizeof(this_device->raid_map)) {
2889 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2890 rc = -1;
2891 }
2892 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2893 return rc;
25163bd5
WS
2894out:
2895 cmd_free(h, c);
2896 return rc;
283b4a9b
SC
2897}
2898
03383736
DB
2899static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2900 unsigned char scsi3addr[], u16 bmic_device_index,
2901 struct bmic_identify_physical_device *buf, size_t bufsize)
2902{
2903 int rc = IO_OK;
2904 struct CommandList *c;
2905 struct ErrorInfo *ei;
2906
2907 c = cmd_alloc(h);
2908 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2909 0, RAID_CTLR_LUNID, TYPE_CMD);
2910 if (rc)
2911 goto out;
2912
2913 c->Request.CDB[2] = bmic_device_index & 0xff;
2914 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2915
25163bd5
WS
2916 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
2917 NO_TIMEOUT);
03383736
DB
2918 ei = c->err_info;
2919 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2920 hpsa_scsi_interpret_error(h, c);
2921 rc = -1;
2922 }
2923out:
2924 cmd_free(h, c);
2925 return rc;
2926}
2927
1b70150a
SC
2928static int hpsa_vpd_page_supported(struct ctlr_info *h,
2929 unsigned char scsi3addr[], u8 page)
2930{
2931 int rc;
2932 int i;
2933 int pages;
2934 unsigned char *buf, bufsize;
2935
2936 buf = kzalloc(256, GFP_KERNEL);
2937 if (!buf)
2938 return 0;
2939
2940 /* Get the size of the page list first */
2941 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2942 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2943 buf, HPSA_VPD_HEADER_SZ);
2944 if (rc != 0)
2945 goto exit_unsupported;
2946 pages = buf[3];
2947 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2948 bufsize = pages + HPSA_VPD_HEADER_SZ;
2949 else
2950 bufsize = 255;
2951
2952 /* Get the whole VPD page list */
2953 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2954 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2955 buf, bufsize);
2956 if (rc != 0)
2957 goto exit_unsupported;
2958
2959 pages = buf[3];
2960 for (i = 1; i <= pages; i++)
2961 if (buf[3 + i] == page)
2962 goto exit_supported;
2963exit_unsupported:
2964 kfree(buf);
2965 return 0;
2966exit_supported:
2967 kfree(buf);
2968 return 1;
2969}
2970
283b4a9b
SC
2971static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2972 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2973{
2974 int rc;
2975 unsigned char *buf;
2976 u8 ioaccel_status;
2977
2978 this_device->offload_config = 0;
2979 this_device->offload_enabled = 0;
41ce4c35 2980 this_device->offload_to_be_enabled = 0;
283b4a9b
SC
2981
2982 buf = kzalloc(64, GFP_KERNEL);
2983 if (!buf)
2984 return;
1b70150a
SC
2985 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2986 goto out;
283b4a9b 2987 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
b7bb24eb 2988 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
283b4a9b
SC
2989 if (rc != 0)
2990 goto out;
2991
2992#define IOACCEL_STATUS_BYTE 4
2993#define OFFLOAD_CONFIGURED_BIT 0x01
2994#define OFFLOAD_ENABLED_BIT 0x02
2995 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2996 this_device->offload_config =
2997 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2998 if (this_device->offload_config) {
2999 this_device->offload_enabled =
3000 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3001 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3002 this_device->offload_enabled = 0;
3003 }
41ce4c35 3004 this_device->offload_to_be_enabled = this_device->offload_enabled;
283b4a9b
SC
3005out:
3006 kfree(buf);
3007 return;
3008}
3009
edd16368
SC
3010/* Get the device id from inquiry page 0x83 */
3011static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3012 unsigned char *device_id, int buflen)
3013{
3014 int rc;
3015 unsigned char *buf;
3016
3017 if (buflen > 16)
3018 buflen = 16;
3019 buf = kzalloc(64, GFP_KERNEL);
3020 if (!buf)
a84d794d 3021 return -ENOMEM;
b7bb24eb 3022 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
edd16368
SC
3023 if (rc == 0)
3024 memcpy(device_id, &buf[8], buflen);
3025 kfree(buf);
3026 return rc != 0;
3027}
3028
3029static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
03383736 3030 void *buf, int bufsize,
edd16368
SC
3031 int extended_response)
3032{
3033 int rc = IO_OK;
3034 struct CommandList *c;
3035 unsigned char scsi3addr[8];
3036 struct ErrorInfo *ei;
3037
45fcb86e 3038 c = cmd_alloc(h);
bf43caf3 3039
e89c0ae7
SC
3040 /* address the controller */
3041 memset(scsi3addr, 0, sizeof(scsi3addr));
a2dac136
SC
3042 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3043 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3044 rc = -1;
3045 goto out;
3046 }
edd16368
SC
3047 if (extended_response)
3048 c->Request.CDB[1] = extended_response;
25163bd5
WS
3049 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3050 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3051 if (rc)
3052 goto out;
edd16368
SC
3053 ei = c->err_info;
3054 if (ei->CommandStatus != 0 &&
3055 ei->CommandStatus != CMD_DATA_UNDERRUN) {
d1e8beac 3056 hpsa_scsi_interpret_error(h, c);
edd16368 3057 rc = -1;
283b4a9b 3058 } else {
03383736
DB
3059 struct ReportLUNdata *rld = buf;
3060
3061 if (rld->extended_response_flag != extended_response) {
283b4a9b
SC
3062 dev_err(&h->pdev->dev,
3063 "report luns requested format %u, got %u\n",
3064 extended_response,
03383736 3065 rld->extended_response_flag);
283b4a9b
SC
3066 rc = -1;
3067 }
edd16368 3068 }
a2dac136 3069out:
45fcb86e 3070 cmd_free(h, c);
edd16368
SC
3071 return rc;
3072}
3073
3074static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
03383736 3075 struct ReportExtendedLUNdata *buf, int bufsize)
edd16368 3076{
03383736
DB
3077 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3078 HPSA_REPORT_PHYS_EXTENDED);
edd16368
SC
3079}
3080
3081static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3082 struct ReportLUNdata *buf, int bufsize)
3083{
3084 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3085}
3086
3087static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3088 int bus, int target, int lun)
3089{
3090 device->bus = bus;
3091 device->target = target;
3092 device->lun = lun;
3093}
3094
9846590e
SC
3095/* Use VPD inquiry to get details of volume status */
3096static int hpsa_get_volume_status(struct ctlr_info *h,
3097 unsigned char scsi3addr[])
3098{
3099 int rc;
3100 int status;
3101 int size;
3102 unsigned char *buf;
3103
3104 buf = kzalloc(64, GFP_KERNEL);
3105 if (!buf)
3106 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3107
3108 /* Does controller have VPD for logical volume status? */
24a4b078 3109 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
9846590e 3110 goto exit_failed;
9846590e
SC
3111
3112 /* Get the size of the VPD return buffer */
3113 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3114 buf, HPSA_VPD_HEADER_SZ);
24a4b078 3115 if (rc != 0)
9846590e 3116 goto exit_failed;
9846590e
SC
3117 size = buf[3];
3118
3119 /* Now get the whole VPD buffer */
3120 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3121 buf, size + HPSA_VPD_HEADER_SZ);
24a4b078 3122 if (rc != 0)
9846590e 3123 goto exit_failed;
9846590e
SC
3124 status = buf[4]; /* status byte */
3125
3126 kfree(buf);
3127 return status;
3128exit_failed:
3129 kfree(buf);
3130 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3131}
3132
3133/* Determine offline status of a volume.
3134 * Return either:
3135 * 0 (not offline)
67955ba3 3136 * 0xff (offline for unknown reasons)
9846590e
SC
3137 * # (integer code indicating one of several NOT READY states
3138 * describing why a volume is to be kept offline)
3139 */
67955ba3 3140static int hpsa_volume_offline(struct ctlr_info *h,
9846590e
SC
3141 unsigned char scsi3addr[])
3142{
3143 struct CommandList *c;
9437ac43
SC
3144 unsigned char *sense;
3145 u8 sense_key, asc, ascq;
3146 int sense_len;
25163bd5 3147 int rc, ldstat = 0;
9846590e
SC
3148 u16 cmd_status;
3149 u8 scsi_status;
3150#define ASC_LUN_NOT_READY 0x04
3151#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3152#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3153
3154 c = cmd_alloc(h);
bf43caf3 3155
9846590e 3156 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
25163bd5
WS
3157 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3158 if (rc) {
3159 cmd_free(h, c);
3160 return 0;
3161 }
9846590e 3162 sense = c->err_info->SenseInfo;
9437ac43
SC
3163 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3164 sense_len = sizeof(c->err_info->SenseInfo);
3165 else
3166 sense_len = c->err_info->SenseLen;
3167 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
9846590e
SC
3168 cmd_status = c->err_info->CommandStatus;
3169 scsi_status = c->err_info->ScsiStatus;
3170 cmd_free(h, c);
3171 /* Is the volume 'not ready'? */
3172 if (cmd_status != CMD_TARGET_STATUS ||
3173 scsi_status != SAM_STAT_CHECK_CONDITION ||
3174 sense_key != NOT_READY ||
3175 asc != ASC_LUN_NOT_READY) {
3176 return 0;
3177 }
3178
3179 /* Determine the reason for not ready state */
3180 ldstat = hpsa_get_volume_status(h, scsi3addr);
3181
3182 /* Keep volume offline in certain cases: */
3183 switch (ldstat) {
3184 case HPSA_LV_UNDERGOING_ERASE:
3185 case HPSA_LV_UNDERGOING_RPI:
3186 case HPSA_LV_PENDING_RPI:
3187 case HPSA_LV_ENCRYPTED_NO_KEY:
3188 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3189 case HPSA_LV_UNDERGOING_ENCRYPTION:
3190 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3191 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3192 return ldstat;
3193 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3194 /* If VPD status page isn't available,
3195 * use ASC/ASCQ to determine state
3196 */
3197 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3198 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3199 return ldstat;
3200 break;
3201 default:
3202 break;
3203 }
3204 return 0;
3205}
3206
9b5c48c2
SC
3207/*
3208 * Find out if a logical device supports aborts by simply trying one.
3209 * Smart Array may claim not to support aborts on logical drives, but
3210 * if a MSA2000 * is connected, the drives on that will be presented
3211 * by the Smart Array as logical drives, and aborts may be sent to
3212 * those devices successfully. So the simplest way to find out is
3213 * to simply try an abort and see how the device responds.
3214 */
3215static int hpsa_device_supports_aborts(struct ctlr_info *h,
3216 unsigned char *scsi3addr)
3217{
3218 struct CommandList *c;
3219 struct ErrorInfo *ei;
3220 int rc = 0;
3221
3222 u64 tag = (u64) -1; /* bogus tag */
3223
3224 /* Assume that physical devices support aborts */
3225 if (!is_logical_dev_addr_mode(scsi3addr))
3226 return 1;
3227
3228 c = cmd_alloc(h);
bf43caf3 3229
9b5c48c2
SC
3230 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3231 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3232 /* no unmap needed here because no data xfer. */
3233 ei = c->err_info;
3234 switch (ei->CommandStatus) {
3235 case CMD_INVALID:
3236 rc = 0;
3237 break;
3238 case CMD_UNABORTABLE:
3239 case CMD_ABORT_FAILED:
3240 rc = 1;
3241 break;
9437ac43
SC
3242 case CMD_TMF_STATUS:
3243 rc = hpsa_evaluate_tmf_status(h, c);
3244 break;
9b5c48c2
SC
3245 default:
3246 rc = 0;
3247 break;
3248 }
3249 cmd_free(h, c);
3250 return rc;
3251}
3252
edd16368 3253static int hpsa_update_device_info(struct ctlr_info *h,
0b0e1d6c
SC
3254 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3255 unsigned char *is_OBDR_device)
edd16368 3256{
0b0e1d6c
SC
3257
3258#define OBDR_SIG_OFFSET 43
3259#define OBDR_TAPE_SIG "$DR-10"
3260#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3261#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3262
ea6d3bc3 3263 unsigned char *inq_buff;
0b0e1d6c 3264 unsigned char *obdr_sig;
edd16368 3265
ea6d3bc3 3266 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
edd16368
SC
3267 if (!inq_buff)
3268 goto bail_out;
3269
edd16368
SC
3270 /* Do an inquiry to the device to see what it is. */
3271 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3272 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3273 /* Inquiry failed (msg printed already) */
3274 dev_err(&h->pdev->dev,
3275 "hpsa_update_device_info: inquiry failed\n");
3276 goto bail_out;
3277 }
3278
edd16368
SC
3279 this_device->devtype = (inq_buff[0] & 0x1f);
3280 memcpy(this_device->scsi3addr, scsi3addr, 8);
3281 memcpy(this_device->vendor, &inq_buff[8],
3282 sizeof(this_device->vendor));
3283 memcpy(this_device->model, &inq_buff[16],
3284 sizeof(this_device->model));
edd16368
SC
3285 memset(this_device->device_id, 0,
3286 sizeof(this_device->device_id));
3287 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3288 sizeof(this_device->device_id));
3289
3290 if (this_device->devtype == TYPE_DISK &&
283b4a9b 3291 is_logical_dev_addr_mode(scsi3addr)) {
67955ba3
SC
3292 int volume_offline;
3293
edd16368 3294 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
283b4a9b
SC
3295 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3296 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
67955ba3
SC
3297 volume_offline = hpsa_volume_offline(h, scsi3addr);
3298 if (volume_offline < 0 || volume_offline > 0xff)
3299 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3300 this_device->volume_offline = volume_offline & 0xff;
283b4a9b 3301 } else {
edd16368 3302 this_device->raid_level = RAID_UNKNOWN;
283b4a9b
SC
3303 this_device->offload_config = 0;
3304 this_device->offload_enabled = 0;
41ce4c35 3305 this_device->offload_to_be_enabled = 0;
a3144e0b 3306 this_device->hba_ioaccel_enabled = 0;
9846590e 3307 this_device->volume_offline = 0;
03383736 3308 this_device->queue_depth = h->nr_cmds;
283b4a9b 3309 }
edd16368 3310
0b0e1d6c
SC
3311 if (is_OBDR_device) {
3312 /* See if this is a One-Button-Disaster-Recovery device
3313 * by looking for "$DR-10" at offset 43 in inquiry data.
3314 */
3315 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3316 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3317 strncmp(obdr_sig, OBDR_TAPE_SIG,
3318 OBDR_SIG_LEN) == 0);
3319 }
edd16368
SC
3320 kfree(inq_buff);
3321 return 0;
3322
3323bail_out:
3324 kfree(inq_buff);
3325 return 1;
3326}
3327
9b5c48c2
SC
3328static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3329 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3330{
3331 unsigned long flags;
3332 int rc, entry;
3333 /*
3334 * See if this device supports aborts. If we already know
3335 * the device, we already know if it supports aborts, otherwise
3336 * we have to find out if it supports aborts by trying one.
3337 */
3338 spin_lock_irqsave(&h->devlock, flags);
3339 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3340 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3341 entry >= 0 && entry < h->ndevices) {
3342 dev->supports_aborts = h->dev[entry]->supports_aborts;
3343 spin_unlock_irqrestore(&h->devlock, flags);
3344 } else {
3345 spin_unlock_irqrestore(&h->devlock, flags);
3346 dev->supports_aborts =
3347 hpsa_device_supports_aborts(h, scsi3addr);
3348 if (dev->supports_aborts < 0)
3349 dev->supports_aborts = 0;
3350 }
3351}
3352
4f4eb9f1 3353static unsigned char *ext_target_model[] = {
edd16368
SC
3354 "MSA2012",
3355 "MSA2024",
3356 "MSA2312",
3357 "MSA2324",
fda38518 3358 "P2000 G3 SAS",
e06c8e5c 3359 "MSA 2040 SAS",
edd16368
SC
3360 NULL,
3361};
3362
4f4eb9f1 3363static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
edd16368
SC
3364{
3365 int i;
3366
4f4eb9f1
ST
3367 for (i = 0; ext_target_model[i]; i++)
3368 if (strncmp(device->model, ext_target_model[i],
3369 strlen(ext_target_model[i])) == 0)
edd16368
SC
3370 return 1;
3371 return 0;
3372}
3373
3374/* Helper function to assign bus, target, lun mapping of devices.
4f4eb9f1 3375 * Puts non-external target logical volumes on bus 0, external target logical
edd16368
SC
3376 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
3377 * Logical drive target and lun are assigned at this time, but
3378 * physical device lun and target assignment are deferred (assigned
3379 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3380 */
3381static void figure_bus_target_lun(struct ctlr_info *h,
1f310bde 3382 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
edd16368 3383{
1f310bde
SC
3384 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
3385
3386 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3387 /* physical device, target and lun filled in later */
edd16368 3388 if (is_hba_lunid(lunaddrbytes))
1f310bde 3389 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
edd16368 3390 else
1f310bde
SC
3391 /* defer target, lun assignment for physical devices */
3392 hpsa_set_bus_target_lun(device, 2, -1, -1);
3393 return;
3394 }
3395 /* It's a logical device */
4f4eb9f1
ST
3396 if (is_ext_target(h, device)) {
3397 /* external target way, put logicals on bus 1
1f310bde
SC
3398 * and match target/lun numbers box
3399 * reports, other smart array, bus 0, target 0, match lunid
3400 */
3401 hpsa_set_bus_target_lun(device,
3402 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
3403 return;
edd16368 3404 }
1f310bde 3405 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
edd16368
SC
3406}
3407
3408/*
3409 * If there is no lun 0 on a target, linux won't find any devices.
4f4eb9f1 3410 * For the external targets (arrays), we have to manually detect the enclosure
edd16368
SC
3411 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3412 * it for some reason. *tmpdevice is the target we're adding,
3413 * this_device is a pointer into the current element of currentsd[]
3414 * that we're building up in update_scsi_devices(), below.
3415 * lunzerobits is a bitmap that tracks which targets already have a
3416 * lun 0 assigned.
3417 * Returns 1 if an enclosure was added, 0 if not.
3418 */
4f4eb9f1 3419static int add_ext_target_dev(struct ctlr_info *h,
edd16368 3420 struct hpsa_scsi_dev_t *tmpdevice,
01a02ffc 3421 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
4f4eb9f1 3422 unsigned long lunzerobits[], int *n_ext_target_devs)
edd16368
SC
3423{
3424 unsigned char scsi3addr[8];
3425
1f310bde 3426 if (test_bit(tmpdevice->target, lunzerobits))
edd16368
SC
3427 return 0; /* There is already a lun 0 on this target. */
3428
3429 if (!is_logical_dev_addr_mode(lunaddrbytes))
3430 return 0; /* It's the logical targets that may lack lun 0. */
3431
4f4eb9f1
ST
3432 if (!is_ext_target(h, tmpdevice))
3433 return 0; /* Only external target devices have this problem. */
edd16368 3434
1f310bde 3435 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
edd16368
SC
3436 return 0;
3437
c4f8a299 3438 memset(scsi3addr, 0, 8);
1f310bde 3439 scsi3addr[3] = tmpdevice->target;
edd16368
SC
3440 if (is_hba_lunid(scsi3addr))
3441 return 0; /* Don't add the RAID controller here. */
3442
339b2b14
SC
3443 if (is_scsi_rev_5(h))
3444 return 0; /* p1210m doesn't need to do this. */
3445
4f4eb9f1 3446 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
aca4a520
ST
3447 dev_warn(&h->pdev->dev, "Maximum number of external "
3448 "target devices exceeded. Check your hardware "
edd16368
SC
3449 "configuration.");
3450 return 0;
3451 }
3452
0b0e1d6c 3453 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
edd16368 3454 return 0;
4f4eb9f1 3455 (*n_ext_target_devs)++;
1f310bde
SC
3456 hpsa_set_bus_target_lun(this_device,
3457 tmpdevice->bus, tmpdevice->target, 0);
9b5c48c2 3458 hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
1f310bde 3459 set_bit(tmpdevice->target, lunzerobits);
edd16368
SC
3460 return 1;
3461}
3462
54b6e9e9
ST
3463/*
3464 * Get address of physical disk used for an ioaccel2 mode command:
3465 * 1. Extract ioaccel2 handle from the command.
3466 * 2. Find a matching ioaccel2 handle from list of physical disks.
3467 * 3. Return:
3468 * 1 and set scsi3addr to address of matching physical
3469 * 0 if no matching physical disk was found.
3470 */
3471static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3472 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3473{
41ce4c35
SC
3474 struct io_accel2_cmd *c2 =
3475 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3476 unsigned long flags;
54b6e9e9 3477 int i;
54b6e9e9 3478
41ce4c35
SC
3479 spin_lock_irqsave(&h->devlock, flags);
3480 for (i = 0; i < h->ndevices; i++)
3481 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3482 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3483 sizeof(h->dev[i]->scsi3addr));
3484 spin_unlock_irqrestore(&h->devlock, flags);
3485 return 1;
3486 }
3487 spin_unlock_irqrestore(&h->devlock, flags);
3488 return 0;
54b6e9e9 3489}
41ce4c35 3490
edd16368
SC
3491/*
3492 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3493 * logdev. The number of luns in physdev and logdev are returned in
3494 * *nphysicals and *nlogicals, respectively.
3495 * Returns 0 on success, -1 otherwise.
3496 */
3497static int hpsa_gather_lun_info(struct ctlr_info *h,
03383736 3498 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
01a02ffc 3499 struct ReportLUNdata *logdev, u32 *nlogicals)
edd16368 3500{
03383736 3501 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
edd16368
SC
3502 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3503 return -1;
3504 }
03383736 3505 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
edd16368 3506 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
03383736
DB
3507 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3508 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
edd16368
SC
3509 *nphysicals = HPSA_MAX_PHYS_LUN;
3510 }
03383736 3511 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
edd16368
SC
3512 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3513 return -1;
3514 }
6df1e954 3515 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
edd16368
SC
3516 /* Reject Logicals in excess of our max capability. */
3517 if (*nlogicals > HPSA_MAX_LUN) {
3518 dev_warn(&h->pdev->dev,
3519 "maximum logical LUNs (%d) exceeded. "
3520 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3521 *nlogicals - HPSA_MAX_LUN);
3522 *nlogicals = HPSA_MAX_LUN;
3523 }
3524 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3525 dev_warn(&h->pdev->dev,
3526 "maximum logical + physical LUNs (%d) exceeded. "
3527 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3528 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3529 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3530 }
3531 return 0;
3532}
3533
42a91641
DB
3534static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3535 int i, int nphysicals, int nlogicals,
a93aa1fe 3536 struct ReportExtendedLUNdata *physdev_list,
339b2b14
SC
3537 struct ReportLUNdata *logdev_list)
3538{
3539 /* Helper function, figure out where the LUN ID info is coming from
3540 * given index i, lists of physical and logical devices, where in
3541 * the list the raid controller is supposed to appear (first or last)
3542 */
3543
3544 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3545 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3546
3547 if (i == raid_ctlr_position)
3548 return RAID_CTLR_LUNID;
3549
3550 if (i < logicals_start)
d5b5d964
SC
3551 return &physdev_list->LUN[i -
3552 (raid_ctlr_position == 0)].lunid[0];
339b2b14
SC
3553
3554 if (i < last_device)
3555 return &logdev_list->LUN[i - nphysicals -
3556 (raid_ctlr_position == 0)][0];
3557 BUG();
3558 return NULL;
3559}
3560
316b221a
SC
3561static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3562{
3563 int rc;
6e8e8088 3564 int hba_mode_enabled;
316b221a
SC
3565 struct bmic_controller_parameters *ctlr_params;
3566 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3567 GFP_KERNEL);
3568
3569 if (!ctlr_params)
96444fbb 3570 return -ENOMEM;
316b221a
SC
3571 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3572 sizeof(struct bmic_controller_parameters));
96444fbb 3573 if (rc) {
316b221a 3574 kfree(ctlr_params);
96444fbb 3575 return rc;
316b221a 3576 }
6e8e8088
JH
3577
3578 hba_mode_enabled =
3579 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3580 kfree(ctlr_params);
3581 return hba_mode_enabled;
316b221a
SC
3582}
3583
03383736
DB
3584/* get physical drive ioaccel handle and queue depth */
3585static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3586 struct hpsa_scsi_dev_t *dev,
3587 u8 *lunaddrbytes,
3588 struct bmic_identify_physical_device *id_phys)
3589{
3590 int rc;
3591 struct ext_report_lun_entry *rle =
3592 (struct ext_report_lun_entry *) lunaddrbytes;
3593
3594 dev->ioaccel_handle = rle->ioaccel_handle;
a3144e0b
JH
3595 if (PHYS_IOACCEL(lunaddrbytes) && dev->ioaccel_handle)
3596 dev->hba_ioaccel_enabled = 1;
03383736
DB
3597 memset(id_phys, 0, sizeof(*id_phys));
3598 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3599 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3600 sizeof(*id_phys));
3601 if (!rc)
3602 /* Reserve space for FW operations */
3603#define DRIVE_CMDS_RESERVED_FOR_FW 2
3604#define DRIVE_QUEUE_DEPTH 7
3605 dev->queue_depth =
3606 le16_to_cpu(id_phys->current_queue_depth_limit) -
3607 DRIVE_CMDS_RESERVED_FOR_FW;
3608 else
3609 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3610 atomic_set(&dev->ioaccel_cmds_out, 0);
d604f533 3611 atomic_set(&dev->reset_cmds_out, 0);
03383736
DB
3612}
3613
edd16368
SC
3614static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3615{
3616 /* the idea here is we could get notified
3617 * that some devices have changed, so we do a report
3618 * physical luns and report logical luns cmd, and adjust
3619 * our list of devices accordingly.
3620 *
3621 * The scsi3addr's of devices won't change so long as the
3622 * adapter is not reset. That means we can rescan and
3623 * tell which devices we already know about, vs. new
3624 * devices, vs. disappearing devices.
3625 */
a93aa1fe 3626 struct ReportExtendedLUNdata *physdev_list = NULL;
edd16368 3627 struct ReportLUNdata *logdev_list = NULL;
03383736 3628 struct bmic_identify_physical_device *id_phys = NULL;
01a02ffc
SC
3629 u32 nphysicals = 0;
3630 u32 nlogicals = 0;
3631 u32 ndev_allocated = 0;
edd16368
SC
3632 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3633 int ncurrent = 0;
4f4eb9f1 3634 int i, n_ext_target_devs, ndevs_to_allocate;
339b2b14 3635 int raid_ctlr_position;
2bbf5c7f 3636 int rescan_hba_mode;
aca4a520 3637 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
edd16368 3638
cfe5badc 3639 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
92084715
SC
3640 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3641 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
edd16368 3642 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
03383736 3643 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
edd16368 3644
03383736
DB
3645 if (!currentsd || !physdev_list || !logdev_list ||
3646 !tmpdevice || !id_phys) {
edd16368
SC
3647 dev_err(&h->pdev->dev, "out of memory\n");
3648 goto out;
3649 }
3650 memset(lunzerobits, 0, sizeof(lunzerobits));
3651
316b221a 3652 rescan_hba_mode = hpsa_hba_mode_enabled(h);
96444fbb
JH
3653 if (rescan_hba_mode < 0)
3654 goto out;
316b221a
SC
3655
3656 if (!h->hba_mode_enabled && rescan_hba_mode)
3657 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3658 else if (h->hba_mode_enabled && !rescan_hba_mode)
3659 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3660
3661 h->hba_mode_enabled = rescan_hba_mode;
3662
03383736
DB
3663 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3664 logdev_list, &nlogicals))
edd16368
SC
3665 goto out;
3666
aca4a520
ST
3667 /* We might see up to the maximum number of logical and physical disks
3668 * plus external target devices, and a device for the local RAID
3669 * controller.
edd16368 3670 */
aca4a520 3671 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
edd16368
SC
3672
3673 /* Allocate the per device structures */
3674 for (i = 0; i < ndevs_to_allocate; i++) {
b7ec021f
ST
3675 if (i >= HPSA_MAX_DEVICES) {
3676 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3677 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3678 ndevs_to_allocate - HPSA_MAX_DEVICES);
3679 break;
3680 }
3681
edd16368
SC
3682 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3683 if (!currentsd[i]) {
3684 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3685 __FILE__, __LINE__);
3686 goto out;
3687 }
3688 ndev_allocated++;
3689 }
3690
8645291b 3691 if (is_scsi_rev_5(h))
339b2b14
SC
3692 raid_ctlr_position = 0;
3693 else
3694 raid_ctlr_position = nphysicals + nlogicals;
3695
edd16368 3696 /* adjust our table of devices */
4f4eb9f1 3697 n_ext_target_devs = 0;
edd16368 3698 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
0b0e1d6c 3699 u8 *lunaddrbytes, is_OBDR = 0;
edd16368
SC
3700
3701 /* Figure out where the LUN ID info is coming from */
339b2b14
SC
3702 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3703 i, nphysicals, nlogicals, physdev_list, logdev_list);
41ce4c35
SC
3704
3705 /* skip masked non-disk devices */
3706 if (MASKED_DEVICE(lunaddrbytes))
3707 if (i < nphysicals + (raid_ctlr_position == 0) &&
3708 NON_DISK_PHYS_DEV(lunaddrbytes))
3709 continue;
edd16368
SC
3710
3711 /* Get device type, vendor, model, device id */
0b0e1d6c
SC
3712 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3713 &is_OBDR))
edd16368 3714 continue; /* skip it if we can't talk to it. */
1f310bde 3715 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
9b5c48c2 3716 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
edd16368
SC
3717 this_device = currentsd[ncurrent];
3718
3719 /*
4f4eb9f1 3720 * For external target devices, we have to insert a LUN 0 which
edd16368
SC
3721 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3722 * is nonetheless an enclosure device there. We have to
3723 * present that otherwise linux won't find anything if
3724 * there is no lun 0.
3725 */
4f4eb9f1 3726 if (add_ext_target_dev(h, tmpdevice, this_device,
1f310bde 3727 lunaddrbytes, lunzerobits,
4f4eb9f1 3728 &n_ext_target_devs)) {
edd16368
SC
3729 ncurrent++;
3730 this_device = currentsd[ncurrent];
3731 }
3732
3733 *this_device = *tmpdevice;
edd16368 3734
41ce4c35
SC
3735 /* do not expose masked devices */
3736 if (MASKED_DEVICE(lunaddrbytes) &&
3737 i < nphysicals + (raid_ctlr_position == 0)) {
3738 if (h->hba_mode_enabled)
3739 dev_warn(&h->pdev->dev,
3740 "Masked physical device detected\n");
3741 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3742 } else {
3743 this_device->expose_state =
3744 HPSA_SG_ATTACH | HPSA_ULD_ATTACH;
3745 }
3746
edd16368 3747 switch (this_device->devtype) {
0b0e1d6c 3748 case TYPE_ROM:
edd16368
SC
3749 /* We don't *really* support actual CD-ROM devices,
3750 * just "One Button Disaster Recovery" tape drive
3751 * which temporarily pretends to be a CD-ROM drive.
3752 * So we check that the device is really an OBDR tape
3753 * device by checking for "$DR-10" in bytes 43-48 of
3754 * the inquiry data.
3755 */
0b0e1d6c
SC
3756 if (is_OBDR)
3757 ncurrent++;
edd16368
SC
3758 break;
3759 case TYPE_DISK:
ecf418d1 3760 if (i >= nphysicals) {
316b221a
SC
3761 ncurrent++;
3762 break;
283b4a9b 3763 }
ecf418d1
JH
3764
3765 if (h->hba_mode_enabled)
3766 /* never use raid mapper in HBA mode */
3767 this_device->offload_enabled = 0;
3768 else if (!(h->transMethod & CFGTBL_Trans_io_accel1 ||
3769 h->transMethod & CFGTBL_Trans_io_accel2))
3770 break;
3771
3772 hpsa_get_ioaccel_drive_info(h, this_device,
3773 lunaddrbytes, id_phys);
3774 atomic_set(&this_device->ioaccel_cmds_out, 0);
3775 ncurrent++;
edd16368
SC
3776 break;
3777 case TYPE_TAPE:
3778 case TYPE_MEDIUM_CHANGER:
3779 ncurrent++;
3780 break;
41ce4c35
SC
3781 case TYPE_ENCLOSURE:
3782 if (h->hba_mode_enabled)
3783 ncurrent++;
3784 break;
edd16368
SC
3785 case TYPE_RAID:
3786 /* Only present the Smartarray HBA as a RAID controller.
3787 * If it's a RAID controller other than the HBA itself
3788 * (an external RAID controller, MSA500 or similar)
3789 * don't present it.
3790 */
3791 if (!is_hba_lunid(lunaddrbytes))
3792 break;
3793 ncurrent++;
3794 break;
3795 default:
3796 break;
3797 }
cfe5badc 3798 if (ncurrent >= HPSA_MAX_DEVICES)
edd16368
SC
3799 break;
3800 }
3801 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3802out:
3803 kfree(tmpdevice);
3804 for (i = 0; i < ndev_allocated; i++)
3805 kfree(currentsd[i]);
3806 kfree(currentsd);
edd16368
SC
3807 kfree(physdev_list);
3808 kfree(logdev_list);
03383736 3809 kfree(id_phys);
edd16368
SC
3810}
3811
ec5cbf04
WS
3812static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3813 struct scatterlist *sg)
3814{
3815 u64 addr64 = (u64) sg_dma_address(sg);
3816 unsigned int len = sg_dma_len(sg);
3817
3818 desc->Addr = cpu_to_le64(addr64);
3819 desc->Len = cpu_to_le32(len);
3820 desc->Ext = 0;
3821}
3822
c7ee65b3
WS
3823/*
3824 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
edd16368
SC
3825 * dma mapping and fills in the scatter gather entries of the
3826 * hpsa command, cp.
3827 */
33a2ffce 3828static int hpsa_scatter_gather(struct ctlr_info *h,
edd16368
SC
3829 struct CommandList *cp,
3830 struct scsi_cmnd *cmd)
3831{
edd16368 3832 struct scatterlist *sg;
b3a7ba7c 3833 int use_sg, i, sg_limit, chained, last_sg;
33a2ffce 3834 struct SGDescriptor *curr_sg;
edd16368 3835
33a2ffce 3836 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
edd16368
SC
3837
3838 use_sg = scsi_dma_map(cmd);
3839 if (use_sg < 0)
3840 return use_sg;
3841
3842 if (!use_sg)
3843 goto sglist_finished;
3844
b3a7ba7c
WS
3845 /*
3846 * If the number of entries is greater than the max for a single list,
3847 * then we have a chained list; we will set up all but one entry in the
3848 * first list (the last entry is saved for link information);
3849 * otherwise, we don't have a chained list and we'll set up at each of
3850 * the entries in the one list.
3851 */
33a2ffce 3852 curr_sg = cp->SG;
b3a7ba7c
WS
3853 chained = use_sg > h->max_cmd_sg_entries;
3854 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3855 last_sg = scsi_sg_count(cmd) - 1;
3856 scsi_for_each_sg(cmd, sg, sg_limit, i) {
ec5cbf04 3857 hpsa_set_sg_descriptor(curr_sg, sg);
33a2ffce
SC
3858 curr_sg++;
3859 }
ec5cbf04 3860
b3a7ba7c
WS
3861 if (chained) {
3862 /*
3863 * Continue with the chained list. Set curr_sg to the chained
3864 * list. Modify the limit to the total count less the entries
3865 * we've already set up. Resume the scan at the list entry
3866 * where the previous loop left off.
3867 */
3868 curr_sg = h->cmd_sg_list[cp->cmdindex];
3869 sg_limit = use_sg - sg_limit;
3870 for_each_sg(sg, sg, sg_limit, i) {
3871 hpsa_set_sg_descriptor(curr_sg, sg);
3872 curr_sg++;
3873 }
3874 }
3875
ec5cbf04 3876 /* Back the pointer up to the last entry and mark it as "last". */
b3a7ba7c 3877 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
33a2ffce
SC
3878
3879 if (use_sg + chained > h->maxSG)
3880 h->maxSG = use_sg + chained;
3881
3882 if (chained) {
3883 cp->Header.SGList = h->max_cmd_sg_entries;
50a0decf 3884 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
e2bea6df
SC
3885 if (hpsa_map_sg_chain_block(h, cp)) {
3886 scsi_dma_unmap(cmd);
3887 return -1;
3888 }
33a2ffce 3889 return 0;
edd16368
SC
3890 }
3891
3892sglist_finished:
3893
01a02ffc 3894 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
c7ee65b3 3895 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
edd16368
SC
3896 return 0;
3897}
3898
283b4a9b
SC
3899#define IO_ACCEL_INELIGIBLE (1)
3900static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3901{
3902 int is_write = 0;
3903 u32 block;
3904 u32 block_cnt;
3905
3906 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3907 switch (cdb[0]) {
3908 case WRITE_6:
3909 case WRITE_12:
3910 is_write = 1;
3911 case READ_6:
3912 case READ_12:
3913 if (*cdb_len == 6) {
3914 block = (((u32) cdb[2]) << 8) | cdb[3];
3915 block_cnt = cdb[4];
3916 } else {
3917 BUG_ON(*cdb_len != 12);
3918 block = (((u32) cdb[2]) << 24) |
3919 (((u32) cdb[3]) << 16) |
3920 (((u32) cdb[4]) << 8) |
3921 cdb[5];
3922 block_cnt =
3923 (((u32) cdb[6]) << 24) |
3924 (((u32) cdb[7]) << 16) |
3925 (((u32) cdb[8]) << 8) |
3926 cdb[9];
3927 }
3928 if (block_cnt > 0xffff)
3929 return IO_ACCEL_INELIGIBLE;
3930
3931 cdb[0] = is_write ? WRITE_10 : READ_10;
3932 cdb[1] = 0;
3933 cdb[2] = (u8) (block >> 24);
3934 cdb[3] = (u8) (block >> 16);
3935 cdb[4] = (u8) (block >> 8);
3936 cdb[5] = (u8) (block);
3937 cdb[6] = 0;
3938 cdb[7] = (u8) (block_cnt >> 8);
3939 cdb[8] = (u8) (block_cnt);
3940 cdb[9] = 0;
3941 *cdb_len = 10;
3942 break;
3943 }
3944 return 0;
3945}
3946
c349775e 3947static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
283b4a9b 3948 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
03383736 3949 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
e1f7de0c
MG
3950{
3951 struct scsi_cmnd *cmd = c->scsi_cmd;
e1f7de0c
MG
3952 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3953 unsigned int len;
3954 unsigned int total_len = 0;
3955 struct scatterlist *sg;
3956 u64 addr64;
3957 int use_sg, i;
3958 struct SGDescriptor *curr_sg;
3959 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3960
283b4a9b 3961 /* TODO: implement chaining support */
03383736
DB
3962 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3963 atomic_dec(&phys_disk->ioaccel_cmds_out);
283b4a9b 3964 return IO_ACCEL_INELIGIBLE;
03383736 3965 }
283b4a9b 3966
e1f7de0c
MG
3967 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3968
03383736
DB
3969 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3970 atomic_dec(&phys_disk->ioaccel_cmds_out);
283b4a9b 3971 return IO_ACCEL_INELIGIBLE;
03383736 3972 }
283b4a9b 3973
e1f7de0c
MG
3974 c->cmd_type = CMD_IOACCEL1;
3975
3976 /* Adjust the DMA address to point to the accelerated command buffer */
3977 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3978 (c->cmdindex * sizeof(*cp));
3979 BUG_ON(c->busaddr & 0x0000007F);
3980
3981 use_sg = scsi_dma_map(cmd);
03383736
DB
3982 if (use_sg < 0) {
3983 atomic_dec(&phys_disk->ioaccel_cmds_out);
e1f7de0c 3984 return use_sg;
03383736 3985 }
e1f7de0c
MG
3986
3987 if (use_sg) {
3988 curr_sg = cp->SG;
3989 scsi_for_each_sg(cmd, sg, use_sg, i) {
3990 addr64 = (u64) sg_dma_address(sg);
3991 len = sg_dma_len(sg);
3992 total_len += len;
50a0decf
SC
3993 curr_sg->Addr = cpu_to_le64(addr64);
3994 curr_sg->Len = cpu_to_le32(len);
3995 curr_sg->Ext = cpu_to_le32(0);
e1f7de0c
MG
3996 curr_sg++;
3997 }
50a0decf 3998 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
e1f7de0c
MG
3999
4000 switch (cmd->sc_data_direction) {
4001 case DMA_TO_DEVICE:
4002 control |= IOACCEL1_CONTROL_DATA_OUT;
4003 break;
4004 case DMA_FROM_DEVICE:
4005 control |= IOACCEL1_CONTROL_DATA_IN;
4006 break;
4007 case DMA_NONE:
4008 control |= IOACCEL1_CONTROL_NODATAXFER;
4009 break;
4010 default:
4011 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4012 cmd->sc_data_direction);
4013 BUG();
4014 break;
4015 }
4016 } else {
4017 control |= IOACCEL1_CONTROL_NODATAXFER;
4018 }
4019
c349775e 4020 c->Header.SGList = use_sg;
e1f7de0c 4021 /* Fill out the command structure to submit */
2b08b3e9
DB
4022 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4023 cp->transfer_len = cpu_to_le32(total_len);
4024 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4025 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4026 cp->control = cpu_to_le32(control);
283b4a9b
SC
4027 memcpy(cp->CDB, cdb, cdb_len);
4028 memcpy(cp->CISS_LUN, scsi3addr, 8);
c349775e 4029 /* Tag was already set at init time. */
283b4a9b 4030 enqueue_cmd_and_start_io(h, c);
e1f7de0c
MG
4031 return 0;
4032}
edd16368 4033
283b4a9b
SC
4034/*
4035 * Queue a command directly to a device behind the controller using the
4036 * I/O accelerator path.
4037 */
4038static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4039 struct CommandList *c)
4040{
4041 struct scsi_cmnd *cmd = c->scsi_cmd;
4042 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4043
03383736
DB
4044 c->phys_disk = dev;
4045
283b4a9b 4046 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
03383736 4047 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
283b4a9b
SC
4048}
4049
dd0e19f3
ST
4050/*
4051 * Set encryption parameters for the ioaccel2 request
4052 */
4053static void set_encrypt_ioaccel2(struct ctlr_info *h,
4054 struct CommandList *c, struct io_accel2_cmd *cp)
4055{
4056 struct scsi_cmnd *cmd = c->scsi_cmd;
4057 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4058 struct raid_map_data *map = &dev->raid_map;
4059 u64 first_block;
4060
dd0e19f3 4061 /* Are we doing encryption on this device */
2b08b3e9 4062 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
dd0e19f3
ST
4063 return;
4064 /* Set the data encryption key index. */
4065 cp->dekindex = map->dekindex;
4066
4067 /* Set the encryption enable flag, encoded into direction field. */
4068 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4069
4070 /* Set encryption tweak values based on logical block address
4071 * If block size is 512, tweak value is LBA.
4072 * For other block sizes, tweak is (LBA * block size)/ 512)
4073 */
4074 switch (cmd->cmnd[0]) {
4075 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4076 case WRITE_6:
4077 case READ_6:
2b08b3e9 4078 first_block = get_unaligned_be16(&cmd->cmnd[2]);
dd0e19f3
ST
4079 break;
4080 case WRITE_10:
4081 case READ_10:
dd0e19f3
ST
4082 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4083 case WRITE_12:
4084 case READ_12:
2b08b3e9 4085 first_block = get_unaligned_be32(&cmd->cmnd[2]);
dd0e19f3
ST
4086 break;
4087 case WRITE_16:
4088 case READ_16:
2b08b3e9 4089 first_block = get_unaligned_be64(&cmd->cmnd[2]);
dd0e19f3
ST
4090 break;
4091 default:
4092 dev_err(&h->pdev->dev,
2b08b3e9
DB
4093 "ERROR: %s: size (0x%x) not supported for encryption\n",
4094 __func__, cmd->cmnd[0]);
dd0e19f3
ST
4095 BUG();
4096 break;
4097 }
2b08b3e9
DB
4098
4099 if (le32_to_cpu(map->volume_blk_size) != 512)
4100 first_block = first_block *
4101 le32_to_cpu(map->volume_blk_size)/512;
4102
4103 cp->tweak_lower = cpu_to_le32(first_block);
4104 cp->tweak_upper = cpu_to_le32(first_block >> 32);
dd0e19f3
ST
4105}
4106
c349775e
ST
4107static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4108 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
03383736 4109 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
c349775e
ST
4110{
4111 struct scsi_cmnd *cmd = c->scsi_cmd;
4112 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4113 struct ioaccel2_sg_element *curr_sg;
4114 int use_sg, i;
4115 struct scatterlist *sg;
4116 u64 addr64;
4117 u32 len;
4118 u32 total_len = 0;
4119
d9a729f3 4120 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
c349775e 4121
03383736
DB
4122 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4123 atomic_dec(&phys_disk->ioaccel_cmds_out);
c349775e 4124 return IO_ACCEL_INELIGIBLE;
03383736
DB
4125 }
4126
c349775e
ST
4127 c->cmd_type = CMD_IOACCEL2;
4128 /* Adjust the DMA address to point to the accelerated command buffer */
4129 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4130 (c->cmdindex * sizeof(*cp));
4131 BUG_ON(c->busaddr & 0x0000007F);
4132
4133 memset(cp, 0, sizeof(*cp));
4134 cp->IU_type = IOACCEL2_IU_TYPE;
4135
4136 use_sg = scsi_dma_map(cmd);
03383736
DB
4137 if (use_sg < 0) {
4138 atomic_dec(&phys_disk->ioaccel_cmds_out);
c349775e 4139 return use_sg;
03383736 4140 }
c349775e
ST
4141
4142 if (use_sg) {
c349775e 4143 curr_sg = cp->sg;
d9a729f3
WS
4144 if (use_sg > h->ioaccel_maxsg) {
4145 addr64 = le64_to_cpu(
4146 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4147 curr_sg->address = cpu_to_le64(addr64);
4148 curr_sg->length = 0;
4149 curr_sg->reserved[0] = 0;
4150 curr_sg->reserved[1] = 0;
4151 curr_sg->reserved[2] = 0;
4152 curr_sg->chain_indicator = 0x80;
4153
4154 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4155 }
c349775e
ST
4156 scsi_for_each_sg(cmd, sg, use_sg, i) {
4157 addr64 = (u64) sg_dma_address(sg);
4158 len = sg_dma_len(sg);
4159 total_len += len;
4160 curr_sg->address = cpu_to_le64(addr64);
4161 curr_sg->length = cpu_to_le32(len);
4162 curr_sg->reserved[0] = 0;
4163 curr_sg->reserved[1] = 0;
4164 curr_sg->reserved[2] = 0;
4165 curr_sg->chain_indicator = 0;
4166 curr_sg++;
4167 }
4168
4169 switch (cmd->sc_data_direction) {
4170 case DMA_TO_DEVICE:
dd0e19f3
ST
4171 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4172 cp->direction |= IOACCEL2_DIR_DATA_OUT;
c349775e
ST
4173 break;
4174 case DMA_FROM_DEVICE:
dd0e19f3
ST
4175 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4176 cp->direction |= IOACCEL2_DIR_DATA_IN;
c349775e
ST
4177 break;
4178 case DMA_NONE:
dd0e19f3
ST
4179 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4180 cp->direction |= IOACCEL2_DIR_NO_DATA;
c349775e
ST
4181 break;
4182 default:
4183 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4184 cmd->sc_data_direction);
4185 BUG();
4186 break;
4187 }
4188 } else {
dd0e19f3
ST
4189 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4190 cp->direction |= IOACCEL2_DIR_NO_DATA;
c349775e 4191 }
dd0e19f3
ST
4192
4193 /* Set encryption parameters, if necessary */
4194 set_encrypt_ioaccel2(h, c, cp);
4195
2b08b3e9 4196 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
f2405db8 4197 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
c349775e 4198 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
c349775e 4199
c349775e
ST
4200 cp->data_len = cpu_to_le32(total_len);
4201 cp->err_ptr = cpu_to_le64(c->busaddr +
4202 offsetof(struct io_accel2_cmd, error_data));
50a0decf 4203 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
c349775e 4204
d9a729f3
WS
4205 /* fill in sg elements */
4206 if (use_sg > h->ioaccel_maxsg) {
4207 cp->sg_count = 1;
4208 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4209 atomic_dec(&phys_disk->ioaccel_cmds_out);
4210 scsi_dma_unmap(cmd);
4211 return -1;
4212 }
4213 } else
4214 cp->sg_count = (u8) use_sg;
4215
c349775e
ST
4216 enqueue_cmd_and_start_io(h, c);
4217 return 0;
4218}
4219
4220/*
4221 * Queue a command to the correct I/O accelerator path.
4222 */
4223static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4224 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
03383736 4225 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
c349775e 4226{
03383736
DB
4227 /* Try to honor the device's queue depth */
4228 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4229 phys_disk->queue_depth) {
4230 atomic_dec(&phys_disk->ioaccel_cmds_out);
4231 return IO_ACCEL_INELIGIBLE;
4232 }
c349775e
ST
4233 if (h->transMethod & CFGTBL_Trans_io_accel1)
4234 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
03383736
DB
4235 cdb, cdb_len, scsi3addr,
4236 phys_disk);
c349775e
ST
4237 else
4238 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
03383736
DB
4239 cdb, cdb_len, scsi3addr,
4240 phys_disk);
c349775e
ST
4241}
4242
6b80b18f
ST
4243static void raid_map_helper(struct raid_map_data *map,
4244 int offload_to_mirror, u32 *map_index, u32 *current_group)
4245{
4246 if (offload_to_mirror == 0) {
4247 /* use physical disk in the first mirrored group. */
2b08b3e9 4248 *map_index %= le16_to_cpu(map->data_disks_per_row);
6b80b18f
ST
4249 return;
4250 }
4251 do {
4252 /* determine mirror group that *map_index indicates */
2b08b3e9
DB
4253 *current_group = *map_index /
4254 le16_to_cpu(map->data_disks_per_row);
6b80b18f
ST
4255 if (offload_to_mirror == *current_group)
4256 continue;
2b08b3e9 4257 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
6b80b18f 4258 /* select map index from next group */
2b08b3e9 4259 *map_index += le16_to_cpu(map->data_disks_per_row);
6b80b18f
ST
4260 (*current_group)++;
4261 } else {
4262 /* select map index from first group */
2b08b3e9 4263 *map_index %= le16_to_cpu(map->data_disks_per_row);
6b80b18f
ST
4264 *current_group = 0;
4265 }
4266 } while (offload_to_mirror != *current_group);
4267}
4268
283b4a9b
SC
4269/*
4270 * Attempt to perform offload RAID mapping for a logical volume I/O.
4271 */
4272static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4273 struct CommandList *c)
4274{
4275 struct scsi_cmnd *cmd = c->scsi_cmd;
4276 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4277 struct raid_map_data *map = &dev->raid_map;
4278 struct raid_map_disk_data *dd = &map->data[0];
4279 int is_write = 0;
4280 u32 map_index;
4281 u64 first_block, last_block;
4282 u32 block_cnt;
4283 u32 blocks_per_row;
4284 u64 first_row, last_row;
4285 u32 first_row_offset, last_row_offset;
4286 u32 first_column, last_column;
6b80b18f
ST
4287 u64 r0_first_row, r0_last_row;
4288 u32 r5or6_blocks_per_row;
4289 u64 r5or6_first_row, r5or6_last_row;
4290 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4291 u32 r5or6_first_column, r5or6_last_column;
4292 u32 total_disks_per_row;
4293 u32 stripesize;
4294 u32 first_group, last_group, current_group;
283b4a9b
SC
4295 u32 map_row;
4296 u32 disk_handle;
4297 u64 disk_block;
4298 u32 disk_block_cnt;
4299 u8 cdb[16];
4300 u8 cdb_len;
2b08b3e9 4301 u16 strip_size;
283b4a9b
SC
4302#if BITS_PER_LONG == 32
4303 u64 tmpdiv;
4304#endif
6b80b18f 4305 int offload_to_mirror;
283b4a9b 4306
283b4a9b
SC
4307 /* check for valid opcode, get LBA and block count */
4308 switch (cmd->cmnd[0]) {
4309 case WRITE_6:
4310 is_write = 1;
4311 case READ_6:
4312 first_block =
4313 (((u64) cmd->cmnd[2]) << 8) |
4314 cmd->cmnd[3];
4315 block_cnt = cmd->cmnd[4];
3fa89a04
SC
4316 if (block_cnt == 0)
4317 block_cnt = 256;
283b4a9b
SC
4318 break;
4319 case WRITE_10:
4320 is_write = 1;
4321 case READ_10:
4322 first_block =
4323 (((u64) cmd->cmnd[2]) << 24) |
4324 (((u64) cmd->cmnd[3]) << 16) |
4325 (((u64) cmd->cmnd[4]) << 8) |
4326 cmd->cmnd[5];
4327 block_cnt =
4328 (((u32) cmd->cmnd[7]) << 8) |
4329 cmd->cmnd[8];
4330 break;
4331 case WRITE_12:
4332 is_write = 1;
4333 case READ_12:
4334 first_block =
4335 (((u64) cmd->cmnd[2]) << 24) |
4336 (((u64) cmd->cmnd[3]) << 16) |
4337 (((u64) cmd->cmnd[4]) << 8) |
4338 cmd->cmnd[5];
4339 block_cnt =
4340 (((u32) cmd->cmnd[6]) << 24) |
4341 (((u32) cmd->cmnd[7]) << 16) |
4342 (((u32) cmd->cmnd[8]) << 8) |
4343 cmd->cmnd[9];
4344 break;
4345 case WRITE_16:
4346 is_write = 1;
4347 case READ_16:
4348 first_block =
4349 (((u64) cmd->cmnd[2]) << 56) |
4350 (((u64) cmd->cmnd[3]) << 48) |
4351 (((u64) cmd->cmnd[4]) << 40) |
4352 (((u64) cmd->cmnd[5]) << 32) |
4353 (((u64) cmd->cmnd[6]) << 24) |
4354 (((u64) cmd->cmnd[7]) << 16) |
4355 (((u64) cmd->cmnd[8]) << 8) |
4356 cmd->cmnd[9];
4357 block_cnt =
4358 (((u32) cmd->cmnd[10]) << 24) |
4359 (((u32) cmd->cmnd[11]) << 16) |
4360 (((u32) cmd->cmnd[12]) << 8) |
4361 cmd->cmnd[13];
4362 break;
4363 default:
4364 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4365 }
283b4a9b
SC
4366 last_block = first_block + block_cnt - 1;
4367
4368 /* check for write to non-RAID-0 */
4369 if (is_write && dev->raid_level != 0)
4370 return IO_ACCEL_INELIGIBLE;
4371
4372 /* check for invalid block or wraparound */
2b08b3e9
DB
4373 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4374 last_block < first_block)
283b4a9b
SC
4375 return IO_ACCEL_INELIGIBLE;
4376
4377 /* calculate stripe information for the request */
2b08b3e9
DB
4378 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4379 le16_to_cpu(map->strip_size);
4380 strip_size = le16_to_cpu(map->strip_size);
283b4a9b
SC
4381#if BITS_PER_LONG == 32
4382 tmpdiv = first_block;
4383 (void) do_div(tmpdiv, blocks_per_row);
4384 first_row = tmpdiv;
4385 tmpdiv = last_block;
4386 (void) do_div(tmpdiv, blocks_per_row);
4387 last_row = tmpdiv;
4388 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4389 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4390 tmpdiv = first_row_offset;
2b08b3e9 4391 (void) do_div(tmpdiv, strip_size);
283b4a9b
SC
4392 first_column = tmpdiv;
4393 tmpdiv = last_row_offset;
2b08b3e9 4394 (void) do_div(tmpdiv, strip_size);
283b4a9b
SC
4395 last_column = tmpdiv;
4396#else
4397 first_row = first_block / blocks_per_row;
4398 last_row = last_block / blocks_per_row;
4399 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4400 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
2b08b3e9
DB
4401 first_column = first_row_offset / strip_size;
4402 last_column = last_row_offset / strip_size;
283b4a9b
SC
4403#endif
4404
4405 /* if this isn't a single row/column then give to the controller */
4406 if ((first_row != last_row) || (first_column != last_column))
4407 return IO_ACCEL_INELIGIBLE;
4408
4409 /* proceeding with driver mapping */
2b08b3e9
DB
4410 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4411 le16_to_cpu(map->metadata_disks_per_row);
283b4a9b 4412 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
2b08b3e9 4413 le16_to_cpu(map->row_cnt);
6b80b18f
ST
4414 map_index = (map_row * total_disks_per_row) + first_column;
4415
4416 switch (dev->raid_level) {
4417 case HPSA_RAID_0:
4418 break; /* nothing special to do */
4419 case HPSA_RAID_1:
4420 /* Handles load balance across RAID 1 members.
4421 * (2-drive R1 and R10 with even # of drives.)
4422 * Appropriate for SSDs, not optimal for HDDs
283b4a9b 4423 */
2b08b3e9 4424 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
283b4a9b 4425 if (dev->offload_to_mirror)
2b08b3e9 4426 map_index += le16_to_cpu(map->data_disks_per_row);
283b4a9b 4427 dev->offload_to_mirror = !dev->offload_to_mirror;
6b80b18f
ST
4428 break;
4429 case HPSA_RAID_ADM:
4430 /* Handles N-way mirrors (R1-ADM)
4431 * and R10 with # of drives divisible by 3.)
4432 */
2b08b3e9 4433 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
6b80b18f
ST
4434
4435 offload_to_mirror = dev->offload_to_mirror;
4436 raid_map_helper(map, offload_to_mirror,
4437 &map_index, &current_group);
4438 /* set mirror group to use next time */
4439 offload_to_mirror =
2b08b3e9
DB
4440 (offload_to_mirror >=
4441 le16_to_cpu(map->layout_map_count) - 1)
6b80b18f 4442 ? 0 : offload_to_mirror + 1;
6b80b18f
ST
4443 dev->offload_to_mirror = offload_to_mirror;
4444 /* Avoid direct use of dev->offload_to_mirror within this
4445 * function since multiple threads might simultaneously
4446 * increment it beyond the range of dev->layout_map_count -1.
4447 */
4448 break;
4449 case HPSA_RAID_5:
4450 case HPSA_RAID_6:
2b08b3e9 4451 if (le16_to_cpu(map->layout_map_count) <= 1)
6b80b18f
ST
4452 break;
4453
4454 /* Verify first and last block are in same RAID group */
4455 r5or6_blocks_per_row =
2b08b3e9
DB
4456 le16_to_cpu(map->strip_size) *
4457 le16_to_cpu(map->data_disks_per_row);
6b80b18f 4458 BUG_ON(r5or6_blocks_per_row == 0);
2b08b3e9
DB
4459 stripesize = r5or6_blocks_per_row *
4460 le16_to_cpu(map->layout_map_count);
6b80b18f
ST
4461#if BITS_PER_LONG == 32
4462 tmpdiv = first_block;
4463 first_group = do_div(tmpdiv, stripesize);
4464 tmpdiv = first_group;
4465 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4466 first_group = tmpdiv;
4467 tmpdiv = last_block;
4468 last_group = do_div(tmpdiv, stripesize);
4469 tmpdiv = last_group;
4470 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4471 last_group = tmpdiv;
4472#else
4473 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4474 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
6b80b18f 4475#endif
000ff7c2 4476 if (first_group != last_group)
6b80b18f
ST
4477 return IO_ACCEL_INELIGIBLE;
4478
4479 /* Verify request is in a single row of RAID 5/6 */
4480#if BITS_PER_LONG == 32
4481 tmpdiv = first_block;
4482 (void) do_div(tmpdiv, stripesize);
4483 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4484 tmpdiv = last_block;
4485 (void) do_div(tmpdiv, stripesize);
4486 r5or6_last_row = r0_last_row = tmpdiv;
4487#else
4488 first_row = r5or6_first_row = r0_first_row =
4489 first_block / stripesize;
4490 r5or6_last_row = r0_last_row = last_block / stripesize;
4491#endif
4492 if (r5or6_first_row != r5or6_last_row)
4493 return IO_ACCEL_INELIGIBLE;
4494
4495
4496 /* Verify request is in a single column */
4497#if BITS_PER_LONG == 32
4498 tmpdiv = first_block;
4499 first_row_offset = do_div(tmpdiv, stripesize);
4500 tmpdiv = first_row_offset;
4501 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4502 r5or6_first_row_offset = first_row_offset;
4503 tmpdiv = last_block;
4504 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4505 tmpdiv = r5or6_last_row_offset;
4506 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4507 tmpdiv = r5or6_first_row_offset;
4508 (void) do_div(tmpdiv, map->strip_size);
4509 first_column = r5or6_first_column = tmpdiv;
4510 tmpdiv = r5or6_last_row_offset;
4511 (void) do_div(tmpdiv, map->strip_size);
4512 r5or6_last_column = tmpdiv;
4513#else
4514 first_row_offset = r5or6_first_row_offset =
4515 (u32)((first_block % stripesize) %
4516 r5or6_blocks_per_row);
4517
4518 r5or6_last_row_offset =
4519 (u32)((last_block % stripesize) %
4520 r5or6_blocks_per_row);
4521
4522 first_column = r5or6_first_column =
2b08b3e9 4523 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
6b80b18f 4524 r5or6_last_column =
2b08b3e9 4525 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
6b80b18f
ST
4526#endif
4527 if (r5or6_first_column != r5or6_last_column)
4528 return IO_ACCEL_INELIGIBLE;
4529
4530 /* Request is eligible */
4531 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
2b08b3e9 4532 le16_to_cpu(map->row_cnt);
6b80b18f
ST
4533
4534 map_index = (first_group *
2b08b3e9 4535 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
6b80b18f
ST
4536 (map_row * total_disks_per_row) + first_column;
4537 break;
4538 default:
4539 return IO_ACCEL_INELIGIBLE;
283b4a9b 4540 }
6b80b18f 4541
07543e0c
SC
4542 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4543 return IO_ACCEL_INELIGIBLE;
4544
03383736
DB
4545 c->phys_disk = dev->phys_disk[map_index];
4546
283b4a9b 4547 disk_handle = dd[map_index].ioaccel_handle;
2b08b3e9
DB
4548 disk_block = le64_to_cpu(map->disk_starting_blk) +
4549 first_row * le16_to_cpu(map->strip_size) +
4550 (first_row_offset - first_column *
4551 le16_to_cpu(map->strip_size));
283b4a9b
SC
4552 disk_block_cnt = block_cnt;
4553
4554 /* handle differing logical/physical block sizes */
4555 if (map->phys_blk_shift) {
4556 disk_block <<= map->phys_blk_shift;
4557 disk_block_cnt <<= map->phys_blk_shift;
4558 }
4559 BUG_ON(disk_block_cnt > 0xffff);
4560
4561 /* build the new CDB for the physical disk I/O */
4562 if (disk_block > 0xffffffff) {
4563 cdb[0] = is_write ? WRITE_16 : READ_16;
4564 cdb[1] = 0;
4565 cdb[2] = (u8) (disk_block >> 56);
4566 cdb[3] = (u8) (disk_block >> 48);
4567 cdb[4] = (u8) (disk_block >> 40);
4568 cdb[5] = (u8) (disk_block >> 32);
4569 cdb[6] = (u8) (disk_block >> 24);
4570 cdb[7] = (u8) (disk_block >> 16);
4571 cdb[8] = (u8) (disk_block >> 8);
4572 cdb[9] = (u8) (disk_block);
4573 cdb[10] = (u8) (disk_block_cnt >> 24);
4574 cdb[11] = (u8) (disk_block_cnt >> 16);
4575 cdb[12] = (u8) (disk_block_cnt >> 8);
4576 cdb[13] = (u8) (disk_block_cnt);
4577 cdb[14] = 0;
4578 cdb[15] = 0;
4579 cdb_len = 16;
4580 } else {
4581 cdb[0] = is_write ? WRITE_10 : READ_10;
4582 cdb[1] = 0;
4583 cdb[2] = (u8) (disk_block >> 24);
4584 cdb[3] = (u8) (disk_block >> 16);
4585 cdb[4] = (u8) (disk_block >> 8);
4586 cdb[5] = (u8) (disk_block);
4587 cdb[6] = 0;
4588 cdb[7] = (u8) (disk_block_cnt >> 8);
4589 cdb[8] = (u8) (disk_block_cnt);
4590 cdb[9] = 0;
4591 cdb_len = 10;
4592 }
4593 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
03383736
DB
4594 dev->scsi3addr,
4595 dev->phys_disk[map_index]);
283b4a9b
SC
4596}
4597
25163bd5
WS
4598/*
4599 * Submit commands down the "normal" RAID stack path
4600 * All callers to hpsa_ciss_submit must check lockup_detected
4601 * beforehand, before (opt.) and after calling cmd_alloc
4602 */
574f05d3
SC
4603static int hpsa_ciss_submit(struct ctlr_info *h,
4604 struct CommandList *c, struct scsi_cmnd *cmd,
4605 unsigned char scsi3addr[])
edd16368 4606{
edd16368 4607 cmd->host_scribble = (unsigned char *) c;
edd16368
SC
4608 c->cmd_type = CMD_SCSI;
4609 c->scsi_cmd = cmd;
4610 c->Header.ReplyQueue = 0; /* unused in simple mode */
4611 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
f2405db8 4612 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
edd16368
SC
4613
4614 /* Fill in the request block... */
4615
4616 c->Request.Timeout = 0;
edd16368
SC
4617 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4618 c->Request.CDBLen = cmd->cmd_len;
4619 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
edd16368
SC
4620 switch (cmd->sc_data_direction) {
4621 case DMA_TO_DEVICE:
a505b86f
SC
4622 c->Request.type_attr_dir =
4623 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
edd16368
SC
4624 break;
4625 case DMA_FROM_DEVICE:
a505b86f
SC
4626 c->Request.type_attr_dir =
4627 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
edd16368
SC
4628 break;
4629 case DMA_NONE:
a505b86f
SC
4630 c->Request.type_attr_dir =
4631 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
edd16368
SC
4632 break;
4633 case DMA_BIDIRECTIONAL:
4634 /* This can happen if a buggy application does a scsi passthru
4635 * and sets both inlen and outlen to non-zero. ( see
4636 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4637 */
4638
a505b86f
SC
4639 c->Request.type_attr_dir =
4640 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
edd16368
SC
4641 /* This is technically wrong, and hpsa controllers should
4642 * reject it with CMD_INVALID, which is the most correct
4643 * response, but non-fibre backends appear to let it
4644 * slide by, and give the same results as if this field
4645 * were set correctly. Either way is acceptable for
4646 * our purposes here.
4647 */
4648
4649 break;
4650
4651 default:
4652 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4653 cmd->sc_data_direction);
4654 BUG();
4655 break;
4656 }
4657
33a2ffce 4658 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
73153fe5 4659 hpsa_cmd_resolve_and_free(h, c);
edd16368
SC
4660 return SCSI_MLQUEUE_HOST_BUSY;
4661 }
4662 enqueue_cmd_and_start_io(h, c);
4663 /* the cmd'll come back via intr handler in complete_scsi_command() */
4664 return 0;
4665}
4666
360c73bd
SC
4667static void hpsa_cmd_init(struct ctlr_info *h, int index,
4668 struct CommandList *c)
4669{
4670 dma_addr_t cmd_dma_handle, err_dma_handle;
4671
4672 /* Zero out all of commandlist except the last field, refcount */
4673 memset(c, 0, offsetof(struct CommandList, refcount));
4674 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4675 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4676 c->err_info = h->errinfo_pool + index;
4677 memset(c->err_info, 0, sizeof(*c->err_info));
4678 err_dma_handle = h->errinfo_pool_dhandle
4679 + index * sizeof(*c->err_info);
4680 c->cmdindex = index;
4681 c->busaddr = (u32) cmd_dma_handle;
4682 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4683 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4684 c->h = h;
a58e7e53 4685 c->scsi_cmd = SCSI_CMD_IDLE;
360c73bd
SC
4686}
4687
4688static void hpsa_preinitialize_commands(struct ctlr_info *h)
4689{
4690 int i;
4691
4692 for (i = 0; i < h->nr_cmds; i++) {
4693 struct CommandList *c = h->cmd_pool + i;
4694
4695 hpsa_cmd_init(h, i, c);
4696 atomic_set(&c->refcount, 0);
4697 }
4698}
4699
4700static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4701 struct CommandList *c)
4702{
4703 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4704
73153fe5
WS
4705 BUG_ON(c->cmdindex != index);
4706
360c73bd
SC
4707 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4708 memset(c->err_info, 0, sizeof(*c->err_info));
4709 c->busaddr = (u32) cmd_dma_handle;
4710}
4711
592a0ad5
WS
4712static int hpsa_ioaccel_submit(struct ctlr_info *h,
4713 struct CommandList *c, struct scsi_cmnd *cmd,
4714 unsigned char *scsi3addr)
4715{
4716 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4717 int rc = IO_ACCEL_INELIGIBLE;
4718
4719 cmd->host_scribble = (unsigned char *) c;
4720
4721 if (dev->offload_enabled) {
4722 hpsa_cmd_init(h, c->cmdindex, c);
4723 c->cmd_type = CMD_SCSI;
4724 c->scsi_cmd = cmd;
4725 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4726 if (rc < 0) /* scsi_dma_map failed. */
4727 rc = SCSI_MLQUEUE_HOST_BUSY;
a3144e0b 4728 } else if (dev->hba_ioaccel_enabled) {
592a0ad5
WS
4729 hpsa_cmd_init(h, c->cmdindex, c);
4730 c->cmd_type = CMD_SCSI;
4731 c->scsi_cmd = cmd;
4732 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4733 if (rc < 0) /* scsi_dma_map failed. */
4734 rc = SCSI_MLQUEUE_HOST_BUSY;
4735 }
4736 return rc;
4737}
4738
080ef1cc
DB
4739static void hpsa_command_resubmit_worker(struct work_struct *work)
4740{
4741 struct scsi_cmnd *cmd;
4742 struct hpsa_scsi_dev_t *dev;
8a0ff92c 4743 struct CommandList *c = container_of(work, struct CommandList, work);
080ef1cc
DB
4744
4745 cmd = c->scsi_cmd;
4746 dev = cmd->device->hostdata;
4747 if (!dev) {
4748 cmd->result = DID_NO_CONNECT << 16;
8a0ff92c 4749 return hpsa_cmd_free_and_done(c->h, c, cmd);
080ef1cc 4750 }
d604f533
WS
4751 if (c->reset_pending)
4752 return hpsa_cmd_resolve_and_free(c->h, c);
a58e7e53
WS
4753 if (c->abort_pending)
4754 return hpsa_cmd_abort_and_free(c->h, c, cmd);
592a0ad5
WS
4755 if (c->cmd_type == CMD_IOACCEL2) {
4756 struct ctlr_info *h = c->h;
4757 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4758 int rc;
4759
4760 if (c2->error_data.serv_response ==
4761 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4762 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4763 if (rc == 0)
4764 return;
4765 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4766 /*
4767 * If we get here, it means dma mapping failed.
4768 * Try again via scsi mid layer, which will
4769 * then get SCSI_MLQUEUE_HOST_BUSY.
4770 */
4771 cmd->result = DID_IMM_RETRY << 16;
8a0ff92c 4772 return hpsa_cmd_free_and_done(h, c, cmd);
592a0ad5
WS
4773 }
4774 /* else, fall thru and resubmit down CISS path */
4775 }
4776 }
360c73bd 4777 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
080ef1cc
DB
4778 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4779 /*
4780 * If we get here, it means dma mapping failed. Try
4781 * again via scsi mid layer, which will then get
4782 * SCSI_MLQUEUE_HOST_BUSY.
592a0ad5
WS
4783 *
4784 * hpsa_ciss_submit will have already freed c
4785 * if it encountered a dma mapping failure.
080ef1cc
DB
4786 */
4787 cmd->result = DID_IMM_RETRY << 16;
4788 cmd->scsi_done(cmd);
4789 }
4790}
4791
574f05d3
SC
4792/* Running in struct Scsi_Host->host_lock less mode */
4793static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4794{
4795 struct ctlr_info *h;
4796 struct hpsa_scsi_dev_t *dev;
4797 unsigned char scsi3addr[8];
4798 struct CommandList *c;
4799 int rc = 0;
4800
4801 /* Get the ptr to our adapter structure out of cmd->host. */
4802 h = sdev_to_hba(cmd->device);
73153fe5
WS
4803
4804 BUG_ON(cmd->request->tag < 0);
4805
574f05d3
SC
4806 dev = cmd->device->hostdata;
4807 if (!dev) {
4808 cmd->result = DID_NO_CONNECT << 16;
4809 cmd->scsi_done(cmd);
4810 return 0;
4811 }
574f05d3 4812
73153fe5 4813 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
bf43caf3 4814
407863cb 4815 if (unlikely(lockup_detected(h))) {
25163bd5 4816 cmd->result = DID_NO_CONNECT << 16;
407863cb
SC
4817 cmd->scsi_done(cmd);
4818 return 0;
4819 }
73153fe5 4820 c = cmd_tagged_alloc(h, cmd);
574f05d3 4821
407863cb
SC
4822 /*
4823 * Call alternate submit routine for I/O accelerated commands.
574f05d3
SC
4824 * Retries always go down the normal I/O path.
4825 */
4826 if (likely(cmd->retries == 0 &&
4827 cmd->request->cmd_type == REQ_TYPE_FS &&
4828 h->acciopath_status)) {
592a0ad5
WS
4829 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4830 if (rc == 0)
4831 return 0;
4832 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
73153fe5 4833 hpsa_cmd_resolve_and_free(h, c);
592a0ad5 4834 return SCSI_MLQUEUE_HOST_BUSY;
574f05d3
SC
4835 }
4836 }
4837 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4838}
4839
8ebc9248 4840static void hpsa_scan_complete(struct ctlr_info *h)
5f389360
SC
4841{
4842 unsigned long flags;
4843
8ebc9248
WS
4844 spin_lock_irqsave(&h->scan_lock, flags);
4845 h->scan_finished = 1;
4846 wake_up_all(&h->scan_wait_queue);
4847 spin_unlock_irqrestore(&h->scan_lock, flags);
5f389360
SC
4848}
4849
a08a8471
SC
4850static void hpsa_scan_start(struct Scsi_Host *sh)
4851{
4852 struct ctlr_info *h = shost_to_hba(sh);
4853 unsigned long flags;
4854
8ebc9248
WS
4855 /*
4856 * Don't let rescans be initiated on a controller known to be locked
4857 * up. If the controller locks up *during* a rescan, that thread is
4858 * probably hosed, but at least we can prevent new rescan threads from
4859 * piling up on a locked up controller.
4860 */
4861 if (unlikely(lockup_detected(h)))
4862 return hpsa_scan_complete(h);
5f389360 4863
a08a8471
SC
4864 /* wait until any scan already in progress is finished. */
4865 while (1) {
4866 spin_lock_irqsave(&h->scan_lock, flags);
4867 if (h->scan_finished)
4868 break;
4869 spin_unlock_irqrestore(&h->scan_lock, flags);
4870 wait_event(h->scan_wait_queue, h->scan_finished);
4871 /* Note: We don't need to worry about a race between this
4872 * thread and driver unload because the midlayer will
4873 * have incremented the reference count, so unload won't
4874 * happen if we're in here.
4875 */
4876 }
4877 h->scan_finished = 0; /* mark scan as in progress */
4878 spin_unlock_irqrestore(&h->scan_lock, flags);
4879
8ebc9248
WS
4880 if (unlikely(lockup_detected(h)))
4881 return hpsa_scan_complete(h);
5f389360 4882
a08a8471
SC
4883 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4884
8ebc9248 4885 hpsa_scan_complete(h);
a08a8471
SC
4886}
4887
7c0a0229
DB
4888static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4889{
03383736
DB
4890 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4891
4892 if (!logical_drive)
4893 return -ENODEV;
7c0a0229
DB
4894
4895 if (qdepth < 1)
4896 qdepth = 1;
03383736
DB
4897 else if (qdepth > logical_drive->queue_depth)
4898 qdepth = logical_drive->queue_depth;
4899
4900 return scsi_change_queue_depth(sdev, qdepth);
7c0a0229
DB
4901}
4902
a08a8471
SC
4903static int hpsa_scan_finished(struct Scsi_Host *sh,
4904 unsigned long elapsed_time)
4905{
4906 struct ctlr_info *h = shost_to_hba(sh);
4907 unsigned long flags;
4908 int finished;
4909
4910 spin_lock_irqsave(&h->scan_lock, flags);
4911 finished = h->scan_finished;
4912 spin_unlock_irqrestore(&h->scan_lock, flags);
4913 return finished;
4914}
4915
2946e82b 4916static int hpsa_scsi_host_alloc(struct ctlr_info *h)
edd16368 4917{
b705690d
SC
4918 struct Scsi_Host *sh;
4919 int error;
edd16368 4920
b705690d 4921 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2946e82b
RE
4922 if (sh == NULL) {
4923 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
4924 return -ENOMEM;
4925 }
b705690d
SC
4926
4927 sh->io_port = 0;
4928 sh->n_io_port = 0;
4929 sh->this_id = -1;
4930 sh->max_channel = 3;
4931 sh->max_cmd_len = MAX_COMMAND_SIZE;
4932 sh->max_lun = HPSA_MAX_LUN;
4933 sh->max_id = HPSA_MAX_LUN;
41ce4c35 4934 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
03383736 4935 sh->cmd_per_lun = sh->can_queue;
b705690d 4936 sh->sg_tablesize = h->maxsgentries;
b705690d
SC
4937 sh->hostdata[0] = (unsigned long) h;
4938 sh->irq = h->intr[h->intr_mode];
4939 sh->unique_id = sh->irq;
73153fe5
WS
4940 error = scsi_init_shared_tag_map(sh, sh->can_queue);
4941 if (error) {
4942 dev_err(&h->pdev->dev,
4943 "%s: scsi_init_shared_tag_map failed for controller %d\n",
4944 __func__, h->ctlr);
2946e82b
RE
4945 scsi_host_put(sh);
4946 return error;
73153fe5 4947 }
2946e82b 4948 h->scsi_host = sh;
b705690d 4949 return 0;
2946e82b 4950}
b705690d 4951
2946e82b
RE
4952static int hpsa_scsi_add_host(struct ctlr_info *h)
4953{
4954 int rv;
4955
4956 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
4957 if (rv) {
4958 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
4959 return rv;
4960 }
4961 scsi_scan_host(h->scsi_host);
4962 return 0;
edd16368
SC
4963}
4964
73153fe5
WS
4965/*
4966 * The block layer has already gone to the trouble of picking out a unique,
4967 * small-integer tag for this request. We use an offset from that value as
4968 * an index to select our command block. (The offset allows us to reserve the
4969 * low-numbered entries for our own uses.)
4970 */
4971static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
4972{
4973 int idx = scmd->request->tag;
4974
4975 if (idx < 0)
4976 return idx;
4977
4978 /* Offset to leave space for internal cmds. */
4979 return idx += HPSA_NRESERVED_CMDS;
4980}
4981
b69324ff
WS
4982/*
4983 * Send a TEST_UNIT_READY command to the specified LUN using the specified
4984 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
4985 */
4986static int hpsa_send_test_unit_ready(struct ctlr_info *h,
4987 struct CommandList *c, unsigned char lunaddr[],
4988 int reply_queue)
4989{
4990 int rc;
4991
4992 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4993 (void) fill_cmd(c, TEST_UNIT_READY, h,
4994 NULL, 0, 0, lunaddr, TYPE_CMD);
4995 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
4996 if (rc)
4997 return rc;
4998 /* no unmap needed here because no data xfer. */
4999
5000 /* Check if the unit is already ready. */
5001 if (c->err_info->CommandStatus == CMD_SUCCESS)
5002 return 0;
5003
5004 /*
5005 * The first command sent after reset will receive "unit attention" to
5006 * indicate that the LUN has been reset...this is actually what we're
5007 * looking for (but, success is good too).
5008 */
5009 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5010 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5011 (c->err_info->SenseInfo[2] == NO_SENSE ||
5012 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5013 return 0;
5014
5015 return 1;
5016}
5017
5018/*
5019 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5020 * returns zero when the unit is ready, and non-zero when giving up.
5021 */
5022static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5023 struct CommandList *c,
5024 unsigned char lunaddr[], int reply_queue)
edd16368 5025{
8919358e 5026 int rc;
edd16368
SC
5027 int count = 0;
5028 int waittime = 1; /* seconds */
edd16368
SC
5029
5030 /* Send test unit ready until device ready, or give up. */
b69324ff 5031 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
edd16368 5032
b69324ff
WS
5033 /*
5034 * Wait for a bit. do this first, because if we send
edd16368
SC
5035 * the TUR right away, the reset will just abort it.
5036 */
5037 msleep(1000 * waittime);
b69324ff
WS
5038
5039 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5040 if (!rc)
5041 break;
edd16368
SC
5042
5043 /* Increase wait time with each try, up to a point. */
5044 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
b69324ff 5045 waittime *= 2;
edd16368 5046
b69324ff
WS
5047 dev_warn(&h->pdev->dev,
5048 "waiting %d secs for device to become ready.\n",
5049 waittime);
5050 }
edd16368 5051
b69324ff
WS
5052 return rc;
5053}
edd16368 5054
b69324ff
WS
5055static int wait_for_device_to_become_ready(struct ctlr_info *h,
5056 unsigned char lunaddr[],
5057 int reply_queue)
5058{
5059 int first_queue;
5060 int last_queue;
5061 int rq;
5062 int rc = 0;
5063 struct CommandList *c;
5064
5065 c = cmd_alloc(h);
5066
5067 /*
5068 * If no specific reply queue was requested, then send the TUR
5069 * repeatedly, requesting a reply on each reply queue; otherwise execute
5070 * the loop exactly once using only the specified queue.
5071 */
5072 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5073 first_queue = 0;
5074 last_queue = h->nreply_queues - 1;
5075 } else {
5076 first_queue = reply_queue;
5077 last_queue = reply_queue;
5078 }
5079
5080 for (rq = first_queue; rq <= last_queue; rq++) {
5081 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5082 if (rc)
edd16368 5083 break;
edd16368
SC
5084 }
5085
5086 if (rc)
5087 dev_warn(&h->pdev->dev, "giving up on device.\n");
5088 else
5089 dev_warn(&h->pdev->dev, "device is ready.\n");
5090
45fcb86e 5091 cmd_free(h, c);
edd16368
SC
5092 return rc;
5093}
5094
5095/* Need at least one of these error handlers to keep ../scsi/hosts.c from
5096 * complaining. Doing a host- or bus-reset can't do anything good here.
5097 */
5098static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5099{
5100 int rc;
5101 struct ctlr_info *h;
5102 struct hpsa_scsi_dev_t *dev;
2dc127bb 5103 char msg[48];
edd16368
SC
5104
5105 /* find the controller to which the command to be aborted was sent */
5106 h = sdev_to_hba(scsicmd->device);
5107 if (h == NULL) /* paranoia */
5108 return FAILED;
e345893b
DB
5109
5110 if (lockup_detected(h))
5111 return FAILED;
5112
edd16368
SC
5113 dev = scsicmd->device->hostdata;
5114 if (!dev) {
d604f533 5115 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
edd16368
SC
5116 return FAILED;
5117 }
25163bd5
WS
5118
5119 /* if controller locked up, we can guarantee command won't complete */
5120 if (lockup_detected(h)) {
2dc127bb
DC
5121 snprintf(msg, sizeof(msg),
5122 "cmd %d RESET FAILED, lockup detected",
5123 hpsa_get_cmd_index(scsicmd));
73153fe5 5124 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
25163bd5
WS
5125 return FAILED;
5126 }
5127
5128 /* this reset request might be the result of a lockup; check */
5129 if (detect_controller_lockup(h)) {
2dc127bb
DC
5130 snprintf(msg, sizeof(msg),
5131 "cmd %d RESET FAILED, new lockup detected",
5132 hpsa_get_cmd_index(scsicmd));
73153fe5 5133 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
25163bd5
WS
5134 return FAILED;
5135 }
5136
d604f533
WS
5137 /* Do not attempt on controller */
5138 if (is_hba_lunid(dev->scsi3addr))
5139 return SUCCESS;
5140
25163bd5
WS
5141 hpsa_show_dev_msg(KERN_WARNING, h, dev, "resetting");
5142
edd16368 5143 /* send a reset to the SCSI LUN which the command was sent to */
d604f533
WS
5144 rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
5145 DEFAULT_REPLY_QUEUE);
2dc127bb
DC
5146 snprintf(msg, sizeof(msg), "reset %s",
5147 rc == 0 ? "completed successfully" : "failed");
d604f533
WS
5148 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5149 return rc == 0 ? SUCCESS : FAILED;
edd16368
SC
5150}
5151
6cba3f19
SC
5152static void swizzle_abort_tag(u8 *tag)
5153{
5154 u8 original_tag[8];
5155
5156 memcpy(original_tag, tag, 8);
5157 tag[0] = original_tag[3];
5158 tag[1] = original_tag[2];
5159 tag[2] = original_tag[1];
5160 tag[3] = original_tag[0];
5161 tag[4] = original_tag[7];
5162 tag[5] = original_tag[6];
5163 tag[6] = original_tag[5];
5164 tag[7] = original_tag[4];
5165}
5166
17eb87d2 5167static void hpsa_get_tag(struct ctlr_info *h,
2b08b3e9 5168 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
17eb87d2 5169{
2b08b3e9 5170 u64 tag;
17eb87d2
ST
5171 if (c->cmd_type == CMD_IOACCEL1) {
5172 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5173 &h->ioaccel_cmd_pool[c->cmdindex];
2b08b3e9
DB
5174 tag = le64_to_cpu(cm1->tag);
5175 *tagupper = cpu_to_le32(tag >> 32);
5176 *taglower = cpu_to_le32(tag);
54b6e9e9
ST
5177 return;
5178 }
5179 if (c->cmd_type == CMD_IOACCEL2) {
5180 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5181 &h->ioaccel2_cmd_pool[c->cmdindex];
dd0e19f3
ST
5182 /* upper tag not used in ioaccel2 mode */
5183 memset(tagupper, 0, sizeof(*tagupper));
5184 *taglower = cm2->Tag;
54b6e9e9 5185 return;
17eb87d2 5186 }
2b08b3e9
DB
5187 tag = le64_to_cpu(c->Header.tag);
5188 *tagupper = cpu_to_le32(tag >> 32);
5189 *taglower = cpu_to_le32(tag);
17eb87d2
ST
5190}
5191
75167d2c 5192static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
9b5c48c2 5193 struct CommandList *abort, int reply_queue)
75167d2c
SC
5194{
5195 int rc = IO_OK;
5196 struct CommandList *c;
5197 struct ErrorInfo *ei;
2b08b3e9 5198 __le32 tagupper, taglower;
75167d2c 5199
45fcb86e 5200 c = cmd_alloc(h);
75167d2c 5201
a2dac136 5202 /* fill_cmd can't fail here, no buffer to map */
9b5c48c2 5203 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
a2dac136 5204 0, 0, scsi3addr, TYPE_MSG);
9b5c48c2 5205 if (h->needs_abort_tags_swizzled)
6cba3f19 5206 swizzle_abort_tag(&c->Request.CDB[4]);
25163bd5 5207 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
17eb87d2 5208 hpsa_get_tag(h, abort, &taglower, &tagupper);
25163bd5 5209 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
17eb87d2 5210 __func__, tagupper, taglower);
75167d2c
SC
5211 /* no unmap needed here because no data xfer. */
5212
5213 ei = c->err_info;
5214 switch (ei->CommandStatus) {
5215 case CMD_SUCCESS:
5216 break;
9437ac43
SC
5217 case CMD_TMF_STATUS:
5218 rc = hpsa_evaluate_tmf_status(h, c);
5219 break;
75167d2c
SC
5220 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5221 rc = -1;
5222 break;
5223 default:
5224 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
17eb87d2 5225 __func__, tagupper, taglower);
d1e8beac 5226 hpsa_scsi_interpret_error(h, c);
75167d2c
SC
5227 rc = -1;
5228 break;
5229 }
45fcb86e 5230 cmd_free(h, c);
dd0e19f3
ST
5231 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5232 __func__, tagupper, taglower);
75167d2c
SC
5233 return rc;
5234}
5235
8be986cc
SC
5236static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5237 struct CommandList *command_to_abort, int reply_queue)
5238{
5239 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5240 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5241 struct io_accel2_cmd *c2a =
5242 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
a58e7e53 5243 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
8be986cc
SC
5244 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5245
5246 /*
5247 * We're overlaying struct hpsa_tmf_struct on top of something which
5248 * was allocated as a struct io_accel2_cmd, so we better be sure it
5249 * actually fits, and doesn't overrun the error info space.
5250 */
5251 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5252 sizeof(struct io_accel2_cmd));
5253 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5254 offsetof(struct hpsa_tmf_struct, error_len) +
5255 sizeof(ac->error_len));
5256
5257 c->cmd_type = IOACCEL2_TMF;
a58e7e53
WS
5258 c->scsi_cmd = SCSI_CMD_BUSY;
5259
8be986cc
SC
5260 /* Adjust the DMA address to point to the accelerated command buffer */
5261 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5262 (c->cmdindex * sizeof(struct io_accel2_cmd));
5263 BUG_ON(c->busaddr & 0x0000007F);
5264
5265 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5266 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5267 ac->reply_queue = reply_queue;
5268 ac->tmf = IOACCEL2_TMF_ABORT;
5269 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5270 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5271 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5272 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5273 ac->error_ptr = cpu_to_le64(c->busaddr +
5274 offsetof(struct io_accel2_cmd, error_data));
5275 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5276}
5277
54b6e9e9
ST
5278/* ioaccel2 path firmware cannot handle abort task requests.
5279 * Change abort requests to physical target reset, and send to the
5280 * address of the physical disk used for the ioaccel 2 command.
5281 * Return 0 on success (IO_OK)
5282 * -1 on failure
5283 */
5284
5285static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
25163bd5 5286 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
54b6e9e9
ST
5287{
5288 int rc = IO_OK;
5289 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5290 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5291 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5292 unsigned char *psa = &phys_scsi3addr[0];
5293
5294 /* Get a pointer to the hpsa logical device. */
7fa3030c 5295 scmd = abort->scsi_cmd;
54b6e9e9
ST
5296 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5297 if (dev == NULL) {
5298 dev_warn(&h->pdev->dev,
5299 "Cannot abort: no device pointer for command.\n");
5300 return -1; /* not abortable */
5301 }
5302
2ba8bfc8
SC
5303 if (h->raid_offload_debug > 0)
5304 dev_info(&h->pdev->dev,
0d96ef5f 5305 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2ba8bfc8 5306 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
0d96ef5f 5307 "Reset as abort",
2ba8bfc8
SC
5308 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5309 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5310
54b6e9e9
ST
5311 if (!dev->offload_enabled) {
5312 dev_warn(&h->pdev->dev,
5313 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5314 return -1; /* not abortable */
5315 }
5316
5317 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5318 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5319 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5320 return -1; /* not abortable */
5321 }
5322
5323 /* send the reset */
2ba8bfc8
SC
5324 if (h->raid_offload_debug > 0)
5325 dev_info(&h->pdev->dev,
5326 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5327 psa[0], psa[1], psa[2], psa[3],
5328 psa[4], psa[5], psa[6], psa[7]);
d604f533 5329 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
54b6e9e9
ST
5330 if (rc != 0) {
5331 dev_warn(&h->pdev->dev,
5332 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5333 psa[0], psa[1], psa[2], psa[3],
5334 psa[4], psa[5], psa[6], psa[7]);
5335 return rc; /* failed to reset */
5336 }
5337
5338 /* wait for device to recover */
b69324ff 5339 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
54b6e9e9
ST
5340 dev_warn(&h->pdev->dev,
5341 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5342 psa[0], psa[1], psa[2], psa[3],
5343 psa[4], psa[5], psa[6], psa[7]);
5344 return -1; /* failed to recover */
5345 }
5346
5347 /* device recovered */
5348 dev_info(&h->pdev->dev,
5349 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5350 psa[0], psa[1], psa[2], psa[3],
5351 psa[4], psa[5], psa[6], psa[7]);
5352
5353 return rc; /* success */
5354}
5355
8be986cc
SC
5356static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5357 struct CommandList *abort, int reply_queue)
5358{
5359 int rc = IO_OK;
5360 struct CommandList *c;
5361 __le32 taglower, tagupper;
5362 struct hpsa_scsi_dev_t *dev;
5363 struct io_accel2_cmd *c2;
5364
5365 dev = abort->scsi_cmd->device->hostdata;
5366 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5367 return -1;
5368
5369 c = cmd_alloc(h);
5370 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5371 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5372 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5373 hpsa_get_tag(h, abort, &taglower, &tagupper);
5374 dev_dbg(&h->pdev->dev,
5375 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5376 __func__, tagupper, taglower);
5377 /* no unmap needed here because no data xfer. */
5378
5379 dev_dbg(&h->pdev->dev,
5380 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5381 __func__, tagupper, taglower, c2->error_data.serv_response);
5382 switch (c2->error_data.serv_response) {
5383 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5384 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5385 rc = 0;
5386 break;
5387 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5388 case IOACCEL2_SERV_RESPONSE_FAILURE:
5389 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5390 rc = -1;
5391 break;
5392 default:
5393 dev_warn(&h->pdev->dev,
5394 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5395 __func__, tagupper, taglower,
5396 c2->error_data.serv_response);
5397 rc = -1;
5398 }
5399 cmd_free(h, c);
5400 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5401 tagupper, taglower);
5402 return rc;
5403}
5404
6cba3f19 5405static int hpsa_send_abort_both_ways(struct ctlr_info *h,
25163bd5 5406 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
6cba3f19 5407{
8be986cc
SC
5408 /*
5409 * ioccelerator mode 2 commands should be aborted via the
54b6e9e9 5410 * accelerated path, since RAID path is unaware of these commands,
8be986cc
SC
5411 * but not all underlying firmware can handle abort TMF.
5412 * Change abort to physical device reset when abort TMF is unsupported.
54b6e9e9 5413 */
8be986cc
SC
5414 if (abort->cmd_type == CMD_IOACCEL2) {
5415 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5416 return hpsa_send_abort_ioaccel2(h, abort,
5417 reply_queue);
5418 else
5419 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
25163bd5 5420 abort, reply_queue);
8be986cc 5421 }
9b5c48c2 5422 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
25163bd5 5423}
54b6e9e9 5424
25163bd5
WS
5425/* Find out which reply queue a command was meant to return on */
5426static int hpsa_extract_reply_queue(struct ctlr_info *h,
5427 struct CommandList *c)
5428{
5429 if (c->cmd_type == CMD_IOACCEL2)
5430 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5431 return c->Header.ReplyQueue;
6cba3f19
SC
5432}
5433
9b5c48c2
SC
5434/*
5435 * Limit concurrency of abort commands to prevent
5436 * over-subscription of commands
5437 */
5438static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5439{
5440#define ABORT_CMD_WAIT_MSECS 5000
5441 return !wait_event_timeout(h->abort_cmd_wait_queue,
5442 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5443 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5444}
5445
75167d2c
SC
5446/* Send an abort for the specified command.
5447 * If the device and controller support it,
5448 * send a task abort request.
5449 */
5450static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5451{
5452
a58e7e53 5453 int rc;
75167d2c
SC
5454 struct ctlr_info *h;
5455 struct hpsa_scsi_dev_t *dev;
5456 struct CommandList *abort; /* pointer to command to be aborted */
75167d2c
SC
5457 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5458 char msg[256]; /* For debug messaging. */
5459 int ml = 0;
2b08b3e9 5460 __le32 tagupper, taglower;
25163bd5
WS
5461 int refcount, reply_queue;
5462
5463 if (sc == NULL)
5464 return FAILED;
75167d2c 5465
9b5c48c2
SC
5466 if (sc->device == NULL)
5467 return FAILED;
5468
75167d2c
SC
5469 /* Find the controller of the command to be aborted */
5470 h = sdev_to_hba(sc->device);
9b5c48c2 5471 if (h == NULL)
75167d2c
SC
5472 return FAILED;
5473
25163bd5
WS
5474 /* Find the device of the command to be aborted */
5475 dev = sc->device->hostdata;
5476 if (!dev) {
5477 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5478 msg);
e345893b 5479 return FAILED;
25163bd5
WS
5480 }
5481
5482 /* If controller locked up, we can guarantee command won't complete */
5483 if (lockup_detected(h)) {
5484 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5485 "ABORT FAILED, lockup detected");
5486 return FAILED;
5487 }
5488
5489 /* This is a good time to check if controller lockup has occurred */
5490 if (detect_controller_lockup(h)) {
5491 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5492 "ABORT FAILED, new lockup detected");
5493 return FAILED;
5494 }
e345893b 5495
75167d2c
SC
5496 /* Check that controller supports some kind of task abort */
5497 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5498 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5499 return FAILED;
5500
5501 memset(msg, 0, sizeof(msg));
4b761557 5502 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
75167d2c 5503 h->scsi_host->host_no, sc->device->channel,
0d96ef5f 5504 sc->device->id, sc->device->lun,
4b761557 5505 "Aborting command", sc);
75167d2c 5506
75167d2c
SC
5507 /* Get SCSI command to be aborted */
5508 abort = (struct CommandList *) sc->host_scribble;
5509 if (abort == NULL) {
281a7fd0
WS
5510 /* This can happen if the command already completed. */
5511 return SUCCESS;
5512 }
5513 refcount = atomic_inc_return(&abort->refcount);
5514 if (refcount == 1) { /* Command is done already. */
5515 cmd_free(h, abort);
5516 return SUCCESS;
75167d2c 5517 }
9b5c48c2
SC
5518
5519 /* Don't bother trying the abort if we know it won't work. */
5520 if (abort->cmd_type != CMD_IOACCEL2 &&
5521 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5522 cmd_free(h, abort);
5523 return FAILED;
5524 }
5525
a58e7e53
WS
5526 /*
5527 * Check that we're aborting the right command.
5528 * It's possible the CommandList already completed and got re-used.
5529 */
5530 if (abort->scsi_cmd != sc) {
5531 cmd_free(h, abort);
5532 return SUCCESS;
5533 }
5534
5535 abort->abort_pending = true;
17eb87d2 5536 hpsa_get_tag(h, abort, &taglower, &tagupper);
25163bd5 5537 reply_queue = hpsa_extract_reply_queue(h, abort);
17eb87d2 5538 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
7fa3030c 5539 as = abort->scsi_cmd;
75167d2c 5540 if (as != NULL)
4b761557
RE
5541 ml += sprintf(msg+ml,
5542 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5543 as->cmd_len, as->cmnd[0], as->cmnd[1],
5544 as->serial_number);
5545 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
0d96ef5f 5546 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
4b761557 5547
75167d2c
SC
5548 /*
5549 * Command is in flight, or possibly already completed
5550 * by the firmware (but not to the scsi mid layer) but we can't
5551 * distinguish which. Send the abort down.
5552 */
9b5c48c2
SC
5553 if (wait_for_available_abort_cmd(h)) {
5554 dev_warn(&h->pdev->dev,
4b761557
RE
5555 "%s FAILED, timeout waiting for an abort command to become available.\n",
5556 msg);
9b5c48c2
SC
5557 cmd_free(h, abort);
5558 return FAILED;
5559 }
25163bd5 5560 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
9b5c48c2
SC
5561 atomic_inc(&h->abort_cmds_available);
5562 wake_up_all(&h->abort_cmd_wait_queue);
75167d2c 5563 if (rc != 0) {
4b761557 5564 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
0d96ef5f 5565 hpsa_show_dev_msg(KERN_WARNING, h, dev,
4b761557 5566 "FAILED to abort command");
281a7fd0 5567 cmd_free(h, abort);
75167d2c
SC
5568 return FAILED;
5569 }
4b761557 5570 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
d604f533 5571 wait_event(h->event_sync_wait_queue,
a58e7e53 5572 abort->scsi_cmd != sc || lockup_detected(h));
281a7fd0 5573 cmd_free(h, abort);
a58e7e53 5574 return !lockup_detected(h) ? SUCCESS : FAILED;
75167d2c
SC
5575}
5576
73153fe5
WS
5577/*
5578 * For operations with an associated SCSI command, a command block is allocated
5579 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
5580 * block request tag as an index into a table of entries. cmd_tagged_free() is
5581 * the complement, although cmd_free() may be called instead.
5582 */
5583static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5584 struct scsi_cmnd *scmd)
5585{
5586 int idx = hpsa_get_cmd_index(scmd);
5587 struct CommandList *c = h->cmd_pool + idx;
5588
5589 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5590 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5591 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5592 /* The index value comes from the block layer, so if it's out of
5593 * bounds, it's probably not our bug.
5594 */
5595 BUG();
5596 }
5597
5598 atomic_inc(&c->refcount);
5599 if (unlikely(!hpsa_is_cmd_idle(c))) {
5600 /*
5601 * We expect that the SCSI layer will hand us a unique tag
5602 * value. Thus, there should never be a collision here between
5603 * two requests...because if the selected command isn't idle
5604 * then someone is going to be very disappointed.
5605 */
5606 dev_err(&h->pdev->dev,
5607 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5608 idx);
5609 if (c->scsi_cmd != NULL)
5610 scsi_print_command(c->scsi_cmd);
5611 scsi_print_command(scmd);
5612 }
5613
5614 hpsa_cmd_partial_init(h, idx, c);
5615 return c;
5616}
5617
5618static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5619{
5620 /*
5621 * Release our reference to the block. We don't need to do anything
5622 * else to free it, because it is accessed by index. (There's no point
5623 * in checking the result of the decrement, since we cannot guarantee
5624 * that there isn't a concurrent abort which is also accessing it.)
5625 */
5626 (void)atomic_dec(&c->refcount);
5627}
5628
edd16368
SC
5629/*
5630 * For operations that cannot sleep, a command block is allocated at init,
5631 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5632 * which ones are free or in use. Lock must be held when calling this.
5633 * cmd_free() is the complement.
bf43caf3
RE
5634 * This function never gives up and returns NULL. If it hangs,
5635 * another thread must call cmd_free() to free some tags.
edd16368 5636 */
281a7fd0 5637
edd16368
SC
5638static struct CommandList *cmd_alloc(struct ctlr_info *h)
5639{
5640 struct CommandList *c;
360c73bd 5641 int refcount, i;
73153fe5 5642 int offset = 0;
4c413128 5643
33811026
RE
5644 /*
5645 * There is some *extremely* small but non-zero chance that that
4c413128
SC
5646 * multiple threads could get in here, and one thread could
5647 * be scanning through the list of bits looking for a free
5648 * one, but the free ones are always behind him, and other
5649 * threads sneak in behind him and eat them before he can
5650 * get to them, so that while there is always a free one, a
5651 * very unlucky thread might be starved anyway, never able to
5652 * beat the other threads. In reality, this happens so
5653 * infrequently as to be indistinguishable from never.
73153fe5
WS
5654 *
5655 * Note that we start allocating commands before the SCSI host structure
5656 * is initialized. Since the search starts at bit zero, this
5657 * all works, since we have at least one command structure available;
5658 * however, it means that the structures with the low indexes have to be
5659 * reserved for driver-initiated requests, while requests from the block
5660 * layer will use the higher indexes.
4c413128 5661 */
edd16368 5662
281a7fd0 5663 for (;;) {
73153fe5
WS
5664 i = find_next_zero_bit(h->cmd_pool_bits,
5665 HPSA_NRESERVED_CMDS,
5666 offset);
5667 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
281a7fd0
WS
5668 offset = 0;
5669 continue;
5670 }
5671 c = h->cmd_pool + i;
5672 refcount = atomic_inc_return(&c->refcount);
5673 if (unlikely(refcount > 1)) {
5674 cmd_free(h, c); /* already in use */
73153fe5 5675 offset = (i + 1) % HPSA_NRESERVED_CMDS;
281a7fd0
WS
5676 continue;
5677 }
5678 set_bit(i & (BITS_PER_LONG - 1),
5679 h->cmd_pool_bits + (i / BITS_PER_LONG));
5680 break; /* it's ours now. */
5681 }
360c73bd 5682 hpsa_cmd_partial_init(h, i, c);
edd16368
SC
5683 return c;
5684}
5685
73153fe5
WS
5686/*
5687 * This is the complementary operation to cmd_alloc(). Note, however, in some
5688 * corner cases it may also be used to free blocks allocated by
5689 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
5690 * the clear-bit is harmless.
5691 */
edd16368
SC
5692static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5693{
281a7fd0
WS
5694 if (atomic_dec_and_test(&c->refcount)) {
5695 int i;
edd16368 5696
281a7fd0
WS
5697 i = c - h->cmd_pool;
5698 clear_bit(i & (BITS_PER_LONG - 1),
5699 h->cmd_pool_bits + (i / BITS_PER_LONG));
5700 }
edd16368
SC
5701}
5702
edd16368
SC
5703#ifdef CONFIG_COMPAT
5704
42a91641
DB
5705static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5706 void __user *arg)
edd16368
SC
5707{
5708 IOCTL32_Command_struct __user *arg32 =
5709 (IOCTL32_Command_struct __user *) arg;
5710 IOCTL_Command_struct arg64;
5711 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5712 int err;
5713 u32 cp;
5714
938abd84 5715 memset(&arg64, 0, sizeof(arg64));
edd16368
SC
5716 err = 0;
5717 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5718 sizeof(arg64.LUN_info));
5719 err |= copy_from_user(&arg64.Request, &arg32->Request,
5720 sizeof(arg64.Request));
5721 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5722 sizeof(arg64.error_info));
5723 err |= get_user(arg64.buf_size, &arg32->buf_size);
5724 err |= get_user(cp, &arg32->buf);
5725 arg64.buf = compat_ptr(cp);
5726 err |= copy_to_user(p, &arg64, sizeof(arg64));
5727
5728 if (err)
5729 return -EFAULT;
5730
42a91641 5731 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
edd16368
SC
5732 if (err)
5733 return err;
5734 err |= copy_in_user(&arg32->error_info, &p->error_info,
5735 sizeof(arg32->error_info));
5736 if (err)
5737 return -EFAULT;
5738 return err;
5739}
5740
5741static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
42a91641 5742 int cmd, void __user *arg)
edd16368
SC
5743{
5744 BIG_IOCTL32_Command_struct __user *arg32 =
5745 (BIG_IOCTL32_Command_struct __user *) arg;
5746 BIG_IOCTL_Command_struct arg64;
5747 BIG_IOCTL_Command_struct __user *p =
5748 compat_alloc_user_space(sizeof(arg64));
5749 int err;
5750 u32 cp;
5751
938abd84 5752 memset(&arg64, 0, sizeof(arg64));
edd16368
SC
5753 err = 0;
5754 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5755 sizeof(arg64.LUN_info));
5756 err |= copy_from_user(&arg64.Request, &arg32->Request,
5757 sizeof(arg64.Request));
5758 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5759 sizeof(arg64.error_info));
5760 err |= get_user(arg64.buf_size, &arg32->buf_size);
5761 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5762 err |= get_user(cp, &arg32->buf);
5763 arg64.buf = compat_ptr(cp);
5764 err |= copy_to_user(p, &arg64, sizeof(arg64));
5765
5766 if (err)
5767 return -EFAULT;
5768
42a91641 5769 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
edd16368
SC
5770 if (err)
5771 return err;
5772 err |= copy_in_user(&arg32->error_info, &p->error_info,
5773 sizeof(arg32->error_info));
5774 if (err)
5775 return -EFAULT;
5776 return err;
5777}
71fe75a7 5778
42a91641 5779static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
71fe75a7
SC
5780{
5781 switch (cmd) {
5782 case CCISS_GETPCIINFO:
5783 case CCISS_GETINTINFO:
5784 case CCISS_SETINTINFO:
5785 case CCISS_GETNODENAME:
5786 case CCISS_SETNODENAME:
5787 case CCISS_GETHEARTBEAT:
5788 case CCISS_GETBUSTYPES:
5789 case CCISS_GETFIRMVER:
5790 case CCISS_GETDRIVVER:
5791 case CCISS_REVALIDVOLS:
5792 case CCISS_DEREGDISK:
5793 case CCISS_REGNEWDISK:
5794 case CCISS_REGNEWD:
5795 case CCISS_RESCANDISK:
5796 case CCISS_GETLUNINFO:
5797 return hpsa_ioctl(dev, cmd, arg);
5798
5799 case CCISS_PASSTHRU32:
5800 return hpsa_ioctl32_passthru(dev, cmd, arg);
5801 case CCISS_BIG_PASSTHRU32:
5802 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5803
5804 default:
5805 return -ENOIOCTLCMD;
5806 }
5807}
edd16368
SC
5808#endif
5809
5810static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5811{
5812 struct hpsa_pci_info pciinfo;
5813
5814 if (!argp)
5815 return -EINVAL;
5816 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5817 pciinfo.bus = h->pdev->bus->number;
5818 pciinfo.dev_fn = h->pdev->devfn;
5819 pciinfo.board_id = h->board_id;
5820 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5821 return -EFAULT;
5822 return 0;
5823}
5824
5825static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5826{
5827 DriverVer_type DriverVer;
5828 unsigned char vmaj, vmin, vsubmin;
5829 int rc;
5830
5831 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5832 &vmaj, &vmin, &vsubmin);
5833 if (rc != 3) {
5834 dev_info(&h->pdev->dev, "driver version string '%s' "
5835 "unrecognized.", HPSA_DRIVER_VERSION);
5836 vmaj = 0;
5837 vmin = 0;
5838 vsubmin = 0;
5839 }
5840 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
5841 if (!argp)
5842 return -EINVAL;
5843 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
5844 return -EFAULT;
5845 return 0;
5846}
5847
5848static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5849{
5850 IOCTL_Command_struct iocommand;
5851 struct CommandList *c;
5852 char *buff = NULL;
50a0decf 5853 u64 temp64;
c1f63c8f 5854 int rc = 0;
edd16368
SC
5855
5856 if (!argp)
5857 return -EINVAL;
5858 if (!capable(CAP_SYS_RAWIO))
5859 return -EPERM;
5860 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
5861 return -EFAULT;
5862 if ((iocommand.buf_size < 1) &&
5863 (iocommand.Request.Type.Direction != XFER_NONE)) {
5864 return -EINVAL;
5865 }
5866 if (iocommand.buf_size > 0) {
5867 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
5868 if (buff == NULL)
2dd02d74 5869 return -ENOMEM;
9233fb10 5870 if (iocommand.Request.Type.Direction & XFER_WRITE) {
b03a7771
SC
5871 /* Copy the data into the buffer we created */
5872 if (copy_from_user(buff, iocommand.buf,
5873 iocommand.buf_size)) {
c1f63c8f
SC
5874 rc = -EFAULT;
5875 goto out_kfree;
b03a7771
SC
5876 }
5877 } else {
5878 memset(buff, 0, iocommand.buf_size);
edd16368 5879 }
b03a7771 5880 }
45fcb86e 5881 c = cmd_alloc(h);
bf43caf3 5882
edd16368
SC
5883 /* Fill in the command type */
5884 c->cmd_type = CMD_IOCTL_PEND;
a58e7e53 5885 c->scsi_cmd = SCSI_CMD_BUSY;
edd16368
SC
5886 /* Fill in Command Header */
5887 c->Header.ReplyQueue = 0; /* unused in simple mode */
5888 if (iocommand.buf_size > 0) { /* buffer to fill */
5889 c->Header.SGList = 1;
50a0decf 5890 c->Header.SGTotal = cpu_to_le16(1);
edd16368
SC
5891 } else { /* no buffers to fill */
5892 c->Header.SGList = 0;
50a0decf 5893 c->Header.SGTotal = cpu_to_le16(0);
edd16368
SC
5894 }
5895 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
edd16368
SC
5896
5897 /* Fill in Request block */
5898 memcpy(&c->Request, &iocommand.Request,
5899 sizeof(c->Request));
5900
5901 /* Fill in the scatter gather information */
5902 if (iocommand.buf_size > 0) {
50a0decf 5903 temp64 = pci_map_single(h->pdev, buff,
edd16368 5904 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
50a0decf
SC
5905 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
5906 c->SG[0].Addr = cpu_to_le64(0);
5907 c->SG[0].Len = cpu_to_le32(0);
bcc48ffa
SC
5908 rc = -ENOMEM;
5909 goto out;
5910 }
50a0decf
SC
5911 c->SG[0].Addr = cpu_to_le64(temp64);
5912 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
5913 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
edd16368 5914 }
25163bd5 5915 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
c2dd32e0
SC
5916 if (iocommand.buf_size > 0)
5917 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
edd16368 5918 check_ioctl_unit_attention(h, c);
25163bd5
WS
5919 if (rc) {
5920 rc = -EIO;
5921 goto out;
5922 }
edd16368
SC
5923
5924 /* Copy the error information out */
5925 memcpy(&iocommand.error_info, c->err_info,
5926 sizeof(iocommand.error_info));
5927 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
c1f63c8f
SC
5928 rc = -EFAULT;
5929 goto out;
edd16368 5930 }
9233fb10 5931 if ((iocommand.Request.Type.Direction & XFER_READ) &&
b03a7771 5932 iocommand.buf_size > 0) {
edd16368
SC
5933 /* Copy the data out of the buffer we created */
5934 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
c1f63c8f
SC
5935 rc = -EFAULT;
5936 goto out;
edd16368
SC
5937 }
5938 }
c1f63c8f 5939out:
45fcb86e 5940 cmd_free(h, c);
c1f63c8f
SC
5941out_kfree:
5942 kfree(buff);
5943 return rc;
edd16368
SC
5944}
5945
5946static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
5947{
5948 BIG_IOCTL_Command_struct *ioc;
5949 struct CommandList *c;
5950 unsigned char **buff = NULL;
5951 int *buff_size = NULL;
50a0decf 5952 u64 temp64;
edd16368
SC
5953 BYTE sg_used = 0;
5954 int status = 0;
01a02ffc
SC
5955 u32 left;
5956 u32 sz;
edd16368
SC
5957 BYTE __user *data_ptr;
5958
5959 if (!argp)
5960 return -EINVAL;
5961 if (!capable(CAP_SYS_RAWIO))
5962 return -EPERM;
5963 ioc = (BIG_IOCTL_Command_struct *)
5964 kmalloc(sizeof(*ioc), GFP_KERNEL);
5965 if (!ioc) {
5966 status = -ENOMEM;
5967 goto cleanup1;
5968 }
5969 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
5970 status = -EFAULT;
5971 goto cleanup1;
5972 }
5973 if ((ioc->buf_size < 1) &&
5974 (ioc->Request.Type.Direction != XFER_NONE)) {
5975 status = -EINVAL;
5976 goto cleanup1;
5977 }
5978 /* Check kmalloc limits using all SGs */
5979 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
5980 status = -EINVAL;
5981 goto cleanup1;
5982 }
d66ae08b 5983 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
edd16368
SC
5984 status = -EINVAL;
5985 goto cleanup1;
5986 }
d66ae08b 5987 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
edd16368
SC
5988 if (!buff) {
5989 status = -ENOMEM;
5990 goto cleanup1;
5991 }
d66ae08b 5992 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
edd16368
SC
5993 if (!buff_size) {
5994 status = -ENOMEM;
5995 goto cleanup1;
5996 }
5997 left = ioc->buf_size;
5998 data_ptr = ioc->buf;
5999 while (left) {
6000 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6001 buff_size[sg_used] = sz;
6002 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6003 if (buff[sg_used] == NULL) {
6004 status = -ENOMEM;
6005 goto cleanup1;
6006 }
9233fb10 6007 if (ioc->Request.Type.Direction & XFER_WRITE) {
edd16368 6008 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
0758f4f7 6009 status = -EFAULT;
edd16368
SC
6010 goto cleanup1;
6011 }
6012 } else
6013 memset(buff[sg_used], 0, sz);
6014 left -= sz;
6015 data_ptr += sz;
6016 sg_used++;
6017 }
45fcb86e 6018 c = cmd_alloc(h);
bf43caf3 6019
edd16368 6020 c->cmd_type = CMD_IOCTL_PEND;
a58e7e53 6021 c->scsi_cmd = SCSI_CMD_BUSY;
edd16368 6022 c->Header.ReplyQueue = 0;
50a0decf
SC
6023 c->Header.SGList = (u8) sg_used;
6024 c->Header.SGTotal = cpu_to_le16(sg_used);
edd16368 6025 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
edd16368
SC
6026 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6027 if (ioc->buf_size > 0) {
6028 int i;
6029 for (i = 0; i < sg_used; i++) {
50a0decf 6030 temp64 = pci_map_single(h->pdev, buff[i],
edd16368 6031 buff_size[i], PCI_DMA_BIDIRECTIONAL);
50a0decf
SC
6032 if (dma_mapping_error(&h->pdev->dev,
6033 (dma_addr_t) temp64)) {
6034 c->SG[i].Addr = cpu_to_le64(0);
6035 c->SG[i].Len = cpu_to_le32(0);
bcc48ffa
SC
6036 hpsa_pci_unmap(h->pdev, c, i,
6037 PCI_DMA_BIDIRECTIONAL);
6038 status = -ENOMEM;
e2d4a1f6 6039 goto cleanup0;
bcc48ffa 6040 }
50a0decf
SC
6041 c->SG[i].Addr = cpu_to_le64(temp64);
6042 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6043 c->SG[i].Ext = cpu_to_le32(0);
edd16368 6044 }
50a0decf 6045 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
edd16368 6046 }
25163bd5 6047 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
b03a7771
SC
6048 if (sg_used)
6049 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
edd16368 6050 check_ioctl_unit_attention(h, c);
25163bd5
WS
6051 if (status) {
6052 status = -EIO;
6053 goto cleanup0;
6054 }
6055
edd16368
SC
6056 /* Copy the error information out */
6057 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6058 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
edd16368 6059 status = -EFAULT;
e2d4a1f6 6060 goto cleanup0;
edd16368 6061 }
9233fb10 6062 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
2b08b3e9
DB
6063 int i;
6064
edd16368
SC
6065 /* Copy the data out of the buffer we created */
6066 BYTE __user *ptr = ioc->buf;
6067 for (i = 0; i < sg_used; i++) {
6068 if (copy_to_user(ptr, buff[i], buff_size[i])) {
edd16368 6069 status = -EFAULT;
e2d4a1f6 6070 goto cleanup0;
edd16368
SC
6071 }
6072 ptr += buff_size[i];
6073 }
6074 }
edd16368 6075 status = 0;
e2d4a1f6 6076cleanup0:
45fcb86e 6077 cmd_free(h, c);
edd16368
SC
6078cleanup1:
6079 if (buff) {
2b08b3e9
DB
6080 int i;
6081
edd16368
SC
6082 for (i = 0; i < sg_used; i++)
6083 kfree(buff[i]);
6084 kfree(buff);
6085 }
6086 kfree(buff_size);
6087 kfree(ioc);
6088 return status;
6089}
6090
6091static void check_ioctl_unit_attention(struct ctlr_info *h,
6092 struct CommandList *c)
6093{
6094 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6095 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6096 (void) check_for_unit_attention(h, c);
6097}
0390f0c0 6098
edd16368
SC
6099/*
6100 * ioctl
6101 */
42a91641 6102static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
edd16368
SC
6103{
6104 struct ctlr_info *h;
6105 void __user *argp = (void __user *)arg;
0390f0c0 6106 int rc;
edd16368
SC
6107
6108 h = sdev_to_hba(dev);
6109
6110 switch (cmd) {
6111 case CCISS_DEREGDISK:
6112 case CCISS_REGNEWDISK:
6113 case CCISS_REGNEWD:
a08a8471 6114 hpsa_scan_start(h->scsi_host);
edd16368
SC
6115 return 0;
6116 case CCISS_GETPCIINFO:
6117 return hpsa_getpciinfo_ioctl(h, argp);
6118 case CCISS_GETDRIVVER:
6119 return hpsa_getdrivver_ioctl(h, argp);
6120 case CCISS_PASSTHRU:
34f0c627 6121 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
0390f0c0
SC
6122 return -EAGAIN;
6123 rc = hpsa_passthru_ioctl(h, argp);
34f0c627 6124 atomic_inc(&h->passthru_cmds_avail);
0390f0c0 6125 return rc;
edd16368 6126 case CCISS_BIG_PASSTHRU:
34f0c627 6127 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
0390f0c0
SC
6128 return -EAGAIN;
6129 rc = hpsa_big_passthru_ioctl(h, argp);
34f0c627 6130 atomic_inc(&h->passthru_cmds_avail);
0390f0c0 6131 return rc;
edd16368
SC
6132 default:
6133 return -ENOTTY;
6134 }
6135}
6136
bf43caf3 6137static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6f039790 6138 u8 reset_type)
64670ac8
SC
6139{
6140 struct CommandList *c;
6141
6142 c = cmd_alloc(h);
bf43caf3 6143
a2dac136
SC
6144 /* fill_cmd can't fail here, no data buffer to map */
6145 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
64670ac8
SC
6146 RAID_CTLR_LUNID, TYPE_MSG);
6147 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6148 c->waiting = NULL;
6149 enqueue_cmd_and_start_io(h, c);
6150 /* Don't wait for completion, the reset won't complete. Don't free
6151 * the command either. This is the last command we will send before
6152 * re-initializing everything, so it doesn't matter and won't leak.
6153 */
bf43caf3 6154 return;
64670ac8
SC
6155}
6156
a2dac136 6157static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
b7bb24eb 6158 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
edd16368
SC
6159 int cmd_type)
6160{
6161 int pci_dir = XFER_NONE;
9b5c48c2 6162 u64 tag; /* for commands to be aborted */
edd16368
SC
6163
6164 c->cmd_type = CMD_IOCTL_PEND;
a58e7e53 6165 c->scsi_cmd = SCSI_CMD_BUSY;
edd16368
SC
6166 c->Header.ReplyQueue = 0;
6167 if (buff != NULL && size > 0) {
6168 c->Header.SGList = 1;
50a0decf 6169 c->Header.SGTotal = cpu_to_le16(1);
edd16368
SC
6170 } else {
6171 c->Header.SGList = 0;
50a0decf 6172 c->Header.SGTotal = cpu_to_le16(0);
edd16368 6173 }
edd16368
SC
6174 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6175
edd16368
SC
6176 if (cmd_type == TYPE_CMD) {
6177 switch (cmd) {
6178 case HPSA_INQUIRY:
6179 /* are we trying to read a vital product page */
b7bb24eb 6180 if (page_code & VPD_PAGE) {
edd16368 6181 c->Request.CDB[1] = 0x01;
b7bb24eb 6182 c->Request.CDB[2] = (page_code & 0xff);
edd16368
SC
6183 }
6184 c->Request.CDBLen = 6;
a505b86f
SC
6185 c->Request.type_attr_dir =
6186 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
edd16368
SC
6187 c->Request.Timeout = 0;
6188 c->Request.CDB[0] = HPSA_INQUIRY;
6189 c->Request.CDB[4] = size & 0xFF;
6190 break;
6191 case HPSA_REPORT_LOG:
6192 case HPSA_REPORT_PHYS:
6193 /* Talking to controller so It's a physical command
6194 mode = 00 target = 0. Nothing to write.
6195 */
6196 c->Request.CDBLen = 12;
a505b86f
SC
6197 c->Request.type_attr_dir =
6198 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
edd16368
SC
6199 c->Request.Timeout = 0;
6200 c->Request.CDB[0] = cmd;
6201 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6202 c->Request.CDB[7] = (size >> 16) & 0xFF;
6203 c->Request.CDB[8] = (size >> 8) & 0xFF;
6204 c->Request.CDB[9] = size & 0xFF;
6205 break;
edd16368
SC
6206 case HPSA_CACHE_FLUSH:
6207 c->Request.CDBLen = 12;
a505b86f
SC
6208 c->Request.type_attr_dir =
6209 TYPE_ATTR_DIR(cmd_type,
6210 ATTR_SIMPLE, XFER_WRITE);
edd16368
SC
6211 c->Request.Timeout = 0;
6212 c->Request.CDB[0] = BMIC_WRITE;
6213 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
bb158eab
SC
6214 c->Request.CDB[7] = (size >> 8) & 0xFF;
6215 c->Request.CDB[8] = size & 0xFF;
edd16368
SC
6216 break;
6217 case TEST_UNIT_READY:
6218 c->Request.CDBLen = 6;
a505b86f
SC
6219 c->Request.type_attr_dir =
6220 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
edd16368
SC
6221 c->Request.Timeout = 0;
6222 break;
283b4a9b
SC
6223 case HPSA_GET_RAID_MAP:
6224 c->Request.CDBLen = 12;
a505b86f
SC
6225 c->Request.type_attr_dir =
6226 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
283b4a9b
SC
6227 c->Request.Timeout = 0;
6228 c->Request.CDB[0] = HPSA_CISS_READ;
6229 c->Request.CDB[1] = cmd;
6230 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6231 c->Request.CDB[7] = (size >> 16) & 0xFF;
6232 c->Request.CDB[8] = (size >> 8) & 0xFF;
6233 c->Request.CDB[9] = size & 0xFF;
6234 break;
316b221a
SC
6235 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6236 c->Request.CDBLen = 10;
a505b86f
SC
6237 c->Request.type_attr_dir =
6238 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
316b221a
SC
6239 c->Request.Timeout = 0;
6240 c->Request.CDB[0] = BMIC_READ;
6241 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6242 c->Request.CDB[7] = (size >> 16) & 0xFF;
6243 c->Request.CDB[8] = (size >> 8) & 0xFF;
6244 break;
03383736
DB
6245 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6246 c->Request.CDBLen = 10;
6247 c->Request.type_attr_dir =
6248 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6249 c->Request.Timeout = 0;
6250 c->Request.CDB[0] = BMIC_READ;
6251 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6252 c->Request.CDB[7] = (size >> 16) & 0xFF;
6253 c->Request.CDB[8] = (size >> 8) & 0XFF;
6254 break;
edd16368
SC
6255 default:
6256 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6257 BUG();
a2dac136 6258 return -1;
edd16368
SC
6259 }
6260 } else if (cmd_type == TYPE_MSG) {
6261 switch (cmd) {
6262
6263 case HPSA_DEVICE_RESET_MSG:
6264 c->Request.CDBLen = 16;
a505b86f
SC
6265 c->Request.type_attr_dir =
6266 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
edd16368 6267 c->Request.Timeout = 0; /* Don't time out */
64670ac8
SC
6268 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6269 c->Request.CDB[0] = cmd;
21e89afd 6270 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
edd16368
SC
6271 /* If bytes 4-7 are zero, it means reset the */
6272 /* LunID device */
6273 c->Request.CDB[4] = 0x00;
6274 c->Request.CDB[5] = 0x00;
6275 c->Request.CDB[6] = 0x00;
6276 c->Request.CDB[7] = 0x00;
75167d2c
SC
6277 break;
6278 case HPSA_ABORT_MSG:
9b5c48c2 6279 memcpy(&tag, buff, sizeof(tag));
2b08b3e9 6280 dev_dbg(&h->pdev->dev,
9b5c48c2
SC
6281 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6282 tag, c->Header.tag);
75167d2c 6283 c->Request.CDBLen = 16;
a505b86f
SC
6284 c->Request.type_attr_dir =
6285 TYPE_ATTR_DIR(cmd_type,
6286 ATTR_SIMPLE, XFER_WRITE);
75167d2c
SC
6287 c->Request.Timeout = 0; /* Don't time out */
6288 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6289 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6290 c->Request.CDB[2] = 0x00; /* reserved */
6291 c->Request.CDB[3] = 0x00; /* reserved */
6292 /* Tag to abort goes in CDB[4]-CDB[11] */
9b5c48c2 6293 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
75167d2c
SC
6294 c->Request.CDB[12] = 0x00; /* reserved */
6295 c->Request.CDB[13] = 0x00; /* reserved */
6296 c->Request.CDB[14] = 0x00; /* reserved */
6297 c->Request.CDB[15] = 0x00; /* reserved */
edd16368 6298 break;
edd16368
SC
6299 default:
6300 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6301 cmd);
6302 BUG();
6303 }
6304 } else {
6305 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6306 BUG();
6307 }
6308
a505b86f 6309 switch (GET_DIR(c->Request.type_attr_dir)) {
edd16368
SC
6310 case XFER_READ:
6311 pci_dir = PCI_DMA_FROMDEVICE;
6312 break;
6313 case XFER_WRITE:
6314 pci_dir = PCI_DMA_TODEVICE;
6315 break;
6316 case XFER_NONE:
6317 pci_dir = PCI_DMA_NONE;
6318 break;
6319 default:
6320 pci_dir = PCI_DMA_BIDIRECTIONAL;
6321 }
a2dac136
SC
6322 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6323 return -1;
6324 return 0;
edd16368
SC
6325}
6326
6327/*
6328 * Map (physical) PCI mem into (virtual) kernel space
6329 */
6330static void __iomem *remap_pci_mem(ulong base, ulong size)
6331{
6332 ulong page_base = ((ulong) base) & PAGE_MASK;
6333 ulong page_offs = ((ulong) base) - page_base;
088ba34c
SC
6334 void __iomem *page_remapped = ioremap_nocache(page_base,
6335 page_offs + size);
edd16368
SC
6336
6337 return page_remapped ? (page_remapped + page_offs) : NULL;
6338}
6339
254f796b 6340static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
edd16368 6341{
254f796b 6342 return h->access.command_completed(h, q);
edd16368
SC
6343}
6344
900c5440 6345static inline bool interrupt_pending(struct ctlr_info *h)
edd16368
SC
6346{
6347 return h->access.intr_pending(h);
6348}
6349
6350static inline long interrupt_not_for_us(struct ctlr_info *h)
6351{
10f66018
SC
6352 return (h->access.intr_pending(h) == 0) ||
6353 (h->interrupts_enabled == 0);
edd16368
SC
6354}
6355
01a02ffc
SC
6356static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6357 u32 raw_tag)
edd16368
SC
6358{
6359 if (unlikely(tag_index >= h->nr_cmds)) {
6360 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6361 return 1;
6362 }
6363 return 0;
6364}
6365
5a3d16f5 6366static inline void finish_cmd(struct CommandList *c)
edd16368 6367{
e85c5974 6368 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
c349775e
ST
6369 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6370 || c->cmd_type == CMD_IOACCEL2))
1fb011fb 6371 complete_scsi_command(c);
8be986cc 6372 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
edd16368 6373 complete(c->waiting);
a104c99f
SC
6374}
6375
a9a3a273
SC
6376
6377static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
a104c99f 6378{
a9a3a273
SC
6379#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
6380#define HPSA_SIMPLE_ERROR_BITS 0x03
960a30e7 6381 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
a9a3a273
SC
6382 return tag & ~HPSA_SIMPLE_ERROR_BITS;
6383 return tag & ~HPSA_PERF_ERROR_BITS;
a104c99f
SC
6384}
6385
303932fd 6386/* process completion of an indexed ("direct lookup") command */
1d94f94d 6387static inline void process_indexed_cmd(struct ctlr_info *h,
303932fd
DB
6388 u32 raw_tag)
6389{
6390 u32 tag_index;
6391 struct CommandList *c;
6392
f2405db8 6393 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
1d94f94d
SC
6394 if (!bad_tag(h, tag_index, raw_tag)) {
6395 c = h->cmd_pool + tag_index;
6396 finish_cmd(c);
6397 }
303932fd
DB
6398}
6399
64670ac8
SC
6400/* Some controllers, like p400, will give us one interrupt
6401 * after a soft reset, even if we turned interrupts off.
6402 * Only need to check for this in the hpsa_xxx_discard_completions
6403 * functions.
6404 */
6405static int ignore_bogus_interrupt(struct ctlr_info *h)
6406{
6407 if (likely(!reset_devices))
6408 return 0;
6409
6410 if (likely(h->interrupts_enabled))
6411 return 0;
6412
6413 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6414 "(known firmware bug.) Ignoring.\n");
6415
6416 return 1;
6417}
6418
254f796b
MG
6419/*
6420 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6421 * Relies on (h-q[x] == x) being true for x such that
6422 * 0 <= x < MAX_REPLY_QUEUES.
6423 */
6424static struct ctlr_info *queue_to_hba(u8 *queue)
64670ac8 6425{
254f796b
MG
6426 return container_of((queue - *queue), struct ctlr_info, q[0]);
6427}
6428
6429static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6430{
6431 struct ctlr_info *h = queue_to_hba(queue);
6432 u8 q = *(u8 *) queue;
64670ac8
SC
6433 u32 raw_tag;
6434
6435 if (ignore_bogus_interrupt(h))
6436 return IRQ_NONE;
6437
6438 if (interrupt_not_for_us(h))
6439 return IRQ_NONE;
a0c12413 6440 h->last_intr_timestamp = get_jiffies_64();
64670ac8 6441 while (interrupt_pending(h)) {
254f796b 6442 raw_tag = get_next_completion(h, q);
64670ac8 6443 while (raw_tag != FIFO_EMPTY)
254f796b 6444 raw_tag = next_command(h, q);
64670ac8 6445 }
64670ac8
SC
6446 return IRQ_HANDLED;
6447}
6448
254f796b 6449static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
64670ac8 6450{
254f796b 6451 struct ctlr_info *h = queue_to_hba(queue);
64670ac8 6452 u32 raw_tag;
254f796b 6453 u8 q = *(u8 *) queue;
64670ac8
SC
6454
6455 if (ignore_bogus_interrupt(h))
6456 return IRQ_NONE;
6457
a0c12413 6458 h->last_intr_timestamp = get_jiffies_64();
254f796b 6459 raw_tag = get_next_completion(h, q);
64670ac8 6460 while (raw_tag != FIFO_EMPTY)
254f796b 6461 raw_tag = next_command(h, q);
64670ac8
SC
6462 return IRQ_HANDLED;
6463}
6464
254f796b 6465static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
edd16368 6466{
254f796b 6467 struct ctlr_info *h = queue_to_hba((u8 *) queue);
303932fd 6468 u32 raw_tag;
254f796b 6469 u8 q = *(u8 *) queue;
edd16368
SC
6470
6471 if (interrupt_not_for_us(h))
6472 return IRQ_NONE;
a0c12413 6473 h->last_intr_timestamp = get_jiffies_64();
10f66018 6474 while (interrupt_pending(h)) {
254f796b 6475 raw_tag = get_next_completion(h, q);
10f66018 6476 while (raw_tag != FIFO_EMPTY) {
f2405db8 6477 process_indexed_cmd(h, raw_tag);
254f796b 6478 raw_tag = next_command(h, q);
10f66018
SC
6479 }
6480 }
10f66018
SC
6481 return IRQ_HANDLED;
6482}
6483
254f796b 6484static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
10f66018 6485{
254f796b 6486 struct ctlr_info *h = queue_to_hba(queue);
10f66018 6487 u32 raw_tag;
254f796b 6488 u8 q = *(u8 *) queue;
10f66018 6489
a0c12413 6490 h->last_intr_timestamp = get_jiffies_64();
254f796b 6491 raw_tag = get_next_completion(h, q);
303932fd 6492 while (raw_tag != FIFO_EMPTY) {
f2405db8 6493 process_indexed_cmd(h, raw_tag);
254f796b 6494 raw_tag = next_command(h, q);
edd16368 6495 }
edd16368
SC
6496 return IRQ_HANDLED;
6497}
6498
a9a3a273
SC
6499/* Send a message CDB to the firmware. Careful, this only works
6500 * in simple mode, not performant mode due to the tag lookup.
6501 * We only ever use this immediately after a controller reset.
6502 */
6f039790
GKH
6503static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6504 unsigned char type)
edd16368
SC
6505{
6506 struct Command {
6507 struct CommandListHeader CommandHeader;
6508 struct RequestBlock Request;
6509 struct ErrDescriptor ErrorDescriptor;
6510 };
6511 struct Command *cmd;
6512 static const size_t cmd_sz = sizeof(*cmd) +
6513 sizeof(cmd->ErrorDescriptor);
6514 dma_addr_t paddr64;
2b08b3e9
DB
6515 __le32 paddr32;
6516 u32 tag;
edd16368
SC
6517 void __iomem *vaddr;
6518 int i, err;
6519
6520 vaddr = pci_ioremap_bar(pdev, 0);
6521 if (vaddr == NULL)
6522 return -ENOMEM;
6523
6524 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6525 * CCISS commands, so they must be allocated from the lower 4GiB of
6526 * memory.
6527 */
6528 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6529 if (err) {
6530 iounmap(vaddr);
1eaec8f3 6531 return err;
edd16368
SC
6532 }
6533
6534 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6535 if (cmd == NULL) {
6536 iounmap(vaddr);
6537 return -ENOMEM;
6538 }
6539
6540 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6541 * although there's no guarantee, we assume that the address is at
6542 * least 4-byte aligned (most likely, it's page-aligned).
6543 */
2b08b3e9 6544 paddr32 = cpu_to_le32(paddr64);
edd16368
SC
6545
6546 cmd->CommandHeader.ReplyQueue = 0;
6547 cmd->CommandHeader.SGList = 0;
50a0decf 6548 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
2b08b3e9 6549 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
edd16368
SC
6550 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6551
6552 cmd->Request.CDBLen = 16;
a505b86f
SC
6553 cmd->Request.type_attr_dir =
6554 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
edd16368
SC
6555 cmd->Request.Timeout = 0; /* Don't time out */
6556 cmd->Request.CDB[0] = opcode;
6557 cmd->Request.CDB[1] = type;
6558 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
50a0decf 6559 cmd->ErrorDescriptor.Addr =
2b08b3e9 6560 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
50a0decf 6561 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
edd16368 6562
2b08b3e9 6563 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
edd16368
SC
6564
6565 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6566 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2b08b3e9 6567 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
edd16368
SC
6568 break;
6569 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6570 }
6571
6572 iounmap(vaddr);
6573
6574 /* we leak the DMA buffer here ... no choice since the controller could
6575 * still complete the command.
6576 */
6577 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6578 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6579 opcode, type);
6580 return -ETIMEDOUT;
6581 }
6582
6583 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6584
6585 if (tag & HPSA_ERROR_BIT) {
6586 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6587 opcode, type);
6588 return -EIO;
6589 }
6590
6591 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6592 opcode, type);
6593 return 0;
6594}
6595
edd16368
SC
6596#define hpsa_noop(p) hpsa_message(p, 3, 0)
6597
1df8552a 6598static int hpsa_controller_hard_reset(struct pci_dev *pdev,
42a91641 6599 void __iomem *vaddr, u32 use_doorbell)
1df8552a 6600{
1df8552a
SC
6601
6602 if (use_doorbell) {
6603 /* For everything after the P600, the PCI power state method
6604 * of resetting the controller doesn't work, so we have this
6605 * other way using the doorbell register.
6606 */
6607 dev_info(&pdev->dev, "using doorbell to reset controller\n");
cf0b08d0 6608 writel(use_doorbell, vaddr + SA5_DOORBELL);
85009239 6609
00701a96 6610 /* PMC hardware guys tell us we need a 10 second delay after
85009239
SC
6611 * doorbell reset and before any attempt to talk to the board
6612 * at all to ensure that this actually works and doesn't fall
6613 * over in some weird corner cases.
6614 */
00701a96 6615 msleep(10000);
1df8552a
SC
6616 } else { /* Try to do it the PCI power state way */
6617
6618 /* Quoting from the Open CISS Specification: "The Power
6619 * Management Control/Status Register (CSR) controls the power
6620 * state of the device. The normal operating state is D0,
6621 * CSR=00h. The software off state is D3, CSR=03h. To reset
6622 * the controller, place the interface device in D3 then to D0,
6623 * this causes a secondary PCI reset which will reset the
6624 * controller." */
2662cab8
DB
6625
6626 int rc = 0;
6627
1df8552a 6628 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2662cab8 6629
1df8552a 6630 /* enter the D3hot power management state */
2662cab8
DB
6631 rc = pci_set_power_state(pdev, PCI_D3hot);
6632 if (rc)
6633 return rc;
1df8552a
SC
6634
6635 msleep(500);
6636
6637 /* enter the D0 power management state */
2662cab8
DB
6638 rc = pci_set_power_state(pdev, PCI_D0);
6639 if (rc)
6640 return rc;
c4853efe
MM
6641
6642 /*
6643 * The P600 requires a small delay when changing states.
6644 * Otherwise we may think the board did not reset and we bail.
6645 * This for kdump only and is particular to the P600.
6646 */
6647 msleep(500);
1df8552a
SC
6648 }
6649 return 0;
6650}
6651
6f039790 6652static void init_driver_version(char *driver_version, int len)
580ada3c
SC
6653{
6654 memset(driver_version, 0, len);
f79cfec6 6655 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
580ada3c
SC
6656}
6657
6f039790 6658static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6659{
6660 char *driver_version;
6661 int i, size = sizeof(cfgtable->driver_version);
6662
6663 driver_version = kmalloc(size, GFP_KERNEL);
6664 if (!driver_version)
6665 return -ENOMEM;
6666
6667 init_driver_version(driver_version, size);
6668 for (i = 0; i < size; i++)
6669 writeb(driver_version[i], &cfgtable->driver_version[i]);
6670 kfree(driver_version);
6671 return 0;
6672}
6673
6f039790
GKH
6674static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6675 unsigned char *driver_ver)
580ada3c
SC
6676{
6677 int i;
6678
6679 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6680 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6681}
6682
6f039790 6683static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6684{
6685
6686 char *driver_ver, *old_driver_ver;
6687 int rc, size = sizeof(cfgtable->driver_version);
6688
6689 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6690 if (!old_driver_ver)
6691 return -ENOMEM;
6692 driver_ver = old_driver_ver + size;
6693
6694 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6695 * should have been changed, otherwise we know the reset failed.
6696 */
6697 init_driver_version(old_driver_ver, size);
6698 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6699 rc = !memcmp(driver_ver, old_driver_ver, size);
6700 kfree(old_driver_ver);
6701 return rc;
6702}
edd16368 6703/* This does a hard reset of the controller using PCI power management
1df8552a 6704 * states or the using the doorbell register.
edd16368 6705 */
6b6c1cd7 6706static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
edd16368 6707{
1df8552a
SC
6708 u64 cfg_offset;
6709 u32 cfg_base_addr;
6710 u64 cfg_base_addr_index;
6711 void __iomem *vaddr;
6712 unsigned long paddr;
580ada3c 6713 u32 misc_fw_support;
270d05de 6714 int rc;
1df8552a 6715 struct CfgTable __iomem *cfgtable;
cf0b08d0 6716 u32 use_doorbell;
270d05de 6717 u16 command_register;
edd16368 6718
1df8552a
SC
6719 /* For controllers as old as the P600, this is very nearly
6720 * the same thing as
edd16368
SC
6721 *
6722 * pci_save_state(pci_dev);
6723 * pci_set_power_state(pci_dev, PCI_D3hot);
6724 * pci_set_power_state(pci_dev, PCI_D0);
6725 * pci_restore_state(pci_dev);
6726 *
1df8552a
SC
6727 * For controllers newer than the P600, the pci power state
6728 * method of resetting doesn't work so we have another way
6729 * using the doorbell register.
edd16368 6730 */
18867659 6731
60f923b9
RE
6732 if (!ctlr_is_resettable(board_id)) {
6733 dev_warn(&pdev->dev, "Controller not resettable\n");
25c1e56a
SC
6734 return -ENODEV;
6735 }
46380786
SC
6736
6737 /* if controller is soft- but not hard resettable... */
6738 if (!ctlr_is_hard_resettable(board_id))
6739 return -ENOTSUPP; /* try soft reset later. */
18867659 6740
270d05de
SC
6741 /* Save the PCI command register */
6742 pci_read_config_word(pdev, 4, &command_register);
270d05de 6743 pci_save_state(pdev);
edd16368 6744
1df8552a
SC
6745 /* find the first memory BAR, so we can find the cfg table */
6746 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6747 if (rc)
6748 return rc;
6749 vaddr = remap_pci_mem(paddr, 0x250);
6750 if (!vaddr)
6751 return -ENOMEM;
edd16368 6752
1df8552a
SC
6753 /* find cfgtable in order to check if reset via doorbell is supported */
6754 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6755 &cfg_base_addr_index, &cfg_offset);
6756 if (rc)
6757 goto unmap_vaddr;
6758 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6759 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6760 if (!cfgtable) {
6761 rc = -ENOMEM;
6762 goto unmap_vaddr;
6763 }
580ada3c
SC
6764 rc = write_driver_ver_to_cfgtable(cfgtable);
6765 if (rc)
03741d95 6766 goto unmap_cfgtable;
edd16368 6767
cf0b08d0
SC
6768 /* If reset via doorbell register is supported, use that.
6769 * There are two such methods. Favor the newest method.
6770 */
1df8552a 6771 misc_fw_support = readl(&cfgtable->misc_fw_support);
cf0b08d0
SC
6772 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6773 if (use_doorbell) {
6774 use_doorbell = DOORBELL_CTLR_RESET2;
6775 } else {
6776 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6777 if (use_doorbell) {
050f7147
SC
6778 dev_warn(&pdev->dev,
6779 "Soft reset not supported. Firmware update is required.\n");
64670ac8 6780 rc = -ENOTSUPP; /* try soft reset */
cf0b08d0
SC
6781 goto unmap_cfgtable;
6782 }
6783 }
edd16368 6784
1df8552a
SC
6785 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6786 if (rc)
6787 goto unmap_cfgtable;
edd16368 6788
270d05de 6789 pci_restore_state(pdev);
270d05de 6790 pci_write_config_word(pdev, 4, command_register);
edd16368 6791
1df8552a
SC
6792 /* Some devices (notably the HP Smart Array 5i Controller)
6793 need a little pause here */
6794 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6795
fe5389c8
SC
6796 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6797 if (rc) {
6798 dev_warn(&pdev->dev,
050f7147 6799 "Failed waiting for board to become ready after hard reset\n");
fe5389c8
SC
6800 goto unmap_cfgtable;
6801 }
fe5389c8 6802
580ada3c
SC
6803 rc = controller_reset_failed(vaddr);
6804 if (rc < 0)
6805 goto unmap_cfgtable;
6806 if (rc) {
64670ac8
SC
6807 dev_warn(&pdev->dev, "Unable to successfully reset "
6808 "controller. Will try soft reset.\n");
6809 rc = -ENOTSUPP;
580ada3c 6810 } else {
64670ac8 6811 dev_info(&pdev->dev, "board ready after hard reset.\n");
1df8552a
SC
6812 }
6813
6814unmap_cfgtable:
6815 iounmap(cfgtable);
6816
6817unmap_vaddr:
6818 iounmap(vaddr);
6819 return rc;
edd16368
SC
6820}
6821
6822/*
6823 * We cannot read the structure directly, for portability we must use
6824 * the io functions.
6825 * This is for debug only.
6826 */
42a91641 6827static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
edd16368 6828{
58f8665c 6829#ifdef HPSA_DEBUG
edd16368
SC
6830 int i;
6831 char temp_name[17];
6832
6833 dev_info(dev, "Controller Configuration information\n");
6834 dev_info(dev, "------------------------------------\n");
6835 for (i = 0; i < 4; i++)
6836 temp_name[i] = readb(&(tb->Signature[i]));
6837 temp_name[4] = '\0';
6838 dev_info(dev, " Signature = %s\n", temp_name);
6839 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6840 dev_info(dev, " Transport methods supported = 0x%x\n",
6841 readl(&(tb->TransportSupport)));
6842 dev_info(dev, " Transport methods active = 0x%x\n",
6843 readl(&(tb->TransportActive)));
6844 dev_info(dev, " Requested transport Method = 0x%x\n",
6845 readl(&(tb->HostWrite.TransportRequest)));
6846 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6847 readl(&(tb->HostWrite.CoalIntDelay)));
6848 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6849 readl(&(tb->HostWrite.CoalIntCount)));
69d6e33d 6850 dev_info(dev, " Max outstanding commands = %d\n",
edd16368
SC
6851 readl(&(tb->CmdsOutMax)));
6852 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6853 for (i = 0; i < 16; i++)
6854 temp_name[i] = readb(&(tb->ServerName[i]));
6855 temp_name[16] = '\0';
6856 dev_info(dev, " Server Name = %s\n", temp_name);
6857 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6858 readl(&(tb->HeartBeat)));
edd16368 6859#endif /* HPSA_DEBUG */
58f8665c 6860}
edd16368
SC
6861
6862static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6863{
6864 int i, offset, mem_type, bar_type;
6865
6866 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6867 return 0;
6868 offset = 0;
6869 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6870 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6871 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6872 offset += 4;
6873 else {
6874 mem_type = pci_resource_flags(pdev, i) &
6875 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6876 switch (mem_type) {
6877 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6878 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6879 offset += 4; /* 32 bit */
6880 break;
6881 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6882 offset += 8;
6883 break;
6884 default: /* reserved in PCI 2.2 */
6885 dev_warn(&pdev->dev,
6886 "base address is invalid\n");
6887 return -1;
6888 break;
6889 }
6890 }
6891 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6892 return i + 1;
6893 }
6894 return -1;
6895}
6896
cc64c817
RE
6897static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
6898{
6899 if (h->msix_vector) {
6900 if (h->pdev->msix_enabled)
6901 pci_disable_msix(h->pdev);
105a3dbc 6902 h->msix_vector = 0;
cc64c817
RE
6903 } else if (h->msi_vector) {
6904 if (h->pdev->msi_enabled)
6905 pci_disable_msi(h->pdev);
105a3dbc 6906 h->msi_vector = 0;
cc64c817
RE
6907 }
6908}
6909
edd16368 6910/* If MSI/MSI-X is supported by the kernel we will try to enable it on
050f7147 6911 * controllers that are capable. If not, we use legacy INTx mode.
edd16368 6912 */
6f039790 6913static void hpsa_interrupt_mode(struct ctlr_info *h)
edd16368
SC
6914{
6915#ifdef CONFIG_PCI_MSI
254f796b
MG
6916 int err, i;
6917 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6918
6919 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6920 hpsa_msix_entries[i].vector = 0;
6921 hpsa_msix_entries[i].entry = i;
6922 }
edd16368
SC
6923
6924 /* Some boards advertise MSI but don't really support it */
6b3f4c52
SC
6925 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6926 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
edd16368 6927 goto default_int_mode;
55c06c71 6928 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
050f7147 6929 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
eee0f03a 6930 h->msix_vector = MAX_REPLY_QUEUES;
f89439bc
SC
6931 if (h->msix_vector > num_online_cpus())
6932 h->msix_vector = num_online_cpus();
18fce3c4
AG
6933 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6934 1, h->msix_vector);
6935 if (err < 0) {
6936 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6937 h->msix_vector = 0;
6938 goto single_msi_mode;
6939 } else if (err < h->msix_vector) {
55c06c71 6940 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
edd16368 6941 "available\n", err);
edd16368 6942 }
18fce3c4
AG
6943 h->msix_vector = err;
6944 for (i = 0; i < h->msix_vector; i++)
6945 h->intr[i] = hpsa_msix_entries[i].vector;
6946 return;
edd16368 6947 }
18fce3c4 6948single_msi_mode:
55c06c71 6949 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
050f7147 6950 dev_info(&h->pdev->dev, "MSI capable controller\n");
55c06c71 6951 if (!pci_enable_msi(h->pdev))
edd16368
SC
6952 h->msi_vector = 1;
6953 else
55c06c71 6954 dev_warn(&h->pdev->dev, "MSI init failed\n");
edd16368
SC
6955 }
6956default_int_mode:
6957#endif /* CONFIG_PCI_MSI */
6958 /* if we get here we're going to use the default interrupt mode */
a9a3a273 6959 h->intr[h->intr_mode] = h->pdev->irq;
edd16368
SC
6960}
6961
6f039790 6962static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
e5c880d1
SC
6963{
6964 int i;
6965 u32 subsystem_vendor_id, subsystem_device_id;
6966
6967 subsystem_vendor_id = pdev->subsystem_vendor;
6968 subsystem_device_id = pdev->subsystem_device;
6969 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6970 subsystem_vendor_id;
6971
6972 for (i = 0; i < ARRAY_SIZE(products); i++)
6973 if (*board_id == products[i].board_id)
6974 return i;
6975
6798cc0a
SC
6976 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6977 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6978 !hpsa_allow_any) {
e5c880d1
SC
6979 dev_warn(&pdev->dev, "unrecognized board ID: "
6980 "0x%08x, ignoring.\n", *board_id);
6981 return -ENODEV;
6982 }
6983 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6984}
6985
6f039790
GKH
6986static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6987 unsigned long *memory_bar)
3a7774ce
SC
6988{
6989 int i;
6990
6991 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
12d2cd47 6992 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3a7774ce 6993 /* addressing mode bits already removed */
12d2cd47
SC
6994 *memory_bar = pci_resource_start(pdev, i);
6995 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3a7774ce
SC
6996 *memory_bar);
6997 return 0;
6998 }
12d2cd47 6999 dev_warn(&pdev->dev, "no memory BAR found\n");
3a7774ce
SC
7000 return -ENODEV;
7001}
7002
6f039790
GKH
7003static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7004 int wait_for_ready)
2c4c8c8b 7005{
fe5389c8 7006 int i, iterations;
2c4c8c8b 7007 u32 scratchpad;
fe5389c8
SC
7008 if (wait_for_ready)
7009 iterations = HPSA_BOARD_READY_ITERATIONS;
7010 else
7011 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
2c4c8c8b 7012
fe5389c8
SC
7013 for (i = 0; i < iterations; i++) {
7014 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7015 if (wait_for_ready) {
7016 if (scratchpad == HPSA_FIRMWARE_READY)
7017 return 0;
7018 } else {
7019 if (scratchpad != HPSA_FIRMWARE_READY)
7020 return 0;
7021 }
2c4c8c8b
SC
7022 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7023 }
fe5389c8 7024 dev_warn(&pdev->dev, "board not ready, timed out.\n");
2c4c8c8b
SC
7025 return -ENODEV;
7026}
7027
6f039790
GKH
7028static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7029 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7030 u64 *cfg_offset)
a51fd47f
SC
7031{
7032 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7033 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7034 *cfg_base_addr &= (u32) 0x0000ffff;
7035 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7036 if (*cfg_base_addr_index == -1) {
7037 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7038 return -ENODEV;
7039 }
7040 return 0;
7041}
7042
195f2c65
RE
7043static void hpsa_free_cfgtables(struct ctlr_info *h)
7044{
105a3dbc 7045 if (h->transtable) {
195f2c65 7046 iounmap(h->transtable);
105a3dbc
RE
7047 h->transtable = NULL;
7048 }
7049 if (h->cfgtable) {
195f2c65 7050 iounmap(h->cfgtable);
105a3dbc
RE
7051 h->cfgtable = NULL;
7052 }
195f2c65
RE
7053}
7054
7055/* Find and map CISS config table and transfer table
7056+ * several items must be unmapped (freed) later
7057+ * */
6f039790 7058static int hpsa_find_cfgtables(struct ctlr_info *h)
edd16368 7059{
01a02ffc
SC
7060 u64 cfg_offset;
7061 u32 cfg_base_addr;
7062 u64 cfg_base_addr_index;
303932fd 7063 u32 trans_offset;
a51fd47f 7064 int rc;
77c4495c 7065
a51fd47f
SC
7066 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7067 &cfg_base_addr_index, &cfg_offset);
7068 if (rc)
7069 return rc;
77c4495c 7070 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
a51fd47f 7071 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
cd3c81c4
RE
7072 if (!h->cfgtable) {
7073 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
77c4495c 7074 return -ENOMEM;
cd3c81c4 7075 }
580ada3c
SC
7076 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7077 if (rc)
7078 return rc;
77c4495c 7079 /* Find performant mode table. */
a51fd47f 7080 trans_offset = readl(&h->cfgtable->TransMethodOffset);
77c4495c
SC
7081 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7082 cfg_base_addr_index)+cfg_offset+trans_offset,
7083 sizeof(*h->transtable));
195f2c65
RE
7084 if (!h->transtable) {
7085 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7086 hpsa_free_cfgtables(h);
77c4495c 7087 return -ENOMEM;
195f2c65 7088 }
77c4495c
SC
7089 return 0;
7090}
7091
6f039790 7092static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
cba3d38b 7093{
41ce4c35
SC
7094#define MIN_MAX_COMMANDS 16
7095 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7096
7097 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
72ceeaec
SC
7098
7099 /* Limit commands in memory limited kdump scenario. */
7100 if (reset_devices && h->max_commands > 32)
7101 h->max_commands = 32;
7102
41ce4c35
SC
7103 if (h->max_commands < MIN_MAX_COMMANDS) {
7104 dev_warn(&h->pdev->dev,
7105 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7106 h->max_commands,
7107 MIN_MAX_COMMANDS);
7108 h->max_commands = MIN_MAX_COMMANDS;
cba3d38b
SC
7109 }
7110}
7111
c7ee65b3
WS
7112/* If the controller reports that the total max sg entries is greater than 512,
7113 * then we know that chained SG blocks work. (Original smart arrays did not
7114 * support chained SG blocks and would return zero for max sg entries.)
7115 */
7116static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7117{
7118 return h->maxsgentries > 512;
7119}
7120
b93d7536
SC
7121/* Interrogate the hardware for some limits:
7122 * max commands, max SG elements without chaining, and with chaining,
7123 * SG chain block size, etc.
7124 */
6f039790 7125static void hpsa_find_board_params(struct ctlr_info *h)
b93d7536 7126{
cba3d38b 7127 hpsa_get_max_perf_mode_cmds(h);
45fcb86e 7128 h->nr_cmds = h->max_commands;
b93d7536 7129 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
283b4a9b 7130 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
c7ee65b3
WS
7131 if (hpsa_supports_chained_sg_blocks(h)) {
7132 /* Limit in-command s/g elements to 32 save dma'able memory. */
b93d7536 7133 h->max_cmd_sg_entries = 32;
1a63ea6f 7134 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
b93d7536
SC
7135 h->maxsgentries--; /* save one for chain pointer */
7136 } else {
c7ee65b3
WS
7137 /*
7138 * Original smart arrays supported at most 31 s/g entries
7139 * embedded inline in the command (trying to use more
7140 * would lock up the controller)
7141 */
7142 h->max_cmd_sg_entries = 31;
1a63ea6f 7143 h->maxsgentries = 31; /* default to traditional values */
c7ee65b3 7144 h->chainsize = 0;
b93d7536 7145 }
75167d2c
SC
7146
7147 /* Find out what task management functions are supported and cache */
7148 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
0e7a7fce
ST
7149 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7150 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7151 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7152 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
8be986cc
SC
7153 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7154 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
b93d7536
SC
7155}
7156
76c46e49
SC
7157static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7158{
0fc9fd40 7159 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
050f7147 7160 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
76c46e49
SC
7161 return false;
7162 }
7163 return true;
7164}
7165
97a5e98c 7166static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
f7c39101 7167{
97a5e98c 7168 u32 driver_support;
f7c39101 7169
97a5e98c 7170 driver_support = readl(&(h->cfgtable->driver_support));
0b9e7b74
AB
7171 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7172#ifdef CONFIG_X86
97a5e98c 7173 driver_support |= ENABLE_SCSI_PREFETCH;
f7c39101 7174#endif
28e13446
SC
7175 driver_support |= ENABLE_UNIT_ATTN;
7176 writel(driver_support, &(h->cfgtable->driver_support));
f7c39101
SC
7177}
7178
3d0eab67
SC
7179/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7180 * in a prefetch beyond physical memory.
7181 */
7182static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7183{
7184 u32 dma_prefetch;
7185
7186 if (h->board_id != 0x3225103C)
7187 return;
7188 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7189 dma_prefetch |= 0x8000;
7190 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7191}
7192
c706a795 7193static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
76438d08
SC
7194{
7195 int i;
7196 u32 doorbell_value;
7197 unsigned long flags;
7198 /* wait until the clear_event_notify bit 6 is cleared by controller. */
007e7aa9 7199 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
76438d08
SC
7200 spin_lock_irqsave(&h->lock, flags);
7201 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7202 spin_unlock_irqrestore(&h->lock, flags);
7203 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
c706a795 7204 goto done;
76438d08 7205 /* delay and try again */
007e7aa9 7206 msleep(CLEAR_EVENT_WAIT_INTERVAL);
76438d08 7207 }
c706a795
RE
7208 return -ENODEV;
7209done:
7210 return 0;
76438d08
SC
7211}
7212
c706a795 7213static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
eb6b2ae9
SC
7214{
7215 int i;
6eaf46fd
SC
7216 u32 doorbell_value;
7217 unsigned long flags;
eb6b2ae9
SC
7218
7219 /* under certain very rare conditions, this can take awhile.
7220 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7221 * as we enter this code.)
7222 */
007e7aa9 7223 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
25163bd5
WS
7224 if (h->remove_in_progress)
7225 goto done;
6eaf46fd
SC
7226 spin_lock_irqsave(&h->lock, flags);
7227 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7228 spin_unlock_irqrestore(&h->lock, flags);
382be668 7229 if (!(doorbell_value & CFGTBL_ChangeReq))
c706a795 7230 goto done;
eb6b2ae9 7231 /* delay and try again */
007e7aa9 7232 msleep(MODE_CHANGE_WAIT_INTERVAL);
eb6b2ae9 7233 }
c706a795
RE
7234 return -ENODEV;
7235done:
7236 return 0;
3f4336f3
SC
7237}
7238
c706a795 7239/* return -ENODEV or other reason on error, 0 on success */
6f039790 7240static int hpsa_enter_simple_mode(struct ctlr_info *h)
3f4336f3
SC
7241{
7242 u32 trans_support;
7243
7244 trans_support = readl(&(h->cfgtable->TransportSupport));
7245 if (!(trans_support & SIMPLE_MODE))
7246 return -ENOTSUPP;
7247
7248 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
283b4a9b 7249
3f4336f3
SC
7250 /* Update the field, and then ring the doorbell */
7251 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
b9af4937 7252 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
3f4336f3 7253 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
7254 if (hpsa_wait_for_mode_change_ack(h))
7255 goto error;
eb6b2ae9 7256 print_cfg_table(&h->pdev->dev, h->cfgtable);
283b4a9b
SC
7257 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7258 goto error;
960a30e7 7259 h->transMethod = CFGTBL_Trans_Simple;
eb6b2ae9 7260 return 0;
283b4a9b 7261error:
050f7147 7262 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
283b4a9b 7263 return -ENODEV;
eb6b2ae9
SC
7264}
7265
195f2c65
RE
7266/* free items allocated or mapped by hpsa_pci_init */
7267static void hpsa_free_pci_init(struct ctlr_info *h)
7268{
7269 hpsa_free_cfgtables(h); /* pci_init 4 */
7270 iounmap(h->vaddr); /* pci_init 3 */
105a3dbc 7271 h->vaddr = NULL;
195f2c65 7272 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
943a7021
RE
7273 /*
7274 * call pci_disable_device before pci_release_regions per
7275 * Documentation/PCI/pci.txt
7276 */
195f2c65 7277 pci_disable_device(h->pdev); /* pci_init 1 */
943a7021 7278 pci_release_regions(h->pdev); /* pci_init 2 */
195f2c65
RE
7279}
7280
7281/* several items must be freed later */
6f039790 7282static int hpsa_pci_init(struct ctlr_info *h)
77c4495c 7283{
eb6b2ae9 7284 int prod_index, err;
edd16368 7285
e5c880d1
SC
7286 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7287 if (prod_index < 0)
60f923b9 7288 return prod_index;
e5c880d1
SC
7289 h->product_name = products[prod_index].product_name;
7290 h->access = *(products[prod_index].access);
edd16368 7291
9b5c48c2
SC
7292 h->needs_abort_tags_swizzled =
7293 ctlr_needs_abort_tags_swizzled(h->board_id);
7294
e5a44df8
MG
7295 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7296 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7297
55c06c71 7298 err = pci_enable_device(h->pdev);
edd16368 7299 if (err) {
195f2c65 7300 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
943a7021 7301 pci_disable_device(h->pdev);
edd16368
SC
7302 return err;
7303 }
7304
f79cfec6 7305 err = pci_request_regions(h->pdev, HPSA);
edd16368 7306 if (err) {
55c06c71 7307 dev_err(&h->pdev->dev,
195f2c65 7308 "failed to obtain PCI resources\n");
943a7021
RE
7309 pci_disable_device(h->pdev);
7310 return err;
edd16368 7311 }
4fa604e1
RE
7312
7313 pci_set_master(h->pdev);
7314
6b3f4c52 7315 hpsa_interrupt_mode(h);
12d2cd47 7316 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3a7774ce 7317 if (err)
195f2c65 7318 goto clean2; /* intmode+region, pci */
edd16368 7319 h->vaddr = remap_pci_mem(h->paddr, 0x250);
204892e9 7320 if (!h->vaddr) {
195f2c65 7321 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
204892e9 7322 err = -ENOMEM;
195f2c65 7323 goto clean2; /* intmode+region, pci */
204892e9 7324 }
fe5389c8 7325 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
2c4c8c8b 7326 if (err)
195f2c65 7327 goto clean3; /* vaddr, intmode+region, pci */
77c4495c
SC
7328 err = hpsa_find_cfgtables(h);
7329 if (err)
195f2c65 7330 goto clean3; /* vaddr, intmode+region, pci */
b93d7536 7331 hpsa_find_board_params(h);
edd16368 7332
76c46e49 7333 if (!hpsa_CISS_signature_present(h)) {
edd16368 7334 err = -ENODEV;
195f2c65 7335 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368 7336 }
97a5e98c 7337 hpsa_set_driver_support_bits(h);
3d0eab67 7338 hpsa_p600_dma_prefetch_quirk(h);
eb6b2ae9
SC
7339 err = hpsa_enter_simple_mode(h);
7340 if (err)
195f2c65 7341 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368
SC
7342 return 0;
7343
195f2c65
RE
7344clean4: /* cfgtables, vaddr, intmode+region, pci */
7345 hpsa_free_cfgtables(h);
7346clean3: /* vaddr, intmode+region, pci */
7347 iounmap(h->vaddr);
105a3dbc 7348 h->vaddr = NULL;
195f2c65
RE
7349clean2: /* intmode+region, pci */
7350 hpsa_disable_interrupt_mode(h);
943a7021
RE
7351 /*
7352 * call pci_disable_device before pci_release_regions per
7353 * Documentation/PCI/pci.txt
7354 */
195f2c65 7355 pci_disable_device(h->pdev);
943a7021 7356 pci_release_regions(h->pdev);
edd16368
SC
7357 return err;
7358}
7359
6f039790 7360static void hpsa_hba_inquiry(struct ctlr_info *h)
339b2b14
SC
7361{
7362 int rc;
7363
7364#define HBA_INQUIRY_BYTE_COUNT 64
7365 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7366 if (!h->hba_inquiry_data)
7367 return;
7368 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7369 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7370 if (rc != 0) {
7371 kfree(h->hba_inquiry_data);
7372 h->hba_inquiry_data = NULL;
7373 }
7374}
7375
6b6c1cd7 7376static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
4c2a8c40 7377{
1df8552a 7378 int rc, i;
3b747298 7379 void __iomem *vaddr;
4c2a8c40
SC
7380
7381 if (!reset_devices)
7382 return 0;
7383
132aa220
TH
7384 /* kdump kernel is loading, we don't know in which state is
7385 * the pci interface. The dev->enable_cnt is equal zero
7386 * so we call enable+disable, wait a while and switch it on.
7387 */
7388 rc = pci_enable_device(pdev);
7389 if (rc) {
7390 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7391 return -ENODEV;
7392 }
7393 pci_disable_device(pdev);
7394 msleep(260); /* a randomly chosen number */
7395 rc = pci_enable_device(pdev);
7396 if (rc) {
7397 dev_warn(&pdev->dev, "failed to enable device.\n");
7398 return -ENODEV;
7399 }
4fa604e1 7400
859c75ab 7401 pci_set_master(pdev);
4fa604e1 7402
3b747298
TH
7403 vaddr = pci_ioremap_bar(pdev, 0);
7404 if (vaddr == NULL) {
7405 rc = -ENOMEM;
7406 goto out_disable;
7407 }
7408 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7409 iounmap(vaddr);
7410
1df8552a 7411 /* Reset the controller with a PCI power-cycle or via doorbell */
6b6c1cd7 7412 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
4c2a8c40 7413
1df8552a
SC
7414 /* -ENOTSUPP here means we cannot reset the controller
7415 * but it's already (and still) up and running in
18867659
SC
7416 * "performant mode". Or, it might be 640x, which can't reset
7417 * due to concerns about shared bbwc between 6402/6404 pair.
1df8552a 7418 */
adf1b3a3 7419 if (rc)
132aa220 7420 goto out_disable;
4c2a8c40
SC
7421
7422 /* Now try to get the controller to respond to a no-op */
1ba66c9c 7423 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
4c2a8c40
SC
7424 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7425 if (hpsa_noop(pdev) == 0)
7426 break;
7427 else
7428 dev_warn(&pdev->dev, "no-op failed%s\n",
7429 (i < 11 ? "; re-trying" : ""));
7430 }
132aa220
TH
7431
7432out_disable:
7433
7434 pci_disable_device(pdev);
7435 return rc;
4c2a8c40
SC
7436}
7437
1fb7c98a
RE
7438static void hpsa_free_cmd_pool(struct ctlr_info *h)
7439{
7440 kfree(h->cmd_pool_bits);
105a3dbc
RE
7441 h->cmd_pool_bits = NULL;
7442 if (h->cmd_pool) {
1fb7c98a
RE
7443 pci_free_consistent(h->pdev,
7444 h->nr_cmds * sizeof(struct CommandList),
7445 h->cmd_pool,
7446 h->cmd_pool_dhandle);
105a3dbc
RE
7447 h->cmd_pool = NULL;
7448 h->cmd_pool_dhandle = 0;
7449 }
7450 if (h->errinfo_pool) {
1fb7c98a
RE
7451 pci_free_consistent(h->pdev,
7452 h->nr_cmds * sizeof(struct ErrorInfo),
7453 h->errinfo_pool,
7454 h->errinfo_pool_dhandle);
105a3dbc
RE
7455 h->errinfo_pool = NULL;
7456 h->errinfo_pool_dhandle = 0;
7457 }
1fb7c98a
RE
7458}
7459
d37ffbe4 7460static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
2e9d1b36
SC
7461{
7462 h->cmd_pool_bits = kzalloc(
7463 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7464 sizeof(unsigned long), GFP_KERNEL);
7465 h->cmd_pool = pci_alloc_consistent(h->pdev,
7466 h->nr_cmds * sizeof(*h->cmd_pool),
7467 &(h->cmd_pool_dhandle));
7468 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7469 h->nr_cmds * sizeof(*h->errinfo_pool),
7470 &(h->errinfo_pool_dhandle));
7471 if ((h->cmd_pool_bits == NULL)
7472 || (h->cmd_pool == NULL)
7473 || (h->errinfo_pool == NULL)) {
7474 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
2c143342 7475 goto clean_up;
2e9d1b36 7476 }
360c73bd 7477 hpsa_preinitialize_commands(h);
2e9d1b36 7478 return 0;
2c143342
RE
7479clean_up:
7480 hpsa_free_cmd_pool(h);
7481 return -ENOMEM;
2e9d1b36
SC
7482}
7483
41b3cf08
SC
7484static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7485{
ec429952 7486 int i, cpu;
41b3cf08
SC
7487
7488 cpu = cpumask_first(cpu_online_mask);
7489 for (i = 0; i < h->msix_vector; i++) {
ec429952 7490 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
41b3cf08
SC
7491 cpu = cpumask_next(cpu, cpu_online_mask);
7492 }
7493}
7494
ec501a18
RE
7495/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7496static void hpsa_free_irqs(struct ctlr_info *h)
7497{
7498 int i;
7499
7500 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7501 /* Single reply queue, only one irq to free */
7502 i = h->intr_mode;
7503 irq_set_affinity_hint(h->intr[i], NULL);
7504 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7505 h->q[i] = 0;
ec501a18
RE
7506 return;
7507 }
7508
7509 for (i = 0; i < h->msix_vector; i++) {
7510 irq_set_affinity_hint(h->intr[i], NULL);
7511 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7512 h->q[i] = 0;
ec501a18 7513 }
a4e17fc1
RE
7514 for (; i < MAX_REPLY_QUEUES; i++)
7515 h->q[i] = 0;
ec501a18
RE
7516}
7517
9ee61794
RE
7518/* returns 0 on success; cleans up and returns -Enn on error */
7519static int hpsa_request_irqs(struct ctlr_info *h,
0ae01a32
SC
7520 irqreturn_t (*msixhandler)(int, void *),
7521 irqreturn_t (*intxhandler)(int, void *))
7522{
254f796b 7523 int rc, i;
0ae01a32 7524
254f796b
MG
7525 /*
7526 * initialize h->q[x] = x so that interrupt handlers know which
7527 * queue to process.
7528 */
7529 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7530 h->q[i] = (u8) i;
7531
eee0f03a 7532 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
254f796b 7533 /* If performant mode and MSI-X, use multiple reply queues */
a4e17fc1 7534 for (i = 0; i < h->msix_vector; i++) {
8b47004a 7535 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
254f796b 7536 rc = request_irq(h->intr[i], msixhandler,
8b47004a 7537 0, h->intrname[i],
254f796b 7538 &h->q[i]);
a4e17fc1
RE
7539 if (rc) {
7540 int j;
7541
7542 dev_err(&h->pdev->dev,
7543 "failed to get irq %d for %s\n",
7544 h->intr[i], h->devname);
7545 for (j = 0; j < i; j++) {
7546 free_irq(h->intr[j], &h->q[j]);
7547 h->q[j] = 0;
7548 }
7549 for (; j < MAX_REPLY_QUEUES; j++)
7550 h->q[j] = 0;
7551 return rc;
7552 }
7553 }
41b3cf08 7554 hpsa_irq_affinity_hints(h);
254f796b
MG
7555 } else {
7556 /* Use single reply pool */
eee0f03a 7557 if (h->msix_vector > 0 || h->msi_vector) {
8b47004a
RE
7558 if (h->msix_vector)
7559 sprintf(h->intrname[h->intr_mode],
7560 "%s-msix", h->devname);
7561 else
7562 sprintf(h->intrname[h->intr_mode],
7563 "%s-msi", h->devname);
254f796b 7564 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7565 msixhandler, 0,
7566 h->intrname[h->intr_mode],
254f796b
MG
7567 &h->q[h->intr_mode]);
7568 } else {
8b47004a
RE
7569 sprintf(h->intrname[h->intr_mode],
7570 "%s-intx", h->devname);
254f796b 7571 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7572 intxhandler, IRQF_SHARED,
7573 h->intrname[h->intr_mode],
254f796b
MG
7574 &h->q[h->intr_mode]);
7575 }
105a3dbc 7576 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
254f796b 7577 }
0ae01a32 7578 if (rc) {
195f2c65 7579 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
0ae01a32 7580 h->intr[h->intr_mode], h->devname);
195f2c65 7581 hpsa_free_irqs(h);
0ae01a32
SC
7582 return -ENODEV;
7583 }
7584 return 0;
7585}
7586
6f039790 7587static int hpsa_kdump_soft_reset(struct ctlr_info *h)
64670ac8 7588{
39c53f55 7589 int rc;
bf43caf3 7590 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
64670ac8
SC
7591
7592 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
39c53f55
RE
7593 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7594 if (rc) {
64670ac8 7595 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
39c53f55 7596 return rc;
64670ac8
SC
7597 }
7598
7599 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
39c53f55
RE
7600 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7601 if (rc) {
64670ac8
SC
7602 dev_warn(&h->pdev->dev, "Board failed to become ready "
7603 "after soft reset.\n");
39c53f55 7604 return rc;
64670ac8
SC
7605 }
7606
7607 return 0;
7608}
7609
072b0518
SC
7610static void hpsa_free_reply_queues(struct ctlr_info *h)
7611{
7612 int i;
7613
7614 for (i = 0; i < h->nreply_queues; i++) {
7615 if (!h->reply_queue[i].head)
7616 continue;
1fb7c98a
RE
7617 pci_free_consistent(h->pdev,
7618 h->reply_queue_size,
7619 h->reply_queue[i].head,
7620 h->reply_queue[i].busaddr);
072b0518
SC
7621 h->reply_queue[i].head = NULL;
7622 h->reply_queue[i].busaddr = 0;
7623 }
105a3dbc 7624 h->reply_queue_size = 0;
072b0518
SC
7625}
7626
0097f0f4
SC
7627static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7628{
105a3dbc
RE
7629 hpsa_free_performant_mode(h); /* init_one 7 */
7630 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7631 hpsa_free_cmd_pool(h); /* init_one 5 */
7632 hpsa_free_irqs(h); /* init_one 4 */
2946e82b
RE
7633 scsi_host_put(h->scsi_host); /* init_one 3 */
7634 h->scsi_host = NULL; /* init_one 3 */
7635 hpsa_free_pci_init(h); /* init_one 2_5 */
9ecd953a
RE
7636 free_percpu(h->lockup_detected); /* init_one 2 */
7637 h->lockup_detected = NULL; /* init_one 2 */
7638 if (h->resubmit_wq) {
7639 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7640 h->resubmit_wq = NULL;
7641 }
7642 if (h->rescan_ctlr_wq) {
7643 destroy_workqueue(h->rescan_ctlr_wq);
7644 h->rescan_ctlr_wq = NULL;
7645 }
105a3dbc 7646 kfree(h); /* init_one 1 */
64670ac8
SC
7647}
7648
a0c12413 7649/* Called when controller lockup detected. */
f2405db8 7650static void fail_all_outstanding_cmds(struct ctlr_info *h)
a0c12413 7651{
281a7fd0
WS
7652 int i, refcount;
7653 struct CommandList *c;
25163bd5 7654 int failcount = 0;
a0c12413 7655
080ef1cc 7656 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
f2405db8 7657 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 7658 c = h->cmd_pool + i;
281a7fd0
WS
7659 refcount = atomic_inc_return(&c->refcount);
7660 if (refcount > 1) {
25163bd5 7661 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
281a7fd0 7662 finish_cmd(c);
433b5f4d 7663 atomic_dec(&h->commands_outstanding);
25163bd5 7664 failcount++;
281a7fd0
WS
7665 }
7666 cmd_free(h, c);
a0c12413 7667 }
25163bd5
WS
7668 dev_warn(&h->pdev->dev,
7669 "failed %d commands in fail_all\n", failcount);
a0c12413
SC
7670}
7671
094963da
SC
7672static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7673{
c8ed0010 7674 int cpu;
094963da 7675
c8ed0010 7676 for_each_online_cpu(cpu) {
094963da
SC
7677 u32 *lockup_detected;
7678 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7679 *lockup_detected = value;
094963da
SC
7680 }
7681 wmb(); /* be sure the per-cpu variables are out to memory */
7682}
7683
a0c12413
SC
7684static void controller_lockup_detected(struct ctlr_info *h)
7685{
7686 unsigned long flags;
094963da 7687 u32 lockup_detected;
a0c12413 7688
a0c12413
SC
7689 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7690 spin_lock_irqsave(&h->lock, flags);
094963da
SC
7691 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7692 if (!lockup_detected) {
7693 /* no heartbeat, but controller gave us a zero. */
7694 dev_warn(&h->pdev->dev,
25163bd5
WS
7695 "lockup detected after %d but scratchpad register is zero\n",
7696 h->heartbeat_sample_interval / HZ);
094963da
SC
7697 lockup_detected = 0xffffffff;
7698 }
7699 set_lockup_detected_for_all_cpus(h, lockup_detected);
a0c12413 7700 spin_unlock_irqrestore(&h->lock, flags);
25163bd5
WS
7701 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
7702 lockup_detected, h->heartbeat_sample_interval / HZ);
a0c12413 7703 pci_disable_device(h->pdev);
f2405db8 7704 fail_all_outstanding_cmds(h);
a0c12413
SC
7705}
7706
25163bd5 7707static int detect_controller_lockup(struct ctlr_info *h)
a0c12413
SC
7708{
7709 u64 now;
7710 u32 heartbeat;
7711 unsigned long flags;
7712
a0c12413
SC
7713 now = get_jiffies_64();
7714 /* If we've received an interrupt recently, we're ok. */
7715 if (time_after64(h->last_intr_timestamp +
e85c5974 7716 (h->heartbeat_sample_interval), now))
25163bd5 7717 return false;
a0c12413
SC
7718
7719 /*
7720 * If we've already checked the heartbeat recently, we're ok.
7721 * This could happen if someone sends us a signal. We
7722 * otherwise don't care about signals in this thread.
7723 */
7724 if (time_after64(h->last_heartbeat_timestamp +
e85c5974 7725 (h->heartbeat_sample_interval), now))
25163bd5 7726 return false;
a0c12413
SC
7727
7728 /* If heartbeat has not changed since we last looked, we're not ok. */
7729 spin_lock_irqsave(&h->lock, flags);
7730 heartbeat = readl(&h->cfgtable->HeartBeat);
7731 spin_unlock_irqrestore(&h->lock, flags);
7732 if (h->last_heartbeat == heartbeat) {
7733 controller_lockup_detected(h);
25163bd5 7734 return true;
a0c12413
SC
7735 }
7736
7737 /* We're ok. */
7738 h->last_heartbeat = heartbeat;
7739 h->last_heartbeat_timestamp = now;
25163bd5 7740 return false;
a0c12413
SC
7741}
7742
9846590e 7743static void hpsa_ack_ctlr_events(struct ctlr_info *h)
76438d08
SC
7744{
7745 int i;
7746 char *event_type;
7747
e4aa3e6a
SC
7748 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7749 return;
7750
76438d08 7751 /* Ask the controller to clear the events we're handling. */
1f7cee8c
SC
7752 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7753 | CFGTBL_Trans_io_accel2)) &&
76438d08
SC
7754 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7755 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7756
7757 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7758 event_type = "state change";
7759 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7760 event_type = "configuration change";
7761 /* Stop sending new RAID offload reqs via the IO accelerator */
7762 scsi_block_requests(h->scsi_host);
7763 for (i = 0; i < h->ndevices; i++)
7764 h->dev[i]->offload_enabled = 0;
23100dd9 7765 hpsa_drain_accel_commands(h);
76438d08
SC
7766 /* Set 'accelerator path config change' bit */
7767 dev_warn(&h->pdev->dev,
7768 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7769 h->events, event_type);
7770 writel(h->events, &(h->cfgtable->clear_event_notify));
7771 /* Set the "clear event notify field update" bit 6 */
7772 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7773 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7774 hpsa_wait_for_clear_event_notify_ack(h);
7775 scsi_unblock_requests(h->scsi_host);
7776 } else {
7777 /* Acknowledge controller notification events. */
7778 writel(h->events, &(h->cfgtable->clear_event_notify));
7779 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7780 hpsa_wait_for_clear_event_notify_ack(h);
7781#if 0
7782 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7783 hpsa_wait_for_mode_change_ack(h);
7784#endif
7785 }
9846590e 7786 return;
76438d08
SC
7787}
7788
7789/* Check a register on the controller to see if there are configuration
7790 * changes (added/changed/removed logical drives, etc.) which mean that
e863d68e
ST
7791 * we should rescan the controller for devices.
7792 * Also check flag for driver-initiated rescan.
76438d08 7793 */
9846590e 7794static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
76438d08
SC
7795{
7796 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
9846590e 7797 return 0;
76438d08
SC
7798
7799 h->events = readl(&(h->cfgtable->event_notify));
9846590e
SC
7800 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7801}
76438d08 7802
9846590e
SC
7803/*
7804 * Check if any of the offline devices have become ready
7805 */
7806static int hpsa_offline_devices_ready(struct ctlr_info *h)
7807{
7808 unsigned long flags;
7809 struct offline_device_entry *d;
7810 struct list_head *this, *tmp;
7811
7812 spin_lock_irqsave(&h->offline_device_lock, flags);
7813 list_for_each_safe(this, tmp, &h->offline_device_list) {
7814 d = list_entry(this, struct offline_device_entry,
7815 offline_list);
7816 spin_unlock_irqrestore(&h->offline_device_lock, flags);
d1fea47c
SC
7817 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7818 spin_lock_irqsave(&h->offline_device_lock, flags);
7819 list_del(&d->offline_list);
7820 spin_unlock_irqrestore(&h->offline_device_lock, flags);
9846590e 7821 return 1;
d1fea47c 7822 }
9846590e
SC
7823 spin_lock_irqsave(&h->offline_device_lock, flags);
7824 }
7825 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7826 return 0;
76438d08
SC
7827}
7828
6636e7f4 7829static void hpsa_rescan_ctlr_worker(struct work_struct *work)
a0c12413
SC
7830{
7831 unsigned long flags;
8a98db73 7832 struct ctlr_info *h = container_of(to_delayed_work(work),
6636e7f4
DB
7833 struct ctlr_info, rescan_ctlr_work);
7834
7835
7836 if (h->remove_in_progress)
8a98db73 7837 return;
9846590e
SC
7838
7839 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7840 scsi_host_get(h->scsi_host);
9846590e
SC
7841 hpsa_ack_ctlr_events(h);
7842 hpsa_scan_start(h->scsi_host);
7843 scsi_host_put(h->scsi_host);
7844 }
8a98db73 7845 spin_lock_irqsave(&h->lock, flags);
6636e7f4
DB
7846 if (!h->remove_in_progress)
7847 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7848 h->heartbeat_sample_interval);
7849 spin_unlock_irqrestore(&h->lock, flags);
7850}
7851
7852static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7853{
7854 unsigned long flags;
7855 struct ctlr_info *h = container_of(to_delayed_work(work),
7856 struct ctlr_info, monitor_ctlr_work);
7857
7858 detect_controller_lockup(h);
7859 if (lockup_detected(h))
a0c12413 7860 return;
6636e7f4
DB
7861
7862 spin_lock_irqsave(&h->lock, flags);
7863 if (!h->remove_in_progress)
7864 schedule_delayed_work(&h->monitor_ctlr_work,
8a98db73
SC
7865 h->heartbeat_sample_interval);
7866 spin_unlock_irqrestore(&h->lock, flags);
a0c12413
SC
7867}
7868
6636e7f4
DB
7869static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7870 char *name)
7871{
7872 struct workqueue_struct *wq = NULL;
6636e7f4 7873
397ea9cb 7874 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
6636e7f4
DB
7875 if (!wq)
7876 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
7877
7878 return wq;
7879}
7880
6f039790 7881static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
edd16368 7882{
4c2a8c40 7883 int dac, rc;
edd16368 7884 struct ctlr_info *h;
64670ac8
SC
7885 int try_soft_reset = 0;
7886 unsigned long flags;
6b6c1cd7 7887 u32 board_id;
edd16368
SC
7888
7889 if (number_of_controllers == 0)
7890 printk(KERN_INFO DRIVER_NAME "\n");
edd16368 7891
6b6c1cd7
TH
7892 rc = hpsa_lookup_board_id(pdev, &board_id);
7893 if (rc < 0) {
7894 dev_warn(&pdev->dev, "Board ID not found\n");
7895 return rc;
7896 }
7897
7898 rc = hpsa_init_reset_devices(pdev, board_id);
64670ac8
SC
7899 if (rc) {
7900 if (rc != -ENOTSUPP)
7901 return rc;
7902 /* If the reset fails in a particular way (it has no way to do
7903 * a proper hard reset, so returns -ENOTSUPP) we can try to do
7904 * a soft reset once we get the controller configured up to the
7905 * point that it can accept a command.
7906 */
7907 try_soft_reset = 1;
7908 rc = 0;
7909 }
7910
7911reinit_after_soft_reset:
edd16368 7912
303932fd
DB
7913 /* Command structures must be aligned on a 32-byte boundary because
7914 * the 5 lower bits of the address are used by the hardware. and by
7915 * the driver. See comments in hpsa.h for more info.
7916 */
303932fd 7917 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
edd16368 7918 h = kzalloc(sizeof(*h), GFP_KERNEL);
105a3dbc
RE
7919 if (!h) {
7920 dev_err(&pdev->dev, "Failed to allocate controller head\n");
ecd9aad4 7921 return -ENOMEM;
105a3dbc 7922 }
edd16368 7923
55c06c71 7924 h->pdev = pdev;
105a3dbc 7925
a9a3a273 7926 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
9846590e 7927 INIT_LIST_HEAD(&h->offline_device_list);
6eaf46fd 7928 spin_lock_init(&h->lock);
9846590e 7929 spin_lock_init(&h->offline_device_lock);
6eaf46fd 7930 spin_lock_init(&h->scan_lock);
34f0c627 7931 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
9b5c48c2 7932 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
094963da
SC
7933
7934 /* Allocate and clear per-cpu variable lockup_detected */
7935 h->lockup_detected = alloc_percpu(u32);
2a5ac326 7936 if (!h->lockup_detected) {
105a3dbc 7937 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
2a5ac326 7938 rc = -ENOMEM;
2efa5929 7939 goto clean1; /* aer/h */
2a5ac326 7940 }
094963da
SC
7941 set_lockup_detected_for_all_cpus(h, 0);
7942
55c06c71 7943 rc = hpsa_pci_init(h);
105a3dbc 7944 if (rc)
2946e82b
RE
7945 goto clean2; /* lu, aer/h */
7946
7947 /* relies on h-> settings made by hpsa_pci_init, including
7948 * interrupt_mode h->intr */
7949 rc = hpsa_scsi_host_alloc(h);
7950 if (rc)
7951 goto clean2_5; /* pci, lu, aer/h */
edd16368 7952
2946e82b 7953 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
edd16368
SC
7954 h->ctlr = number_of_controllers;
7955 number_of_controllers++;
edd16368
SC
7956
7957 /* configure PCI DMA stuff */
ecd9aad4
SC
7958 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
7959 if (rc == 0) {
edd16368 7960 dac = 1;
ecd9aad4
SC
7961 } else {
7962 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
7963 if (rc == 0) {
7964 dac = 0;
7965 } else {
7966 dev_err(&pdev->dev, "no suitable DMA available\n");
2946e82b 7967 goto clean3; /* shost, pci, lu, aer/h */
ecd9aad4 7968 }
edd16368
SC
7969 }
7970
7971 /* make sure the board interrupts are off */
7972 h->access.set_intr_mask(h, HPSA_INTR_OFF);
10f66018 7973
105a3dbc
RE
7974 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
7975 if (rc)
2946e82b 7976 goto clean3; /* shost, pci, lu, aer/h */
d37ffbe4 7977 rc = hpsa_alloc_cmd_pool(h);
8947fd10 7978 if (rc)
2946e82b 7979 goto clean4; /* irq, shost, pci, lu, aer/h */
105a3dbc
RE
7980 rc = hpsa_alloc_sg_chain_blocks(h);
7981 if (rc)
2946e82b 7982 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
a08a8471 7983 init_waitqueue_head(&h->scan_wait_queue);
9b5c48c2 7984 init_waitqueue_head(&h->abort_cmd_wait_queue);
d604f533
WS
7985 init_waitqueue_head(&h->event_sync_wait_queue);
7986 mutex_init(&h->reset_mutex);
a08a8471 7987 h->scan_finished = 1; /* no scan currently in progress */
edd16368
SC
7988
7989 pci_set_drvdata(pdev, h);
9a41338e 7990 h->ndevices = 0;
316b221a 7991 h->hba_mode_enabled = 0;
2946e82b 7992
9a41338e 7993 spin_lock_init(&h->devlock);
105a3dbc
RE
7994 rc = hpsa_put_ctlr_into_performant_mode(h);
7995 if (rc)
2946e82b
RE
7996 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
7997
7998 /* hook into SCSI subsystem */
7999 rc = hpsa_scsi_add_host(h);
8000 if (rc)
8001 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
2efa5929
RE
8002
8003 /* create the resubmit workqueue */
8004 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8005 if (!h->rescan_ctlr_wq) {
8006 rc = -ENOMEM;
8007 goto clean7;
8008 }
8009
8010 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8011 if (!h->resubmit_wq) {
8012 rc = -ENOMEM;
8013 goto clean7; /* aer/h */
8014 }
64670ac8 8015
105a3dbc
RE
8016 /*
8017 * At this point, the controller is ready to take commands.
64670ac8
SC
8018 * Now, if reset_devices and the hard reset didn't work, try
8019 * the soft reset and see if that works.
8020 */
8021 if (try_soft_reset) {
8022
8023 /* This is kind of gross. We may or may not get a completion
8024 * from the soft reset command, and if we do, then the value
8025 * from the fifo may or may not be valid. So, we wait 10 secs
8026 * after the reset throwing away any completions we get during
8027 * that time. Unregister the interrupt handler and register
8028 * fake ones to scoop up any residual completions.
8029 */
8030 spin_lock_irqsave(&h->lock, flags);
8031 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8032 spin_unlock_irqrestore(&h->lock, flags);
ec501a18 8033 hpsa_free_irqs(h);
9ee61794 8034 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
64670ac8
SC
8035 hpsa_intx_discard_completions);
8036 if (rc) {
9ee61794
RE
8037 dev_warn(&h->pdev->dev,
8038 "Failed to request_irq after soft reset.\n");
d498757c 8039 /*
b2ef480c
RE
8040 * cannot goto clean7 or free_irqs will be called
8041 * again. Instead, do its work
8042 */
8043 hpsa_free_performant_mode(h); /* clean7 */
8044 hpsa_free_sg_chain_blocks(h); /* clean6 */
8045 hpsa_free_cmd_pool(h); /* clean5 */
8046 /*
8047 * skip hpsa_free_irqs(h) clean4 since that
8048 * was just called before request_irqs failed
d498757c
RE
8049 */
8050 goto clean3;
64670ac8
SC
8051 }
8052
8053 rc = hpsa_kdump_soft_reset(h);
8054 if (rc)
8055 /* Neither hard nor soft reset worked, we're hosed. */
7ef7323f 8056 goto clean7;
64670ac8
SC
8057
8058 dev_info(&h->pdev->dev, "Board READY.\n");
8059 dev_info(&h->pdev->dev,
8060 "Waiting for stale completions to drain.\n");
8061 h->access.set_intr_mask(h, HPSA_INTR_ON);
8062 msleep(10000);
8063 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8064
8065 rc = controller_reset_failed(h->cfgtable);
8066 if (rc)
8067 dev_info(&h->pdev->dev,
8068 "Soft reset appears to have failed.\n");
8069
8070 /* since the controller's reset, we have to go back and re-init
8071 * everything. Easiest to just forget what we've done and do it
8072 * all over again.
8073 */
8074 hpsa_undo_allocations_after_kdump_soft_reset(h);
8075 try_soft_reset = 0;
8076 if (rc)
b2ef480c 8077 /* don't goto clean, we already unallocated */
64670ac8
SC
8078 return -ENODEV;
8079
8080 goto reinit_after_soft_reset;
8081 }
edd16368 8082
105a3dbc
RE
8083 /* Enable Accelerated IO path at driver layer */
8084 h->acciopath_status = 1;
da0697bd 8085
e863d68e 8086
edd16368
SC
8087 /* Turn the interrupts on so we can service requests */
8088 h->access.set_intr_mask(h, HPSA_INTR_ON);
8089
339b2b14 8090 hpsa_hba_inquiry(h);
8a98db73
SC
8091
8092 /* Monitor the controller for firmware lockups */
8093 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8094 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8095 schedule_delayed_work(&h->monitor_ctlr_work,
8096 h->heartbeat_sample_interval);
6636e7f4
DB
8097 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8098 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8099 h->heartbeat_sample_interval);
88bf6d62 8100 return 0;
edd16368 8101
2946e82b 8102clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
105a3dbc
RE
8103 hpsa_free_performant_mode(h);
8104 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8105clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
33a2ffce 8106 hpsa_free_sg_chain_blocks(h);
2946e82b 8107clean5: /* cmd, irq, shost, pci, lu, aer/h */
2e9d1b36 8108 hpsa_free_cmd_pool(h);
2946e82b 8109clean4: /* irq, shost, pci, lu, aer/h */
ec501a18 8110 hpsa_free_irqs(h);
2946e82b
RE
8111clean3: /* shost, pci, lu, aer/h */
8112 scsi_host_put(h->scsi_host);
8113 h->scsi_host = NULL;
8114clean2_5: /* pci, lu, aer/h */
195f2c65 8115 hpsa_free_pci_init(h);
2946e82b 8116clean2: /* lu, aer/h */
105a3dbc
RE
8117 if (h->lockup_detected) {
8118 free_percpu(h->lockup_detected);
8119 h->lockup_detected = NULL;
8120 }
8121clean1: /* wq/aer/h */
8122 if (h->resubmit_wq) {
080ef1cc 8123 destroy_workqueue(h->resubmit_wq);
105a3dbc
RE
8124 h->resubmit_wq = NULL;
8125 }
8126 if (h->rescan_ctlr_wq) {
6636e7f4 8127 destroy_workqueue(h->rescan_ctlr_wq);
105a3dbc
RE
8128 h->rescan_ctlr_wq = NULL;
8129 }
edd16368 8130 kfree(h);
ecd9aad4 8131 return rc;
edd16368
SC
8132}
8133
8134static void hpsa_flush_cache(struct ctlr_info *h)
8135{
8136 char *flush_buf;
8137 struct CommandList *c;
25163bd5 8138 int rc;
702890e3 8139
094963da 8140 if (unlikely(lockup_detected(h)))
702890e3 8141 return;
edd16368
SC
8142 flush_buf = kzalloc(4, GFP_KERNEL);
8143 if (!flush_buf)
8144 return;
8145
45fcb86e 8146 c = cmd_alloc(h);
bf43caf3 8147
a2dac136
SC
8148 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8149 RAID_CTLR_LUNID, TYPE_CMD)) {
8150 goto out;
8151 }
25163bd5
WS
8152 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8153 PCI_DMA_TODEVICE, NO_TIMEOUT);
8154 if (rc)
8155 goto out;
edd16368 8156 if (c->err_info->CommandStatus != 0)
a2dac136 8157out:
edd16368
SC
8158 dev_warn(&h->pdev->dev,
8159 "error flushing cache on controller\n");
45fcb86e 8160 cmd_free(h, c);
edd16368
SC
8161 kfree(flush_buf);
8162}
8163
8164static void hpsa_shutdown(struct pci_dev *pdev)
8165{
8166 struct ctlr_info *h;
8167
8168 h = pci_get_drvdata(pdev);
8169 /* Turn board interrupts off and send the flush cache command
8170 * sendcmd will turn off interrupt, and send the flush...
8171 * To write all data in the battery backed cache to disks
8172 */
8173 hpsa_flush_cache(h);
8174 h->access.set_intr_mask(h, HPSA_INTR_OFF);
105a3dbc 8175 hpsa_free_irqs(h); /* init_one 4 */
cc64c817 8176 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
edd16368
SC
8177}
8178
6f039790 8179static void hpsa_free_device_info(struct ctlr_info *h)
55e14e76
SC
8180{
8181 int i;
8182
105a3dbc 8183 for (i = 0; i < h->ndevices; i++) {
55e14e76 8184 kfree(h->dev[i]);
105a3dbc
RE
8185 h->dev[i] = NULL;
8186 }
55e14e76
SC
8187}
8188
6f039790 8189static void hpsa_remove_one(struct pci_dev *pdev)
edd16368
SC
8190{
8191 struct ctlr_info *h;
8a98db73 8192 unsigned long flags;
edd16368
SC
8193
8194 if (pci_get_drvdata(pdev) == NULL) {
a0c12413 8195 dev_err(&pdev->dev, "unable to remove device\n");
edd16368
SC
8196 return;
8197 }
8198 h = pci_get_drvdata(pdev);
8a98db73
SC
8199
8200 /* Get rid of any controller monitoring work items */
8201 spin_lock_irqsave(&h->lock, flags);
8202 h->remove_in_progress = 1;
8a98db73 8203 spin_unlock_irqrestore(&h->lock, flags);
6636e7f4
DB
8204 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8205 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8206 destroy_workqueue(h->rescan_ctlr_wq);
8207 destroy_workqueue(h->resubmit_wq);
cc64c817 8208
105a3dbc 8209 /* includes hpsa_free_irqs - init_one 4 */
195f2c65 8210 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
edd16368 8211 hpsa_shutdown(pdev);
cc64c817 8212
105a3dbc
RE
8213 hpsa_free_device_info(h); /* scan */
8214
2946e82b
RE
8215 kfree(h->hba_inquiry_data); /* init_one 10 */
8216 h->hba_inquiry_data = NULL; /* init_one 10 */
8217 if (h->scsi_host)
8218 scsi_remove_host(h->scsi_host); /* init_one 8 */
8219 hpsa_free_ioaccel2_sg_chain_blocks(h);
105a3dbc
RE
8220 hpsa_free_performant_mode(h); /* init_one 7 */
8221 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8222 hpsa_free_cmd_pool(h); /* init_one 5 */
8223
8224 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
195f2c65 8225
2946e82b
RE
8226 scsi_host_put(h->scsi_host); /* init_one 3 */
8227 h->scsi_host = NULL; /* init_one 3 */
8228
195f2c65 8229 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
2946e82b 8230 hpsa_free_pci_init(h); /* init_one 2.5 */
195f2c65 8231
105a3dbc
RE
8232 free_percpu(h->lockup_detected); /* init_one 2 */
8233 h->lockup_detected = NULL; /* init_one 2 */
8234 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8235 kfree(h); /* init_one 1 */
edd16368
SC
8236}
8237
8238static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8239 __attribute__((unused)) pm_message_t state)
8240{
8241 return -ENOSYS;
8242}
8243
8244static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8245{
8246 return -ENOSYS;
8247}
8248
8249static struct pci_driver hpsa_pci_driver = {
f79cfec6 8250 .name = HPSA,
edd16368 8251 .probe = hpsa_init_one,
6f039790 8252 .remove = hpsa_remove_one,
edd16368
SC
8253 .id_table = hpsa_pci_device_id, /* id_table */
8254 .shutdown = hpsa_shutdown,
8255 .suspend = hpsa_suspend,
8256 .resume = hpsa_resume,
8257};
8258
303932fd
DB
8259/* Fill in bucket_map[], given nsgs (the max number of
8260 * scatter gather elements supported) and bucket[],
8261 * which is an array of 8 integers. The bucket[] array
8262 * contains 8 different DMA transfer sizes (in 16
8263 * byte increments) which the controller uses to fetch
8264 * commands. This function fills in bucket_map[], which
8265 * maps a given number of scatter gather elements to one of
8266 * the 8 DMA transfer sizes. The point of it is to allow the
8267 * controller to only do as much DMA as needed to fetch the
8268 * command, with the DMA transfer size encoded in the lower
8269 * bits of the command address.
8270 */
8271static void calc_bucket_map(int bucket[], int num_buckets,
2b08b3e9 8272 int nsgs, int min_blocks, u32 *bucket_map)
303932fd
DB
8273{
8274 int i, j, b, size;
8275
303932fd
DB
8276 /* Note, bucket_map must have nsgs+1 entries. */
8277 for (i = 0; i <= nsgs; i++) {
8278 /* Compute size of a command with i SG entries */
e1f7de0c 8279 size = i + min_blocks;
303932fd
DB
8280 b = num_buckets; /* Assume the biggest bucket */
8281 /* Find the bucket that is just big enough */
e1f7de0c 8282 for (j = 0; j < num_buckets; j++) {
303932fd
DB
8283 if (bucket[j] >= size) {
8284 b = j;
8285 break;
8286 }
8287 }
8288 /* for a command with i SG entries, use bucket b. */
8289 bucket_map[i] = b;
8290 }
8291}
8292
105a3dbc
RE
8293/*
8294 * return -ENODEV on err, 0 on success (or no action)
8295 * allocates numerous items that must be freed later
8296 */
c706a795 8297static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
303932fd 8298{
6c311b57
SC
8299 int i;
8300 unsigned long register_value;
e1f7de0c
MG
8301 unsigned long transMethod = CFGTBL_Trans_Performant |
8302 (trans_support & CFGTBL_Trans_use_short_tags) |
b9af4937
SC
8303 CFGTBL_Trans_enable_directed_msix |
8304 (trans_support & (CFGTBL_Trans_io_accel1 |
8305 CFGTBL_Trans_io_accel2));
e1f7de0c 8306 struct access_method access = SA5_performant_access;
def342bd
SC
8307
8308 /* This is a bit complicated. There are 8 registers on
8309 * the controller which we write to to tell it 8 different
8310 * sizes of commands which there may be. It's a way of
8311 * reducing the DMA done to fetch each command. Encoded into
8312 * each command's tag are 3 bits which communicate to the controller
8313 * which of the eight sizes that command fits within. The size of
8314 * each command depends on how many scatter gather entries there are.
8315 * Each SG entry requires 16 bytes. The eight registers are programmed
8316 * with the number of 16-byte blocks a command of that size requires.
8317 * The smallest command possible requires 5 such 16 byte blocks.
d66ae08b 8318 * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd
SC
8319 * blocks. Note, this only extends to the SG entries contained
8320 * within the command block, and does not extend to chained blocks
8321 * of SG elements. bft[] contains the eight values we write to
8322 * the registers. They are not evenly distributed, but have more
8323 * sizes for small commands, and fewer sizes for larger commands.
8324 */
d66ae08b 8325 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
b9af4937
SC
8326#define MIN_IOACCEL2_BFT_ENTRY 5
8327#define HPSA_IOACCEL2_HEADER_SZ 4
8328 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8329 13, 14, 15, 16, 17, 18, 19,
8330 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8331 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8332 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8333 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8334 16 * MIN_IOACCEL2_BFT_ENTRY);
8335 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
d66ae08b 8336 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
303932fd
DB
8337 /* 5 = 1 s/g entry or 4k
8338 * 6 = 2 s/g entry or 8k
8339 * 8 = 4 s/g entry or 16k
8340 * 10 = 6 s/g entry or 24k
8341 */
303932fd 8342
b3a52e79
SC
8343 /* If the controller supports either ioaccel method then
8344 * we can also use the RAID stack submit path that does not
8345 * perform the superfluous readl() after each command submission.
8346 */
8347 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8348 access = SA5_performant_access_no_read;
8349
303932fd 8350 /* Controller spec: zero out this buffer. */
072b0518
SC
8351 for (i = 0; i < h->nreply_queues; i++)
8352 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
303932fd 8353
d66ae08b
SC
8354 bft[7] = SG_ENTRIES_IN_CMD + 4;
8355 calc_bucket_map(bft, ARRAY_SIZE(bft),
e1f7de0c 8356 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
303932fd
DB
8357 for (i = 0; i < 8; i++)
8358 writel(bft[i], &h->transtable->BlockFetch[i]);
8359
8360 /* size of controller ring buffer */
8361 writel(h->max_commands, &h->transtable->RepQSize);
254f796b 8362 writel(h->nreply_queues, &h->transtable->RepQCount);
303932fd
DB
8363 writel(0, &h->transtable->RepQCtrAddrLow32);
8364 writel(0, &h->transtable->RepQCtrAddrHigh32);
254f796b
MG
8365
8366 for (i = 0; i < h->nreply_queues; i++) {
8367 writel(0, &h->transtable->RepQAddr[i].upper);
072b0518 8368 writel(h->reply_queue[i].busaddr,
254f796b
MG
8369 &h->transtable->RepQAddr[i].lower);
8370 }
8371
b9af4937 8372 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
e1f7de0c
MG
8373 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8374 /*
8375 * enable outbound interrupt coalescing in accelerator mode;
8376 */
8377 if (trans_support & CFGTBL_Trans_io_accel1) {
8378 access = SA5_ioaccel_mode1_access;
8379 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8380 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
c349775e
ST
8381 } else {
8382 if (trans_support & CFGTBL_Trans_io_accel2) {
8383 access = SA5_ioaccel_mode2_access;
8384 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8385 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8386 }
e1f7de0c 8387 }
303932fd 8388 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8389 if (hpsa_wait_for_mode_change_ack(h)) {
8390 dev_err(&h->pdev->dev,
8391 "performant mode problem - doorbell timeout\n");
8392 return -ENODEV;
8393 }
303932fd
DB
8394 register_value = readl(&(h->cfgtable->TransportActive));
8395 if (!(register_value & CFGTBL_Trans_Performant)) {
050f7147
SC
8396 dev_err(&h->pdev->dev,
8397 "performant mode problem - transport not active\n");
c706a795 8398 return -ENODEV;
303932fd 8399 }
960a30e7 8400 /* Change the access methods to the performant access methods */
e1f7de0c
MG
8401 h->access = access;
8402 h->transMethod = transMethod;
8403
b9af4937
SC
8404 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8405 (trans_support & CFGTBL_Trans_io_accel2)))
c706a795 8406 return 0;
e1f7de0c 8407
b9af4937
SC
8408 if (trans_support & CFGTBL_Trans_io_accel1) {
8409 /* Set up I/O accelerator mode */
8410 for (i = 0; i < h->nreply_queues; i++) {
8411 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8412 h->reply_queue[i].current_entry =
8413 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8414 }
8415 bft[7] = h->ioaccel_maxsg + 8;
8416 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8417 h->ioaccel1_blockFetchTable);
e1f7de0c 8418
b9af4937 8419 /* initialize all reply queue entries to unused */
072b0518
SC
8420 for (i = 0; i < h->nreply_queues; i++)
8421 memset(h->reply_queue[i].head,
8422 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8423 h->reply_queue_size);
e1f7de0c 8424
b9af4937
SC
8425 /* set all the constant fields in the accelerator command
8426 * frames once at init time to save CPU cycles later.
8427 */
8428 for (i = 0; i < h->nr_cmds; i++) {
8429 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8430
8431 cp->function = IOACCEL1_FUNCTION_SCSIIO;
8432 cp->err_info = (u32) (h->errinfo_pool_dhandle +
8433 (i * sizeof(struct ErrorInfo)));
8434 cp->err_info_len = sizeof(struct ErrorInfo);
8435 cp->sgl_offset = IOACCEL1_SGLOFFSET;
2b08b3e9
DB
8436 cp->host_context_flags =
8437 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
b9af4937
SC
8438 cp->timeout_sec = 0;
8439 cp->ReplyQueue = 0;
50a0decf 8440 cp->tag =
f2405db8 8441 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
50a0decf
SC
8442 cp->host_addr =
8443 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
b9af4937 8444 (i * sizeof(struct io_accel1_cmd)));
b9af4937
SC
8445 }
8446 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8447 u64 cfg_offset, cfg_base_addr_index;
8448 u32 bft2_offset, cfg_base_addr;
8449 int rc;
8450
8451 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8452 &cfg_base_addr_index, &cfg_offset);
8453 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8454 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8455 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8456 4, h->ioaccel2_blockFetchTable);
8457 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8458 BUILD_BUG_ON(offsetof(struct CfgTable,
8459 io_accel_request_size_offset) != 0xb8);
8460 h->ioaccel2_bft2_regs =
8461 remap_pci_mem(pci_resource_start(h->pdev,
8462 cfg_base_addr_index) +
8463 cfg_offset + bft2_offset,
8464 ARRAY_SIZE(bft2) *
8465 sizeof(*h->ioaccel2_bft2_regs));
8466 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8467 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
e1f7de0c 8468 }
b9af4937 8469 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8470 if (hpsa_wait_for_mode_change_ack(h)) {
8471 dev_err(&h->pdev->dev,
8472 "performant mode problem - enabling ioaccel mode\n");
8473 return -ENODEV;
8474 }
8475 return 0;
e1f7de0c
MG
8476}
8477
1fb7c98a
RE
8478/* Free ioaccel1 mode command blocks and block fetch table */
8479static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8480{
105a3dbc 8481 if (h->ioaccel_cmd_pool) {
1fb7c98a
RE
8482 pci_free_consistent(h->pdev,
8483 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8484 h->ioaccel_cmd_pool,
8485 h->ioaccel_cmd_pool_dhandle);
105a3dbc
RE
8486 h->ioaccel_cmd_pool = NULL;
8487 h->ioaccel_cmd_pool_dhandle = 0;
8488 }
1fb7c98a 8489 kfree(h->ioaccel1_blockFetchTable);
105a3dbc 8490 h->ioaccel1_blockFetchTable = NULL;
1fb7c98a
RE
8491}
8492
d37ffbe4
RE
8493/* Allocate ioaccel1 mode command blocks and block fetch table */
8494static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
e1f7de0c 8495{
283b4a9b
SC
8496 h->ioaccel_maxsg =
8497 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8498 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8499 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8500
e1f7de0c
MG
8501 /* Command structures must be aligned on a 128-byte boundary
8502 * because the 7 lower bits of the address are used by the
8503 * hardware.
8504 */
e1f7de0c
MG
8505 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8506 IOACCEL1_COMMANDLIST_ALIGNMENT);
8507 h->ioaccel_cmd_pool =
8508 pci_alloc_consistent(h->pdev,
8509 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8510 &(h->ioaccel_cmd_pool_dhandle));
8511
8512 h->ioaccel1_blockFetchTable =
283b4a9b 8513 kmalloc(((h->ioaccel_maxsg + 1) *
e1f7de0c
MG
8514 sizeof(u32)), GFP_KERNEL);
8515
8516 if ((h->ioaccel_cmd_pool == NULL) ||
8517 (h->ioaccel1_blockFetchTable == NULL))
8518 goto clean_up;
8519
8520 memset(h->ioaccel_cmd_pool, 0,
8521 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8522 return 0;
8523
8524clean_up:
1fb7c98a 8525 hpsa_free_ioaccel1_cmd_and_bft(h);
2dd02d74 8526 return -ENOMEM;
6c311b57
SC
8527}
8528
1fb7c98a
RE
8529/* Free ioaccel2 mode command blocks and block fetch table */
8530static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8531{
d9a729f3
WS
8532 hpsa_free_ioaccel2_sg_chain_blocks(h);
8533
105a3dbc 8534 if (h->ioaccel2_cmd_pool) {
1fb7c98a
RE
8535 pci_free_consistent(h->pdev,
8536 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8537 h->ioaccel2_cmd_pool,
8538 h->ioaccel2_cmd_pool_dhandle);
105a3dbc
RE
8539 h->ioaccel2_cmd_pool = NULL;
8540 h->ioaccel2_cmd_pool_dhandle = 0;
8541 }
1fb7c98a 8542 kfree(h->ioaccel2_blockFetchTable);
105a3dbc 8543 h->ioaccel2_blockFetchTable = NULL;
1fb7c98a
RE
8544}
8545
d37ffbe4
RE
8546/* Allocate ioaccel2 mode command blocks and block fetch table */
8547static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
aca9012a 8548{
d9a729f3
WS
8549 int rc;
8550
aca9012a
SC
8551 /* Allocate ioaccel2 mode command blocks and block fetch table */
8552
8553 h->ioaccel_maxsg =
8554 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8555 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8556 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8557
aca9012a
SC
8558 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8559 IOACCEL2_COMMANDLIST_ALIGNMENT);
8560 h->ioaccel2_cmd_pool =
8561 pci_alloc_consistent(h->pdev,
8562 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8563 &(h->ioaccel2_cmd_pool_dhandle));
8564
8565 h->ioaccel2_blockFetchTable =
8566 kmalloc(((h->ioaccel_maxsg + 1) *
8567 sizeof(u32)), GFP_KERNEL);
8568
8569 if ((h->ioaccel2_cmd_pool == NULL) ||
d9a729f3
WS
8570 (h->ioaccel2_blockFetchTable == NULL)) {
8571 rc = -ENOMEM;
8572 goto clean_up;
8573 }
8574
8575 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8576 if (rc)
aca9012a
SC
8577 goto clean_up;
8578
8579 memset(h->ioaccel2_cmd_pool, 0,
8580 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8581 return 0;
8582
8583clean_up:
1fb7c98a 8584 hpsa_free_ioaccel2_cmd_and_bft(h);
d9a729f3 8585 return rc;
aca9012a
SC
8586}
8587
105a3dbc
RE
8588/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8589static void hpsa_free_performant_mode(struct ctlr_info *h)
8590{
8591 kfree(h->blockFetchTable);
8592 h->blockFetchTable = NULL;
8593 hpsa_free_reply_queues(h);
8594 hpsa_free_ioaccel1_cmd_and_bft(h);
8595 hpsa_free_ioaccel2_cmd_and_bft(h);
8596}
8597
8598/* return -ENODEV on error, 0 on success (or no action)
8599 * allocates numerous items that must be freed later
8600 */
8601static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
6c311b57
SC
8602{
8603 u32 trans_support;
e1f7de0c
MG
8604 unsigned long transMethod = CFGTBL_Trans_Performant |
8605 CFGTBL_Trans_use_short_tags;
105a3dbc 8606 int i, rc;
6c311b57 8607
02ec19c8 8608 if (hpsa_simple_mode)
105a3dbc 8609 return 0;
02ec19c8 8610
67c99a72 8611 trans_support = readl(&(h->cfgtable->TransportSupport));
8612 if (!(trans_support & PERFORMANT_MODE))
105a3dbc 8613 return 0;
67c99a72 8614
e1f7de0c
MG
8615 /* Check for I/O accelerator mode support */
8616 if (trans_support & CFGTBL_Trans_io_accel1) {
8617 transMethod |= CFGTBL_Trans_io_accel1 |
8618 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8619 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8620 if (rc)
8621 return rc;
8622 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8623 transMethod |= CFGTBL_Trans_io_accel2 |
aca9012a 8624 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8625 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8626 if (rc)
8627 return rc;
e1f7de0c
MG
8628 }
8629
eee0f03a 8630 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
cba3d38b 8631 hpsa_get_max_perf_mode_cmds(h);
6c311b57 8632 /* Performant mode ring buffer and supporting data structures */
072b0518 8633 h->reply_queue_size = h->max_commands * sizeof(u64);
6c311b57 8634
254f796b 8635 for (i = 0; i < h->nreply_queues; i++) {
072b0518
SC
8636 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8637 h->reply_queue_size,
8638 &(h->reply_queue[i].busaddr));
105a3dbc
RE
8639 if (!h->reply_queue[i].head) {
8640 rc = -ENOMEM;
8641 goto clean1; /* rq, ioaccel */
8642 }
254f796b
MG
8643 h->reply_queue[i].size = h->max_commands;
8644 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
8645 h->reply_queue[i].current_entry = 0;
8646 }
8647
6c311b57 8648 /* Need a block fetch table for performant mode */
d66ae08b 8649 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
6c311b57 8650 sizeof(u32)), GFP_KERNEL);
105a3dbc
RE
8651 if (!h->blockFetchTable) {
8652 rc = -ENOMEM;
8653 goto clean1; /* rq, ioaccel */
8654 }
6c311b57 8655
105a3dbc
RE
8656 rc = hpsa_enter_performant_mode(h, trans_support);
8657 if (rc)
8658 goto clean2; /* bft, rq, ioaccel */
8659 return 0;
303932fd 8660
105a3dbc 8661clean2: /* bft, rq, ioaccel */
303932fd 8662 kfree(h->blockFetchTable);
105a3dbc
RE
8663 h->blockFetchTable = NULL;
8664clean1: /* rq, ioaccel */
8665 hpsa_free_reply_queues(h);
8666 hpsa_free_ioaccel1_cmd_and_bft(h);
8667 hpsa_free_ioaccel2_cmd_and_bft(h);
8668 return rc;
303932fd
DB
8669}
8670
23100dd9 8671static int is_accelerated_cmd(struct CommandList *c)
76438d08 8672{
23100dd9
SC
8673 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8674}
8675
8676static void hpsa_drain_accel_commands(struct ctlr_info *h)
8677{
8678 struct CommandList *c = NULL;
f2405db8 8679 int i, accel_cmds_out;
281a7fd0 8680 int refcount;
76438d08 8681
f2405db8 8682 do { /* wait for all outstanding ioaccel commands to drain out */
23100dd9 8683 accel_cmds_out = 0;
f2405db8 8684 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 8685 c = h->cmd_pool + i;
281a7fd0
WS
8686 refcount = atomic_inc_return(&c->refcount);
8687 if (refcount > 1) /* Command is allocated */
8688 accel_cmds_out += is_accelerated_cmd(c);
8689 cmd_free(h, c);
f2405db8 8690 }
23100dd9 8691 if (accel_cmds_out <= 0)
281a7fd0 8692 break;
76438d08
SC
8693 msleep(100);
8694 } while (1);
8695}
8696
edd16368
SC
8697/*
8698 * This is it. Register the PCI driver information for the cards we control
8699 * the OS will call our registered routines when it finds one of our cards.
8700 */
8701static int __init hpsa_init(void)
8702{
31468401 8703 return pci_register_driver(&hpsa_pci_driver);
edd16368
SC
8704}
8705
8706static void __exit hpsa_cleanup(void)
8707{
8708 pci_unregister_driver(&hpsa_pci_driver);
edd16368
SC
8709}
8710
e1f7de0c
MG
8711static void __attribute__((unused)) verify_offsets(void)
8712{
dd0e19f3
ST
8713#define VERIFY_OFFSET(member, offset) \
8714 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8715
8716 VERIFY_OFFSET(structure_size, 0);
8717 VERIFY_OFFSET(volume_blk_size, 4);
8718 VERIFY_OFFSET(volume_blk_cnt, 8);
8719 VERIFY_OFFSET(phys_blk_shift, 16);
8720 VERIFY_OFFSET(parity_rotation_shift, 17);
8721 VERIFY_OFFSET(strip_size, 18);
8722 VERIFY_OFFSET(disk_starting_blk, 20);
8723 VERIFY_OFFSET(disk_blk_cnt, 28);
8724 VERIFY_OFFSET(data_disks_per_row, 36);
8725 VERIFY_OFFSET(metadata_disks_per_row, 38);
8726 VERIFY_OFFSET(row_cnt, 40);
8727 VERIFY_OFFSET(layout_map_count, 42);
8728 VERIFY_OFFSET(flags, 44);
8729 VERIFY_OFFSET(dekindex, 46);
8730 /* VERIFY_OFFSET(reserved, 48 */
8731 VERIFY_OFFSET(data, 64);
8732
8733#undef VERIFY_OFFSET
8734
b66cc250
MM
8735#define VERIFY_OFFSET(member, offset) \
8736 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8737
8738 VERIFY_OFFSET(IU_type, 0);
8739 VERIFY_OFFSET(direction, 1);
8740 VERIFY_OFFSET(reply_queue, 2);
8741 /* VERIFY_OFFSET(reserved1, 3); */
8742 VERIFY_OFFSET(scsi_nexus, 4);
8743 VERIFY_OFFSET(Tag, 8);
8744 VERIFY_OFFSET(cdb, 16);
8745 VERIFY_OFFSET(cciss_lun, 32);
8746 VERIFY_OFFSET(data_len, 40);
8747 VERIFY_OFFSET(cmd_priority_task_attr, 44);
8748 VERIFY_OFFSET(sg_count, 45);
8749 /* VERIFY_OFFSET(reserved3 */
8750 VERIFY_OFFSET(err_ptr, 48);
8751 VERIFY_OFFSET(err_len, 56);
8752 /* VERIFY_OFFSET(reserved4 */
8753 VERIFY_OFFSET(sg, 64);
8754
8755#undef VERIFY_OFFSET
8756
e1f7de0c
MG
8757#define VERIFY_OFFSET(member, offset) \
8758 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8759
8760 VERIFY_OFFSET(dev_handle, 0x00);
8761 VERIFY_OFFSET(reserved1, 0x02);
8762 VERIFY_OFFSET(function, 0x03);
8763 VERIFY_OFFSET(reserved2, 0x04);
8764 VERIFY_OFFSET(err_info, 0x0C);
8765 VERIFY_OFFSET(reserved3, 0x10);
8766 VERIFY_OFFSET(err_info_len, 0x12);
8767 VERIFY_OFFSET(reserved4, 0x13);
8768 VERIFY_OFFSET(sgl_offset, 0x14);
8769 VERIFY_OFFSET(reserved5, 0x15);
8770 VERIFY_OFFSET(transfer_len, 0x1C);
8771 VERIFY_OFFSET(reserved6, 0x20);
8772 VERIFY_OFFSET(io_flags, 0x24);
8773 VERIFY_OFFSET(reserved7, 0x26);
8774 VERIFY_OFFSET(LUN, 0x34);
8775 VERIFY_OFFSET(control, 0x3C);
8776 VERIFY_OFFSET(CDB, 0x40);
8777 VERIFY_OFFSET(reserved8, 0x50);
8778 VERIFY_OFFSET(host_context_flags, 0x60);
8779 VERIFY_OFFSET(timeout_sec, 0x62);
8780 VERIFY_OFFSET(ReplyQueue, 0x64);
8781 VERIFY_OFFSET(reserved9, 0x65);
50a0decf 8782 VERIFY_OFFSET(tag, 0x68);
e1f7de0c
MG
8783 VERIFY_OFFSET(host_addr, 0x70);
8784 VERIFY_OFFSET(CISS_LUN, 0x78);
8785 VERIFY_OFFSET(SG, 0x78 + 8);
8786#undef VERIFY_OFFSET
8787}
8788
edd16368
SC
8789module_init(hpsa_init);
8790module_exit(hpsa_cleanup);