/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.10-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

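/*
 * Illustrative usage sketch (not part of the driver): these parameters
 * are set at module load time in the usual way, e.g.
 *
 *	modprobe hpsa hpsa_simple_mode=1 hpsa_allow_any=1
 *
 * and, since both are declared S_IWUSR, can also be changed at runtime
 * through /sys/module/hpsa/parameters/.
 */
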
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
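/*
 * Worked example of the board_id composition described above: PCI
 * subsystem device id 0x3241 with subsystem vendor id 0x103C yields
 * board_id 0x3241103C (device id in the high 16 bits, vendor id in the
 * low 16, as assembled by hpsa_lookup_board_id()), which the table
 * below maps to "Smart Array P212".
 */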
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

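/*
 * Illustrative sketch (not part of the driver): decoding a hypothetical
 * 18-byte fixed-format sense buffer reporting a power-on/reset unit
 * attention.  UNIT_ATTENTION (0x06) comes from <scsi/scsi.h>; the
 * POWER_OR_RESET ASC value (0x29) is defined in hpsa_cmd.h.
 */
#if 0
static void decode_sense_example(void)
{
	u8 sense[18] = { 0x70, 0, UNIT_ATTENTION, 0, 0, 0, 0, 10,
			 0, 0, 0, 0, POWER_OR_RESET, 0, 0, 0, 0, 0 };
	u8 sense_key, asc, ascq;

	decode_sense_data(sense, sizeof(sense), &sense_key, &asc, &ascq);
	/* now sense_key == UNIT_ATTENTION, asc == 0x29, ascq == 0 */
}
#endif
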
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

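/*
 * Illustrative usage from userspace (a sketch; the host number will
 * vary per system):
 *
 *	echo 1 > /sys/class/scsi_host/host2/hp_ssd_smart_path_status
 *
 * enables the ioaccel path and echo 0 disables it; the attribute itself
 * is declared below via DEVICE_ATTR(hp_ssd_smart_path_status, ...).
 */
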
static ssize_t host_store_raid_offload_debug(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

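/*
 * Worked example for the check above (illustrative): bits 7:6 of byte 3
 * of the 8-byte CISS LUN address select the addressing mode, and 01b
 * means logical-device addressing.  So an address whose byte 3 is in
 * 0x40-0x7F (e.g. 0x41) is treated as a logical device here, while 0x00
 * (peripheral addressing, as in the all-zero RAID_CTLR_LUNID) is not.
 */
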
static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

#define MAX_PATHS 8
#define PATH_STRING_LEN 50

static ssize_t path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];
	unsigned char path[MAX_PATHS][PATH_STRING_LEN];

	memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len = snprintf(path[i],
				PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (is_ext_target(h, hdev) ||
			(hdev->devtype == TYPE_RAID) ||
			is_logical_dev_addr_mode(hdev->scsi3addr)) {
			output_len += snprintf(path[i] + output_len,
					PATH_STRING_LEN, "%s\n",
					active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		if (hdev->phys_connector[i] > 0)
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN,
				"PORT: %.2s ",
				phys_connector);
		if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += snprintf(path[i] + output_len,
					PATH_STRING_LEN,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += snprintf(path[i] + output_len,
					PATH_STRING_LEN,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += snprintf(path[i] + output_len,
				PATH_STRING_LEN, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
		path[0], path[1], path[2], path[3],
		path[4], path[5], path[6], path[7]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	&dev_attr_lockup_detected,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
	.max_sectors		= 8192,
	.no_write_same		= 1,
};

static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

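/*
 * Note on the wraparound test above (an illustrative summary, not from
 * the original source): the controller posts each completion tag with
 * its low bit set to the ring's current lap parity.  The driver
 * consumes a slot only while the tag's low bit matches rq->wraparound;
 * each time current_entry wraps past max_commands the parity flips, so
 * stale tags left over from the previous lap compare unequal and the
 * queue reads back as FIFO_EMPTY.
 */
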
/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

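/*
 * Illustrative sketch (not part of the driver): for a normal performant
 * mode command whose header reports 4 scatter-gather entries, the tag
 * is composed as
 *
 *	c->busaddr |= 1 | (h->blockFetchTable[4] << 1);
 *
 * i.e. bit 0 selects performant mode and bits 1-3 carry the block fetch
 * table entry; set_performant_mode() below does exactly this, indexed
 * by c->Header.SGList.
 */
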
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
					struct CommandList *c,
					int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

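/*
 * Hypothetical caller sketch (illustrative only): claim the first free
 * target id on a bus for a freshly discovered physical device.  The
 * helper returns 0 on success (note the inverted !found).
 */
#if 0
	int target, lun;

	if (!hpsa_find_target_lun(h, device->scsi3addr, bus, &target, &lun)) {
		device->target = target;	/* first unused target on bus */
		device->lun = lun;		/* always 0 from this helper */
	}
#endif
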
static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			dev->raid_level > RAID_UNKNOWN ?
				"RAID-?" : raid_label[dev->raid_level],
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting bytes 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in bytes 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}

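/*
 * Worked example of the byte 4/5 masking above (illustrative values):
 * two LUNs of the same multi-lun device may differ only in bytes 4-5,
 * e.g.
 *
 *	LUN 0: 01 50 00 40 00 00 00 00
 *	LUN 2: 01 50 00 40 02 00 00 00
 *
 * Zeroing bytes 4 and 5 of both addresses makes the memcmp() match, so
 * the new LUN inherits bus/target from its already-known sibling and
 * takes its logical unit number from scsi3addr[4].
 */
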
/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	int offload_enabled;
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	}
	if (new_entry->hba_ioaccel_enabled) {
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
	}
	h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	/*
	 * We can turn off ioaccel offload now, but need to delay turning
	 * it on until we can update h->dev[entry]->phys_disk[], but we
	 * can't do that until all the devices are updated.
	 */
	h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
	if (!new_entry->offload_enabled)
		h->dev[entry]->offload_enabled = 0;

	offload_enabled = h->dev[entry]->offload_enabled;
	h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
	hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
	h->dev[entry]->offload_enabled = offload_enabled;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
	new_entry->offload_to_be_enabled = new_entry->offload_enabled;
	new_entry->offload_enabled = 0;
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (!is_logical_dev_addr_mode(dev1->scsi3addr))
		if (dev1->queue_depth != dev2->queue_depth)
			return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	if (needle == NULL)
		return DEVICE_NOT_FOUND;

	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

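/*
 * Hypothetical caller sketch (illustrative only; the real device-table
 * rescan logic lives elsewhere in this file): how the four return codes
 * are typically dispatched.
 */
#if 0
	switch (hpsa_scsi_find_entry(sd, h->dev, h->ndevices, &entry)) {
	case DEVICE_SAME:	/* identical device: nothing to do */
		break;
	case DEVICE_UPDATED:	/* minor attribute change: update in place */
		break;
	case DEVICE_CHANGED:	/* same address, different device: replace */
		break;
	case DEVICE_NOT_FOUND:	/* brand new device: add an entry */
		break;
	}
#endif
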
9846590e
SC
1445static void hpsa_monitor_offline_device(struct ctlr_info *h,
1446 unsigned char scsi3addr[])
1447{
1448 struct offline_device_entry *device;
1449 unsigned long flags;
1450
1451 /* Check to see if device is already on the list */
1452 spin_lock_irqsave(&h->offline_device_lock, flags);
1453 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1454 if (memcmp(device->scsi3addr, scsi3addr,
1455 sizeof(device->scsi3addr)) == 0) {
1456 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1457 return;
1458 }
1459 }
1460 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1461
1462 /* Device is not on the list, add it. */
1463 device = kmalloc(sizeof(*device), GFP_KERNEL);
1464 if (!device) {
1465 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1466 return;
1467 }
1468 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1469 spin_lock_irqsave(&h->offline_device_lock, flags);
1470 list_add_tail(&device->offline_list, &h->offline_device_list);
1471 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1472}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_NOT_AVAILABLE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is waiting for a transforming volume.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	logical_drive->nphysical_disks = nraid_map_entries;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j] == NULL)
				continue;
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->offload_to_be_enabled = 0;
			logical_drive->queue_depth = 8;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}
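
/*
 * Editor's note (illustrative, not part of the driver): the qdepth
 * accumulation above caps the logical drive's queue depth at the sum of
 * its members' depths, never exceeding h->nr_cmds.  For example, under
 * the assumption of a volume backed by two physical disks that each
 * report queue_depth = 31, on a controller with nr_cmds = 1024:
 *
 *	qdepth = min(1024, 0 + 31);	// 31 after the first member
 *	qdepth = min(1024, 31 + 31);	// 62 after the second
 *
 * so the volume advertises depth 62 -- accurate for reads, an
 * overestimate for writes that must touch every member, as the comment
 * above notes.
 */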

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i] == NULL)
			continue;
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;

		/*
		 * If offload is currently enabled, the RAID map and
		 * phys_disk[] assignment *better* not be changing;
		 * and since they aren't changing, we do not need to
		 * update them.
		 */
		if (dev[i]->offload_enabled)
			continue;

		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	/*
	 * A reset can cause a device status to change;
	 * re-schedule the scan to see what happened.
	 */
	if (h->reset_in_progress) {
		h->drv_req_rescan = 1;
		return;
	}

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev,
			"out of memory in adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, i, removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);

	/* Now that h->dev[]->phys_disk[] is coherent, we can enable
	 * any logical drives that need it enabled.
	 */
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == NULL)
			continue;
		h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
	}

	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[].
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify the scsi mid layer of any changes the first time
	 * through (or if there are no changes); scsi_scan_host will do it
	 * later, the first time through.
	 */
	if (!changes)
		goto free_and_out;

	sh = h->scsi_host;
	if (sh == NULL) {
		dev_warn(&h->pdev->dev, "%s: scsi_host is null\n", __func__);
		goto free_and_out;
	}
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		if (removed[i] == NULL)
			continue;
		if (removed[i]->expose_device) {
			struct scsi_device *sdev =
				scsi_device_lookup(sh, removed[i]->bus,
					removed[i]->target, removed[i]->lun);
			if (sdev != NULL) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			} else {
				/*
				 * We don't expect to get here.
				 * Future cmds to this device will get a
				 * selection timeout as if the device were
				 * gone.
				 */
				hpsa_show_dev_msg(KERN_WARNING, h, removed[i],
					"didn't find device for removal.");
			}
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (added[i] == NULL)
			continue;
		if (!(added[i]->expose_device))
			continue;
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "addition failed, device not added.");
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
		h->drv_req_rescan = 1;
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (likely(sd)) {
		atomic_set(&sd->ioaccel_cmds_out, 0);
		sdev->hostdata = sd->expose_device ? sd : NULL;
	} else
		sdev->hostdata = NULL;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/* configure scsi device based on internal per-device structure */
static int hpsa_slave_configure(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	int queue_depth;

	sd = sdev->hostdata;
	sdev->no_uld_attach = !sd || !sd->expose_device;

	if (sd)
		queue_depth = sd->queue_depth != 0 ?
			sd->queue_depth : sdev->host->can_queue;
	else
		queue_depth = sdev->host->can_queue;

	scsi_change_queue_depth(sdev, queue_depth);

	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->ioaccel2_cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->ioaccel2_cmd_sg_list[i]);
		h->ioaccel2_cmd_sg_list[i] = NULL;
	}
	kfree(h->ioaccel2_cmd_sg_list);
	h->ioaccel2_cmd_sg_list = NULL;
}

static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->ioaccel2_cmd_sg_list =
		kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
			GFP_KERNEL);
	if (!h->ioaccel2_cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->ioaccel2_cmd_sg_list[i] =
			kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
				h->maxsgentries, GFP_KERNEL);
		if (!h->ioaccel2_cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	return -ENOMEM;
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
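
/*
 * Editor's note (illustrative, not part of the driver): both allocators
 * above build the same two-level shape -- one pointer array indexed by
 * command slot, each slot owning a contiguous chain block sized for the
 * worst case.  Sketch of the RAID-path layout, assuming nr_cmds command
 * slots and chainsize chained descriptors per command:
 *
 *	cmd_sg_list ----> [0] -> SGDescriptor[chainsize]
 *	(nr_cmds ptrs)    [1] -> SGDescriptor[chainsize]
 *	                  ...
 *
 * Pre-allocating per command slot means no allocation is needed on the
 * I/O path when a request overflows the SG entries embedded in the
 * CommandList itself.
 */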

static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp, struct CommandList *c)
{
	struct ioaccel2_sg_element *chain_block;
	u64 temp64;
	u32 chain_size;

	chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
	chain_size = le32_to_cpu(cp->sg[0].length);
	temp64 = pci_map_single(h->pdev, chain_block, chain_size,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		cp->sg->address = 0;
		return -1;
	}
	cp->sg->address = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
	struct io_accel2_cmd *cp)
{
	struct ioaccel2_sg_element *chain_sg;
	u64 temp64;
	u32 chain_size;

	chain_sg = cp->sg;
	temp64 = le64_to_cpu(chain_sg->address);
	chain_size = le32_to_cpu(cp->sg[0].length);
	pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
}

static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}


/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;
	u32 ioaccel2_resid = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			retry = 1;
			break;
		default:
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_IO_ERROR:
		case IOACCEL2_STATUS_SR_IO_ABORTED:
		case IOACCEL2_STATUS_SR_OVERRUN:
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_UNDERRUN:
			cmd->result = (DID_OK << 16);		/* host byte */
			cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
			ioaccel2_resid = get_unaligned_le32(
						&c2->error_data.resid_cnt[0]);
			scsi_set_resid(cmd, ioaccel2_resid);
			break;
		case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
		case IOACCEL2_STATUS_SR_INVALID_DEVICE:
		case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
			/* We will get an event from ctlr to trigger rescan */
			retry = 1;
			break;
		default:
			retry = 1;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		break;
	default:
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}
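
/*
 * Editor's note (illustrative, not part of the driver): the decode above
 * is deliberately retry-biased -- every unrecognized serv_response or
 * status value defaults to retry = 1, so only statuses known to be final
 * (GOOD, UNDERRUN, and the TMF completions) resolve on the accelerated
 * path.  The caller, process_ioaccel2_completion(), turns retry = 1 into
 * a resubmit down the normal RAID path via hpsa_retry_cmd().
 */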

static void hpsa_cmd_resolve_events(struct ctlr_info *h,
				struct CommandList *c)
{
	bool do_wake = false;

	/*
	 * Prevent the following race in the abort handler:
	 *
	 * 1. LLD is requested to abort a SCSI command
	 * 2. The SCSI command completes
	 * 3. The struct CommandList associated with step 2 is made available
	 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
	 * 5. Abort handler follows scsi_cmnd->host_scribble and
	 *    finds struct CommandList and tries to abort it
	 * Now we have aborted the wrong command.
	 *
	 * Reset c->scsi_cmd here so that the abort or reset handler will know
	 * this command has completed.  Then, check to see if the handler is
	 * waiting for this command, and, if so, wake it.
	 */
	c->scsi_cmd = SCSI_CMD_IDLE;
	mb();	/* Declare command idle before checking for pending events. */
	if (c->abort_pending) {
		do_wake = true;
		c->abort_pending = false;
	}
	if (c->reset_pending) {
		unsigned long flags;
		struct hpsa_scsi_dev_t *dev;

		/*
		 * There appears to be a reset pending; lock the lock and
		 * reconfirm.  If so, then decrement the count of outstanding
		 * commands and wake the reset command if this is the last one.
		 */
		spin_lock_irqsave(&h->lock, flags);
		dev = c->reset_pending;	/* Re-fetch under the lock. */
		if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
			do_wake = true;
		c->reset_pending = NULL;
		spin_unlock_irqrestore(&h->lock, flags);
	}

	if (do_wake)
		wake_up_all(&h->event_sync_wait_queue);
}
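
/*
 * Editor's note (illustrative, not part of the driver): the mb() above
 * appears designed to pair with the ordering in the abort/reset path,
 * which first marks the command (abort_pending/reset_pending) and then
 * re-checks whether it is still attached to a live scsi_cmd.  With the
 * completion side publishing SCSI_CMD_IDLE before it reads the pending
 * flags, at least one of the two sides must observe the other's write,
 * so a completing command cannot be both resolved here and silently
 * waited on forever by the handler.
 */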

static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
				      struct CommandList *c)
{
	hpsa_cmd_resolve_events(h, c);
	cmd_tagged_free(h, c);
}

static void hpsa_cmd_free_and_done(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd)
{
	hpsa_cmd_resolve_and_free(h, c);
	cmd->scsi_done(cmd);
}

static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
{
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}

static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
{
	cmd->result = DID_ABORT << 16;
}

static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
				    struct scsi_cmnd *cmd)
{
	hpsa_set_scsi_cmd_aborted(cmd);
	dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
		 c->Request.CDB, c->err_info->ScsiStatus);
	hpsa_cmd_resolve_and_free(h, c);
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0))
		return hpsa_cmd_free_and_done(h, c, cmd);

	/*
	 * Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;

		return hpsa_retry_cmd(h, c);
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		return hpsa_retry_cmd(h, c);

	return hpsa_cmd_free_and_done(h, c, cmd);
}

/* Returns 0 on success, < 0 otherwise. */
static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
		struct CommandList *cp)
{
	u8 tmf_status = cp->err_info->ScsiStatus;

	switch (tmf_status) {
	case CISS_TMF_COMPLETE:
		/*
		 * CISS_TMF_COMPLETE never happens, instead,
		 * ei->CommandStatus == 0 for this case.
		 */
	case CISS_TMF_SUCCESS:
		return 0;
	case CISS_TMF_INVALID_FRAME:
	case CISS_TMF_NOT_SUPPORTED:
	case CISS_TMF_FAILED:
	case CISS_TMF_WRONG_LUN:
	case CISS_TMF_OVERLAPPED_TAG:
		break;
	default:
		dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
			tmf_status);
		break;
	}
	return -tmf_status;
}
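
/*
 * Editor's note (illustrative, not part of the driver): failures are
 * reported as the negated firmware status, so callers that only care
 * about success just test for 0, while the original CISS_TMF_* code
 * stays recoverable from the return value.  For example, assuming
 * CISS_TMF_NOT_SUPPORTED were defined as 0x03, a rejected TMF would
 * come back as -3.
 */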

static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	u8 sense_key;
	u8 asc;		/* additional sense code */
	u8 ascq;	/* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = cp->scsi_cmd;
	h = cp->h;
	dev = cmd->device->hostdata;
	c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if ((cp->cmd_type == CMD_SCSI) &&
		(le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
		hpsa_unmap_sg_chain_block(h, cp);

	if ((cp->cmd_type == CMD_IOACCEL2) &&
		(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
		hpsa_unmap_ioaccel2_sg_chain_block(h, c2);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */

	if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
		atomic_dec(&cp->phys_disk->ioaccel_cmds_out);

	/*
	 * We check for lockup status here as it may be set for
	 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
	 * fail_all_outstanding_cmds()
	 */
	if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
		/* DID_NO_CONNECT will prevent a retry */
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(h, cp, cmd);
	}

	if ((unlikely(hpsa_is_pending_event(cp)))) {
		if (cp->reset_pending)
			return hpsa_cmd_resolve_and_free(h, cp);
		if (cp->abort_pending)
			return hpsa_cmd_abort_and_free(h, cp, cmd);
	}

	if (cp->cmd_type == CMD_IOACCEL2)
		return process_ioaccel2_completion(h, cp, cmd, dev);

	scsi_set_resid(cmd, ei->ResidualCnt);
	if (ei->CommandStatus == 0)
		return hpsa_cmd_free_and_done(h, cp, cmd);

	/* For I/O accelerator commands, copy over some fields to the normal
	 * CISS header used below for error handling.
	 */
	if (cp->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];

		cp->Header.SGList = scsi_sg_count(cmd);
		cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
		cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
			IOACCEL1_IOFLAGS_CDBLEN_MASK;
		cp->Header.tag = c->tag;
		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);

		/* Any RAID offload error results in retry which will use
		 * the normal I/O path so the controller can handle whatever's
		 * wrong.
		 */
		if (is_logical_dev_addr_mode(dev->scsi3addr)) {
			if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
				dev->offload_enabled = 0;
			return hpsa_retry_cmd(h, cp);
		}
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		cmd->result |= ei->ScsiStatus;
		/* copy the sense data */
		if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
			sense_data_size = SCSI_SENSE_BUFFERSIZE;
		else
			sense_data_size = sizeof(ei->SenseInfo);
		if (ei->SenseLen < sense_data_size)
			sense_data_size = ei->SenseLen;
		memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
		if (ei->ScsiStatus)
			decode_sense_data(ei->SenseInfo, sense_data_size,
				&sense_key, &asc, &ascq);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (sense_key == ABORTED_COMMAND) {
				cmd->result |= DID_SOFT_ERROR << 16;
				break;
			}
			break;
		}
		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else { /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev,
			"CDB %16phN data overrun\n", cp->Request.CDB);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
				cp->Request.CDB);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
			cp->Request.CDB);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
			cp->Request.CDB);
		break;
	case CMD_ABORTED:
		/* Return now to avoid calling scsi_done(). */
		return hpsa_cmd_abort_and_free(h, cp, cmd);
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
			cp->Request.CDB);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
		dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
			cp->Request.CDB);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
			cp->Request.CDB);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	case CMD_TMF_STATUS:
		if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
			cmd->result = DID_ERROR << 16;
		break;
	case CMD_IOACCEL_DISABLED:
		/* This only handles the direct pass-through case since RAID
		 * offload is handled above.  Just attempt a retry.
		 */
		cmd->result = DID_SOFT_ERROR << 16;
		dev_warn(&h->pdev->dev,
				"cp %p had HP SSD Smart Path error\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}

	return hpsa_cmd_free_and_done(h, cp, cmd);
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;

	for (i = 0; i < sg_used; i++)
		pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
				le32_to_cpu(c->SG[i].Len),
				data_direction);
}

static int hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return 0;
	}

	addr64 = pci_map_single(pdev, buf, buflen, data_direction);
	if (dma_mapping_error(&pdev->dev, addr64)) {
		/* Prevent subsequent unmap of something never mapped */
		cp->Header.SGList = 0;
		cp->Header.SGTotal = cpu_to_le16(0);
		return -1;
	}
	cp->SG[0].Addr = cpu_to_le64(addr64);
	cp->SG[0].Len = cpu_to_le32(buflen);
	cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
	cp->Header.SGList = 1;	/* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
	return 0;
}

#define NO_TIMEOUT ((unsigned long) -1)
#define DEFAULT_TIMEOUT 30000 /* milliseconds */
static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	__enqueue_cmd_and_start_io(h, c, reply_queue);
	if (timeout_msecs == NO_TIMEOUT) {
		/* TODO: get rid of this no-timeout thing */
		wait_for_completion_io(&wait);
		return IO_OK;
	}
	if (!wait_for_completion_io_timeout(&wait,
					msecs_to_jiffies(timeout_msecs))) {
		dev_warn(&h->pdev->dev, "Command timed out.\n");
		return -ETIMEDOUT;
	}
	return IO_OK;
}

static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
	int reply_queue, unsigned long timeout_msecs)
{
	if (unlikely(lockup_detected(h))) {
		c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
		return IO_OK;
	}
	return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
}

static u32 lockup_detected(struct ctlr_info *h)
{
	int cpu;
	u32 rc, *lockup_detected;

	cpu = get_cpu();
	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
	rc = *lockup_detected;
	put_cpu();
	return rc;
}

#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction, unsigned long timeout_msecs)
{
	int backoff_time = 10, retry_count = 0;
	int rc;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
					     timeout_msecs);
		if (rc)
			break;
		retry_count++;
		if (retry_count > 3) {
			msleep(backoff_time);
			if (backoff_time < 1000)
				backoff_time *= 2;
		}
	} while ((check_for_unit_attention(h, c) ||
			check_for_busy(h, c)) &&
			retry_count <= MAX_DRIVER_CMD_RETRIES);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
	if (retry_count > MAX_DRIVER_CMD_RETRIES)
		rc = -EIO;
	return rc;
}
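
/*
 * Editor's note (illustrative, not part of the driver): the first three
 * retries happen back-to-back; from the fourth attempt on, the sleep
 * doubles from 10 ms until the doubling check stops it, giving delays of
 * 10, 20, 40, 80, 160, 320, 640, 1280, 1280, ... ms between attempts
 * (640 is still < 1000, so it doubles once more to 1280 and then stays
 * there), up to MAX_DRIVER_CMD_RETRIES (25) attempts in the worst case.
 */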

static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
			struct CommandList *c)
{
	const u8 *cdb = c->Request.CDB;
	const u8 *lun = c->Header.LUN.LunAddrBytes;

	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
		" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		txt, lun[0], lun[1], lun[2], lun[3],
		lun[4], lun[5], lun[6], lun[7],
		cdb[0], cdb[1], cdb[2], cdb[3],
		cdb[4], cdb[5], cdb[6], cdb[7],
		cdb[8], cdb[9], cdb[10], cdb[11],
		cdb[12], cdb[13], cdb[14], cdb[15]);
}

static void hpsa_scsi_interpret_error(struct ctlr_info *h,
		struct CommandList *cp)
{
	const struct ErrorInfo *ei = cp->err_info;
	struct device *d = &cp->h->pdev->dev;
	u8 sense_key, asc, ascq;
	int sense_len;

	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		if (ei->SenseLen > sizeof(ei->SenseInfo))
			sense_len = sizeof(ei->SenseInfo);
		else
			sense_len = ei->SenseLen;
		decode_sense_data(ei->SenseInfo, sense_len,
					&sense_key, &asc, &ascq);
		hpsa_print_cmd(h, "SCSI status", cp);
		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
			dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
				sense_key, asc, ascq);
		else
			dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero. "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		hpsa_print_cmd(h, "overrun condition", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		hpsa_print_cmd(h, "invalid command", cp);
		dev_warn(d, "probably means device no longer present\n");
		}
		break;
	case CMD_PROTOCOL_ERR:
		hpsa_print_cmd(h, "protocol error", cp);
		break;
	case CMD_HARDWARE_ERR:
		hpsa_print_cmd(h, "hardware error", cp);
		break;
	case CMD_CONNECTION_LOST:
		hpsa_print_cmd(h, "connection lost", cp);
		break;
	case CMD_ABORTED:
		hpsa_print_cmd(h, "aborted", cp);
		break;
	case CMD_ABORT_FAILED:
		hpsa_print_cmd(h, "abort failed", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		hpsa_print_cmd(h, "unsolicited abort", cp);
		break;
	case CMD_TIMEOUT:
		hpsa_print_cmd(h, "timed out", cp);
		break;
	case CMD_UNABORTABLE:
		hpsa_print_cmd(h, "unabortable", cp);
		break;
	case CMD_CTLR_LOCKUP:
		hpsa_print_cmd(h, "controller lockup detected", cp);
		break;
	default:
		hpsa_print_cmd(h, "unknown status", cp);
		dev_warn(d, "Unknown command status %x\n",
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
			u16 page, unsigned char *buf,
			unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
			page, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map. */
	(void) fill_cmd(c, reset_type, h, NULL, 0, 0,
			scsi3addr, TYPE_MSG);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc) {
		dev_warn(&h->pdev->dev, "Failed to send reset command\n");
		goto out;
	}
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr)
{
	int i;
	bool match = false;
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;

	if (hpsa_is_cmd_idle(c))
		return false;

	switch (c->cmd_type) {
	case CMD_SCSI:
	case CMD_IOCTL_PEND:
		match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
				sizeof(c->Header.LUN.LunAddrBytes));
		break;

	case CMD_IOACCEL1:
	case CMD_IOACCEL2:
		if (c->phys_disk == dev) {
			/* HBA mode match */
			match = true;
		} else {
			/* Possible RAID mode -- check each phys dev. */
			/* FIXME:  Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead. */
			for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/* FIXME: an alternate test might be
				 *
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *		== c2->scsi_nexus; */
				match = dev->phys_disk[i] == c->phys_disk;
			}
		}
		break;

	case IOACCEL2_TMF:
		for (i = 0; i < dev->nphysical_disks && !match; i++) {
			match = dev->phys_disk[i]->ioaccel_handle ==
					le32_to_cpu(ac->it_nexus);
		}
		break;

	case 0:	/* The command is in the middle of being initialized. */
		match = false;
		break;

	default:
		dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
			c->cmd_type);
		BUG();
	}

	return match;
}

static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
	unsigned char *scsi3addr, u8 reset_type, int reply_queue)
{
	int i;
	int rc = 0;

	/* We can really only handle one reset at a time */
	if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
		dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
		return -EINTR;
	}

	BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;
		int refcount = atomic_inc_return(&c->refcount);

		if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
			unsigned long flags;

			/*
			 * Mark the target command as having a reset pending,
			 * then take the lock so that the command cannot
			 * complete while we're considering it.  If the
			 * command is not idle then count it; otherwise
			 * revoke the event.
			 */
			c->reset_pending = dev;
			spin_lock_irqsave(&h->lock, flags);	/* Implied MB */
			if (!hpsa_is_cmd_idle(c))
				atomic_inc(&dev->reset_cmds_out);
			else
				c->reset_pending = NULL;
			spin_unlock_irqrestore(&h->lock, flags);
		}

		cmd_free(h, c);
	}

	rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
	if (!rc)
		wait_event(h->event_sync_wait_queue,
			atomic_read(&dev->reset_cmds_out) == 0 ||
			lockup_detected(h));

	if (unlikely(lockup_detected(h))) {
		dev_warn(&h->pdev->dev,
			 "Controller lockup detected during reset wait\n");
		rc = -ENODEV;
	}

	if (unlikely(rc))
		atomic_set(&dev->reset_cmds_out, 0);

	mutex_unlock(&h->reset_mutex);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

#define HPSA_MAP_DEBUG
#ifdef HPSA_MAP_DEBUG
static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
				struct raid_map_data *map_buff)
{
	struct raid_map_disk_data *dd = &map_buff->data[0];
	int map, row, col;
	u16 map_cnt, row_cnt, disks_per_row;

	if (rc != 0)
		return;

	/* Show details only if debugging has been activated. */
	if (h->raid_offload_debug < 2)
		return;

	dev_info(&h->pdev->dev, "structure_size = %u\n",
				le32_to_cpu(map_buff->structure_size));
	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
			le32_to_cpu(map_buff->volume_blk_size));
	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->volume_blk_cnt));
	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
			map_buff->phys_blk_shift);
	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
			map_buff->parity_rotation_shift);
	dev_info(&h->pdev->dev, "strip_size = %u\n",
			le16_to_cpu(map_buff->strip_size));
	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
			le64_to_cpu(map_buff->disk_starting_blk));
	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
			le64_to_cpu(map_buff->disk_blk_cnt));
	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
			le16_to_cpu(map_buff->data_disks_per_row));
	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
			le16_to_cpu(map_buff->metadata_disks_per_row));
	dev_info(&h->pdev->dev, "row_cnt = %u\n",
			le16_to_cpu(map_buff->row_cnt));
	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
			le16_to_cpu(map_buff->layout_map_count));
	dev_info(&h->pdev->dev, "flags = 0x%x\n",
			le16_to_cpu(map_buff->flags));
	dev_info(&h->pdev->dev, "encryption = %s\n",
			le16_to_cpu(map_buff->flags) &
			RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
	dev_info(&h->pdev->dev, "dekindex = %u\n",
			le16_to_cpu(map_buff->dekindex));
	map_cnt = le16_to_cpu(map_buff->layout_map_count);
	for (map = 0; map < map_cnt; map++) {
		dev_info(&h->pdev->dev, "Map%u:\n", map);
		row_cnt = le16_to_cpu(map_buff->row_cnt);
		for (row = 0; row < row_cnt; row++) {
			dev_info(&h->pdev->dev, "  Row%u:\n", row);
			disks_per_row =
				le16_to_cpu(map_buff->data_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    D%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
			disks_per_row =
				le16_to_cpu(map_buff->metadata_disks_per_row);
			for (col = 0; col < disks_per_row; col++, dd++)
				dev_info(&h->pdev->dev,
					"    M%02u: h=0x%04x xor=%u,%u\n",
					col, dd->ioaccel_handle,
					dd->xor_mult[0], dd->xor_mult[1]);
		}
	}
}
#else
static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
			__attribute__((unused)) int rc,
			__attribute__((unused)) struct raid_map_data *map_buff)
{
}
#endif

static int hpsa_get_raid_map(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc = 0;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
			sizeof(this_device->raid_map), 0,
			scsi3addr, TYPE_CMD)) {
		dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
		cmd_free(h, c);
		return -1;
	}
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		goto out;
	}
	cmd_free(h, c);

	/* @todo in the future, dynamically allocate RAID map memory */
	if (le32_to_cpu(this_device->raid_map.structure_size) >
				sizeof(this_device->raid_map)) {
		dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
		rc = -1;
	}
	hpsa_debug_map_buff(h, rc, &this_device->raid_map);
	return rc;
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
	unsigned char scsi3addr[], u16 bmic_device_index,
	struct bmic_identify_physical_device *buf, size_t bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_alloc(h);
	rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
		0, RAID_CTLR_LUNID, TYPE_CMD);
	if (rc)
		goto out;

	c->Request.CDB[2] = bmic_device_index & 0xff;
	c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;

	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
						NO_TIMEOUT);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	}
out:
	cmd_free(h, c);
	return rc;
}

static int hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page)
{
	int rc;
	int i;
	int pages;
	unsigned char *buf, bufsize;

	buf = kzalloc(256, GFP_KERNEL);
	if (!buf)
		return 0;

	/* Get the size of the page list first */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_unsupported;
	pages = buf[3];
	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
		bufsize = pages + HPSA_VPD_HEADER_SZ;
	else
		bufsize = 255;

	/* Get the whole VPD page list */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
				buf, bufsize);
	if (rc != 0)
		goto exit_unsupported;

	pages = buf[3];
	for (i = 1; i <= pages; i++)
		if (buf[3 + i] == page)
			goto exit_supported;
exit_unsupported:
	kfree(buf);
	return 0;
exit_supported:
	kfree(buf);
	return 1;
}
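
/*
 * Editor's note (illustrative, not part of the driver): this walks the
 * standard SCSI "Supported VPD Pages" page (0x00).  As this driver reads
 * it, byte 3 holds the page-list length and the supported page codes
 * follow from byte 4, which is why the loop indexes buf[3 + i] for
 * i = 1..pages:
 *
 *	byte 0:         peripheral qualifier/device type
 *	byte 1:         page code (0x00)
 *	byte 3:         page length n
 *	bytes 4..4+n-1: supported page codes
 */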

static void hpsa_get_ioaccel_status(struct ctlr_info *h,
	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
{
	int rc;
	unsigned char *buf;
	u8 ioaccel_status;

	this_device->offload_config = 0;
	this_device->offload_enabled = 0;
	this_device->offload_to_be_enabled = 0;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
		goto out;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr,
			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
	if (rc != 0)
		goto out;

#define IOACCEL_STATUS_BYTE 4
#define OFFLOAD_CONFIGURED_BIT 0x01
#define OFFLOAD_ENABLED_BIT 0x02
	ioaccel_status = buf[IOACCEL_STATUS_BYTE];
	this_device->offload_config =
		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
	if (this_device->offload_config) {
		this_device->offload_enabled =
			!!(ioaccel_status & OFFLOAD_ENABLED_BIT);
		if (hpsa_get_raid_map(h, scsi3addr, this_device))
			this_device->offload_enabled = 0;
	}
	this_device->offload_to_be_enabled = this_device->offload_enabled;
out:
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}
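
/*
 * Editor's note (illustrative, not part of the driver): the copy starts
 * at buf[8] because the Device Identification VPD page (0x83) carries a
 * 4-byte page header followed by a designation descriptor whose own
 * 4-byte header precedes the identifier bytes, so offset 8 is the first
 * identifier byte of the first descriptor.  Note also the "rc != 0"
 * return: callers see 0 on success and 1 on any failure, not the raw rc.
 */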

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
		void *buf, int bufsize,
		int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_alloc(h);

	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
		rc = -1;
		goto out;
	}
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_FROMDEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
	} else {
		struct ReportLUNdata *rld = buf;

		if (rld->extended_response_flag != extended_response) {
			dev_err(&h->pdev->dev,
				"report luns requested format %u, got %u\n",
				extended_response,
				rld->extended_response_flag);
			rc = -1;
		}
	}
out:
	cmd_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
		struct ReportExtendedLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
						HPSA_REPORT_PHYS_EXTENDED);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
		struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/* Use VPD inquiry to get details of volume status */
static int hpsa_get_volume_status(struct ctlr_info *h,
					unsigned char scsi3addr[])
{
	int rc;
	int status;
	int size;
	unsigned char *buf;

	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return HPSA_VPD_LV_STATUS_UNSUPPORTED;

	/* Does controller have VPD for logical volume status? */
	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
		goto exit_failed;

	/* Get the size of the VPD return buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	size = buf[3];

	/* Now get the whole VPD buffer */
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
					buf, size + HPSA_VPD_HEADER_SZ);
	if (rc != 0)
		goto exit_failed;
	status = buf[4]; /* status byte */

	kfree(buf);
	return status;
exit_failed:
	kfree(buf);
	return HPSA_VPD_LV_STATUS_UNSUPPORTED;
}

/* Determine offline status of a volume.
 * Return either:
 *  0 (not offline)
 *  0xff (offline for unknown reasons)
 *  # (integer code indicating one of several NOT READY states
 *     describing why a volume is to be kept offline)
 */
static int hpsa_volume_offline(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct CommandList *c;
	unsigned char *sense;
	u8 sense_key, asc, ascq;
	int sense_len;
	int rc, ldstat = 0;
	u16 cmd_status;
	u8 scsi_status;
#define ASC_LUN_NOT_READY 0x04
#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02

	c = cmd_alloc(h);

	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (rc) {
		cmd_free(h, c);
		return 0;
	}
	sense = c->err_info->SenseInfo;
	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;
	decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
	cmd_status = c->err_info->CommandStatus;
	scsi_status = c->err_info->ScsiStatus;
	cmd_free(h, c);
	/* Is the volume 'not ready'? */
	if (cmd_status != CMD_TARGET_STATUS ||
		scsi_status != SAM_STAT_CHECK_CONDITION ||
		sense_key != NOT_READY ||
		asc != ASC_LUN_NOT_READY) {
		return 0;
	}

	/* Determine the reason for not ready state */
	ldstat = hpsa_get_volume_status(h, scsi3addr);

	/* Keep volume offline in certain cases: */
	switch (ldstat) {
	case HPSA_LV_UNDERGOING_ERASE:
	case HPSA_LV_NOT_AVAILABLE:
	case HPSA_LV_UNDERGOING_RPI:
	case HPSA_LV_PENDING_RPI:
	case HPSA_LV_ENCRYPTED_NO_KEY:
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
	case HPSA_LV_UNDERGOING_ENCRYPTION:
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		return ldstat;
	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
		/* If VPD status page isn't available,
		 * use ASC/ASCQ to determine state
		 */
		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
			return ldstat;
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Find out if a logical device supports aborts by simply trying one.
 * Smart Array may claim not to support aborts on logical drives, but
 * if an MSA2000 is connected, the drives on that box will be presented
 * by the Smart Array as logical drives, and aborts may be sent to
 * those devices successfully.  So the simplest way to find out is
 * to simply try an abort and see how the device responds.
 */
static int hpsa_device_supports_aborts(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	struct ErrorInfo *ei;
	int rc = 0;

	u64 tag = (u64) -1; /* bogus tag */

	/* Assume that physical devices support aborts */
	if (!is_logical_dev_addr_mode(scsi3addr))
		return 1;

	c = cmd_alloc(h);

	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
	(void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	/* no unmap needed here because no data xfer. */
	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_INVALID:
		rc = 0;
		break;
	case CMD_UNABORTABLE:
	case CMD_ABORT_FAILED:
		rc = 1;
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	default:
		rc = 0;
		break;
	}
	cmd_free(h, c);
	return rc;
}
3367
edd16368 3368static int hpsa_update_device_info(struct ctlr_info *h,
0b0e1d6c
SC
3369 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3370 unsigned char *is_OBDR_device)
edd16368 3371{
0b0e1d6c
SC
3372
3373#define OBDR_SIG_OFFSET 43
3374#define OBDR_TAPE_SIG "$DR-10"
3375#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3376#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3377
ea6d3bc3 3378 unsigned char *inq_buff;
0b0e1d6c 3379 unsigned char *obdr_sig;
683fc444 3380 int rc = 0;
edd16368 3381
ea6d3bc3 3382 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
683fc444
DB
3383 if (!inq_buff) {
3384 rc = -ENOMEM;
edd16368 3385 goto bail_out;
683fc444 3386 }
edd16368 3387
edd16368
SC
3388 /* Do an inquiry to the device to see what it is. */
3389 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3390 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3391 /* Inquiry failed (msg printed already) */
3392 dev_err(&h->pdev->dev,
3393 "hpsa_update_device_info: inquiry failed\n");
683fc444 3394 rc = -EIO;
edd16368
SC
3395 goto bail_out;
3396 }
3397
edd16368
SC
3398 this_device->devtype = (inq_buff[0] & 0x1f);
3399 memcpy(this_device->scsi3addr, scsi3addr, 8);
3400 memcpy(this_device->vendor, &inq_buff[8],
3401 sizeof(this_device->vendor));
3402 memcpy(this_device->model, &inq_buff[16],
3403 sizeof(this_device->model));
edd16368
SC
3404 memset(this_device->device_id, 0,
3405 sizeof(this_device->device_id));
3406 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
3407 sizeof(this_device->device_id));
3408
3409 if (this_device->devtype == TYPE_DISK &&
283b4a9b 3410 is_logical_dev_addr_mode(scsi3addr)) {
67955ba3
SC
3411 int volume_offline;
3412
edd16368 3413 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
283b4a9b
SC
3414 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3415 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
67955ba3
SC
3416 volume_offline = hpsa_volume_offline(h, scsi3addr);
3417 if (volume_offline < 0 || volume_offline > 0xff)
3418 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3419 this_device->volume_offline = volume_offline & 0xff;
283b4a9b 3420 } else {
edd16368 3421 this_device->raid_level = RAID_UNKNOWN;
283b4a9b
SC
3422 this_device->offload_config = 0;
3423 this_device->offload_enabled = 0;
41ce4c35 3424 this_device->offload_to_be_enabled = 0;
a3144e0b 3425 this_device->hba_ioaccel_enabled = 0;
9846590e 3426 this_device->volume_offline = 0;
03383736 3427 this_device->queue_depth = h->nr_cmds;
283b4a9b 3428 }
edd16368 3429
0b0e1d6c
SC
3430 if (is_OBDR_device) {
3431 /* See if this is a One-Button-Disaster-Recovery device
3432 * by looking for "$DR-10" at offset 43 in inquiry data.
3433 */
3434 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3435 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3436 strncmp(obdr_sig, OBDR_TAPE_SIG,
3437 OBDR_SIG_LEN) == 0);
3438 }
edd16368
SC
3439 kfree(inq_buff);
3440 return 0;
3441
3442bail_out:
3443 kfree(inq_buff);
683fc444 3444 return rc;
edd16368
SC
3445}

static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
{
	unsigned long flags;
	int rc, entry;
	/*
	 * See if this device supports aborts.  If we already know
	 * the device, we already know if it supports aborts, otherwise
	 * we have to find out if it supports aborts by trying one.
	 */
	spin_lock_irqsave(&h->devlock, flags);
	rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
	if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
		entry >= 0 && entry < h->ndevices) {
		dev->supports_aborts = h->dev[entry]->supports_aborts;
		spin_unlock_irqrestore(&h->devlock, flags);
	} else {
		spin_unlock_irqrestore(&h->devlock, flags);
		dev->supports_aborts =
				hpsa_device_supports_aborts(h, scsi3addr);
		if (dev->supports_aborts < 0)
			dev->supports_aborts = 0;
	}
}
3471
4f4eb9f1 3472static unsigned char *ext_target_model[] = {
edd16368
SC
3473 "MSA2012",
3474 "MSA2024",
3475 "MSA2312",
3476 "MSA2324",
fda38518 3477 "P2000 G3 SAS",
e06c8e5c 3478 "MSA 2040 SAS",
edd16368
SC
3479 NULL,
3480};
3481
4f4eb9f1 3482static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
edd16368
SC
3483{
3484 int i;
3485
4f4eb9f1
ST
3486 for (i = 0; ext_target_model[i]; i++)
3487 if (strncmp(device->model, ext_target_model[i],
3488 strlen(ext_target_model[i])) == 0)
edd16368
SC
3489 return 1;
3490 return 0;
3491}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
	u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
	u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

	if (!is_logical_dev_addr_mode(lunaddrbytes)) {
		/* physical device, target and lun filled in later */
		if (is_hba_lunid(lunaddrbytes))
			hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
		else
			/* defer target, lun assignment for physical devices */
			hpsa_set_bus_target_lun(device, 2, -1, -1);
		return;
	}
	/* It's a logical device */
	if (is_ext_target(h, device)) {
		/* external target: put logicals on bus 1 and match the
		 * target/lun numbers the box reports; other smart arrays
		 * go on bus 0, target 0, matching the lunid
		 */
		hpsa_set_bus_target_lun(device,
			1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
		return;
	}
	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}
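/*
 * Worked example (added for illustration, not in the original source):
 * an external target logical volume reporting lunid 0x00120005 lands on
 * bus 1, target (0x0012 & 0x3fff) = 0x12, lun (0x05 & 0x00ff) = 5; a
 * non-external logical volume with the same lunid would instead land on
 * bus 0, target 0, lun (0x00120005 & 0x3fff) = 5.
 */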

/*
 * If there is no lun 0 on a target, linux won't find any devices.
 * For the external targets (arrays), we have to manually detect the enclosure
 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
 * it for some reason.  *tmpdevice is the target we're adding,
 * this_device is a pointer into the current element of currentsd[]
 * that we're building up in update_scsi_devices(), below.
 * lunzerobits is a bitmap that tracks which targets already have a
 * lun 0 assigned.
 * Returns 1 if an enclosure was added, 0 if not.
 */
static int add_ext_target_dev(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *tmpdevice,
	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
	unsigned long lunzerobits[], int *n_ext_target_devs)
{
	unsigned char scsi3addr[8];

	if (test_bit(tmpdevice->target, lunzerobits))
		return 0; /* There is already a lun 0 on this target. */

	if (!is_logical_dev_addr_mode(lunaddrbytes))
		return 0; /* It's the logical targets that may lack lun 0. */

	if (!is_ext_target(h, tmpdevice))
		return 0; /* Only external target devices have this problem. */

	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
		return 0;

	memset(scsi3addr, 0, 8);
	scsi3addr[3] = tmpdevice->target;
	if (is_hba_lunid(scsi3addr))
		return 0; /* Don't add the RAID controller here. */

	if (is_scsi_rev_5(h))
		return 0; /* p1210m doesn't need to do this. */

	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
		dev_warn(&h->pdev->dev, "Maximum number of external "
			"target devices exceeded.  Check your hardware "
			"configuration.");
		return 0;
	}

	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
		return 0;
	(*n_ext_target_devs)++;
	hpsa_set_bus_target_lun(this_device,
				tmpdevice->bus, tmpdevice->target, 0);
	hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
	set_bit(tmpdevice->target, lunzerobits);
	return 1;
}

/*
 * Get address of physical disk used for an ioaccel2 mode command:
 *	1. Extract ioaccel2 handle from the command.
 *	2. Find a matching ioaccel2 handle from list of physical disks.
 *	3. Return:
 *		1 and set scsi3addr to address of matching physical disk, or
 *		0 if no matching physical disk was found.
 */
static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
{
	struct io_accel2_cmd *c2 =
			&h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
	unsigned long flags;
	int i;

	spin_lock_irqsave(&h->devlock, flags);
	for (i = 0; i < h->ndevices; i++)
		if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
			memcpy(scsi3addr, h->dev[i]->scsi3addr,
				sizeof(h->dev[i]->scsi3addr));
			spin_unlock_irqrestore(&h->devlock, flags);
			return 1;
		}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

/*
 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
 * logdev.  The number of luns in physdev and logdev are returned in
 * *nphysicals and *nlogicals, respectively.
 * Returns 0 on success, -1 otherwise.
 */
static int hpsa_gather_lun_info(struct ctlr_info *h,
	struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
	struct ReportLUNdata *logdev, u32 *nlogicals)
{
	if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
		return -1;
	}
	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
	if (*nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
			HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
		*nphysicals = HPSA_MAX_PHYS_LUN;
	}
	if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
		dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
		return -1;
	}
	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
	/* Reject Logicals in excess of our max capability. */
	if (*nlogicals > HPSA_MAX_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical LUNs (%d) exceeded.  "
			"%d LUNs ignored.\n", HPSA_MAX_LUN,
			*nlogicals - HPSA_MAX_LUN);
		*nlogicals = HPSA_MAX_LUN;
	}
	if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
		dev_warn(&h->pdev->dev,
			"maximum logical + physical LUNs (%d) exceeded. "
			"%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
			*nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
		*nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
	}
	return 0;
}
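/*
 * Sizing note (illustrative, based on the divisors above): the controller
 * returns LUNListLength in bytes; each extended physical-LUN entry is
 * 24 bytes and each standard logical-LUN entry is 8 bytes.  So a physical
 * LUNListLength of 240 means *nphysicals = 240 / 24 = 10 drives, and a
 * logical LUNListLength of 24 means *nlogicals = 24 / 8 = 3 volumes.
 */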

static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
	int i, int nphysicals, int nlogicals,
	struct ReportExtendedLUNdata *physdev_list,
	struct ReportLUNdata *logdev_list)
{
	/* Helper function, figure out where the LUN ID info is coming from
	 * given index i, lists of physical and logical devices, where in
	 * the list the raid controller is supposed to appear (first or last)
	 */

	int logicals_start = nphysicals + (raid_ctlr_position == 0);
	int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);

	if (i == raid_ctlr_position)
		return RAID_CTLR_LUNID;

	if (i < logicals_start)
		return &physdev_list->LUN[i -
				(raid_ctlr_position == 0)].lunid[0];

	if (i < last_device)
		return &logdev_list->LUN[i - nphysicals -
			(raid_ctlr_position == 0)][0];
	BUG();
	return NULL;
}
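/*
 * Worked example (illustrative, not in the original source): with
 * raid_ctlr_position == 0, nphysicals == 3 and nlogicals == 2, index 0
 * maps to RAID_CTLR_LUNID, indices 1-3 map to physdev_list->LUN[0..2],
 * and indices 4-5 map to logdev_list->LUN[0..1].
 */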

/* get physical drive ioaccel handle and queue depth */
static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *dev,
		struct ReportExtendedLUNdata *rlep, int rle_index,
		struct bmic_identify_physical_device *id_phys)
{
	int rc;
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	dev->ioaccel_handle = rle->ioaccel_handle;
	if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
		dev->hba_ioaccel_enabled = 1;
	memset(id_phys, 0, sizeof(*id_phys));
	rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
			GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
			sizeof(*id_phys));
	if (!rc)
		/* Reserve space for FW operations */
#define DRIVE_CMDS_RESERVED_FOR_FW 2
#define DRIVE_QUEUE_DEPTH 7
		dev->queue_depth =
			le16_to_cpu(id_phys->current_queue_depth_limit) -
				DRIVE_CMDS_RESERVED_FOR_FW;
	else
		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
}
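/*
 * Illustrative arithmetic (not part of the original source): if BMIC
 * reports current_queue_depth_limit == 30, the drive is exposed with a
 * queue depth of 30 - DRIVE_CMDS_RESERVED_FOR_FW = 28; if the BMIC
 * identify fails, the conservative DRIVE_QUEUE_DEPTH of 7 is used instead.
 */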

static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
	struct ReportExtendedLUNdata *rlep, int rle_index,
	struct bmic_identify_physical_device *id_phys)
{
	struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];

	if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
		this_device->hba_ioaccel_enabled = 1;

	memcpy(&this_device->active_path_index,
		&id_phys->active_path_number,
		sizeof(this_device->active_path_index));
	memcpy(&this_device->path_map,
		&id_phys->redundant_path_present_map,
		sizeof(this_device->path_map));
	memcpy(&this_device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(this_device->box));
	memcpy(&this_device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(this_device->phys_connector));
	memcpy(&this_device->bay,
		&id_phys->phys_bay_in_box,
		sizeof(this_device->bay));
}

static void hpsa_update_scsi_devices(struct ctlr_info *h)
{
	/* the idea here is we could get notified
	 * that some devices have changed, so we do a report
	 * physical luns and report logical luns cmd, and adjust
	 * our list of devices accordingly.
	 *
	 * The scsi3addr's of devices won't change so long as the
	 * adapter is not reset.  That means we can rescan and
	 * tell which devices we already know about, vs. new
	 * devices, vs. disappearing devices.
	 */
	struct ReportExtendedLUNdata *physdev_list = NULL;
	struct ReportLUNdata *logdev_list = NULL;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 nphysicals = 0;
	u32 nlogicals = 0;
	u32 ndev_allocated = 0;
	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
	int ncurrent = 0;
	int i, n_ext_target_devs, ndevs_to_allocate;
	int raid_ctlr_position;
	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);

	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
	physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
	logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
	tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
	id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);

	if (!currentsd || !physdev_list || !logdev_list ||
		!tmpdevice || !id_phys) {
		dev_err(&h->pdev->dev, "out of memory\n");
		goto out;
	}
	memset(lunzerobits, 0, sizeof(lunzerobits));

	h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */

	if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
			logdev_list, &nlogicals)) {
		h->drv_req_rescan = 1;
		goto out;
	}

	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				"  %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes))
			if (i < nphysicals + (raid_ctlr_position == 0) &&
			    (physdev_list->LUN[phys_dev_index].device_flags
			     & 0x01))
				continue;

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Inquiry failed, skipping device.\n");
			continue;
		}

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/*
		 * For external target devices, we have to insert a LUN 0 which
		 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
		 * is nonetheless an enclosure device there.  We have to
		 * present that otherwise linux won't find anything if
		 * there is no lun 0.
		 */
		if (add_ext_target_dev(h, tmpdevice, this_device,
				lunaddrbytes, lunzerobits,
				&n_ext_target_devs)) {
			ncurrent++;
			this_device = currentsd[ncurrent];
		}

		*this_device = *tmpdevice;

		/* do not expose masked devices */
		if (MASKED_DEVICE(lunaddrbytes) &&
			i < nphysicals + (raid_ctlr_position == 0))
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
			if (i < nphysicals + (raid_ctlr_position == 0)) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
		case TYPE_ENCLOSURE:
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}
	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
	struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}
3934
c7ee65b3
WS
3935/*
3936 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
edd16368
SC
3937 * dma mapping and fills in the scatter gather entries of the
3938 * hpsa command, cp.
3939 */
33a2ffce 3940static int hpsa_scatter_gather(struct ctlr_info *h,
edd16368
SC
3941 struct CommandList *cp,
3942 struct scsi_cmnd *cmd)
3943{
edd16368 3944 struct scatterlist *sg;
b3a7ba7c 3945 int use_sg, i, sg_limit, chained, last_sg;
33a2ffce 3946 struct SGDescriptor *curr_sg;
edd16368 3947
33a2ffce 3948 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
edd16368
SC
3949
3950 use_sg = scsi_dma_map(cmd);
3951 if (use_sg < 0)
3952 return use_sg;
3953
3954 if (!use_sg)
3955 goto sglist_finished;
3956
b3a7ba7c
WS
3957 /*
3958 * If the number of entries is greater than the max for a single list,
3959 * then we have a chained list; we will set up all but one entry in the
3960 * first list (the last entry is saved for link information);
3961 * otherwise, we don't have a chained list and we'll set up at each of
3962 * the entries in the one list.
3963 */
33a2ffce 3964 curr_sg = cp->SG;
b3a7ba7c
WS
3965 chained = use_sg > h->max_cmd_sg_entries;
3966 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
3967 last_sg = scsi_sg_count(cmd) - 1;
3968 scsi_for_each_sg(cmd, sg, sg_limit, i) {
ec5cbf04 3969 hpsa_set_sg_descriptor(curr_sg, sg);
33a2ffce
SC
3970 curr_sg++;
3971 }
ec5cbf04 3972
b3a7ba7c
WS
3973 if (chained) {
3974 /*
3975 * Continue with the chained list. Set curr_sg to the chained
3976 * list. Modify the limit to the total count less the entries
3977 * we've already set up. Resume the scan at the list entry
3978 * where the previous loop left off.
3979 */
3980 curr_sg = h->cmd_sg_list[cp->cmdindex];
3981 sg_limit = use_sg - sg_limit;
3982 for_each_sg(sg, sg, sg_limit, i) {
3983 hpsa_set_sg_descriptor(curr_sg, sg);
3984 curr_sg++;
3985 }
3986 }
3987
ec5cbf04 3988 /* Back the pointer up to the last entry and mark it as "last". */
b3a7ba7c 3989 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
33a2ffce
SC
3990
3991 if (use_sg + chained > h->maxSG)
3992 h->maxSG = use_sg + chained;
3993
3994 if (chained) {
3995 cp->Header.SGList = h->max_cmd_sg_entries;
50a0decf 3996 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
e2bea6df
SC
3997 if (hpsa_map_sg_chain_block(h, cp)) {
3998 scsi_dma_unmap(cmd);
3999 return -1;
4000 }
33a2ffce 4001 return 0;
edd16368
SC
4002 }
4003
4004sglist_finished:
4005
01a02ffc 4006 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
c7ee65b3 4007 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
edd16368
SC
4008 return 0;
4009}
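/*
 * Worked example of the chaining logic above (illustrative only): with
 * h->max_cmd_sg_entries == 32 and use_sg == 40, the request is chained;
 * sg_limit starts at 31, so the first 31 entries land in the embedded
 * cp->SG array, the remaining 40 - 31 = 9 entries go into the external
 * chain block, and the headers are set to SGList = 32 and SGTotal = 41
 * (use_sg plus one for the chain descriptor).
 */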

#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = get_unaligned_be16(&cdb[2]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}
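/*
 * Worked example (illustrative, not in the original source): a READ_6
 * CDB of 08 00 12 34 00 00 carries block 0x1234 and a transfer-length
 * byte of 0, which by SBC convention means 256 blocks; the fixup above
 * rewrites it as the READ_10 CDB 28 00 00 00 12 34 00 01 00 00
 * (block 0x1234, count 0x0100) and sets *cdb_len to 10.
 */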

static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

/*
 * Set encryption parameters for the ioaccel2 request
 */
static void set_encrypt_ioaccel2(struct ctlr_info *h,
	struct CommandList *c, struct io_accel2_cmd *cp)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	u64 first_block;

	/* Are we doing encryption on this device */
	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
		return;
	/* Set the data encryption key index. */
	cp->dekindex = map->dekindex;

	/* Set the encryption enable flag, encoded into direction field. */
	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;

	/* Set encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is the LBA.
	 * For other block sizes, tweak is (LBA * block size) / 512.
	 */
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}
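/*
 * Worked tweak example (illustrative, not in the original source): for a
 * READ_10 at LBA 100 on a volume with volume_blk_size == 4096, the tweak
 * becomes 100 * 4096 / 512 = 800, so tweak_lower = 800 and tweak_upper
 * = 0; on a 512-byte-block volume the tweak is simply the LBA itself.
 */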

static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}
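/*
 * Worked example (illustrative, not in the original source): for a 3-way
 * R1-ADM map with data_disks_per_row == 2 and layout_map_count == 3, a
 * *map_index of 1 with offload_to_mirror == 2 steps forward two groups
 * (1 -> 3 -> 5), landing on the same member position in the third mirror
 * group; offload_to_mirror == 0 would instead reduce it to 1 % 2 == 1 in
 * the first group.
 */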

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif
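	/*
	 * Worked example of the calculation above (illustrative, not in
	 * the original source): with strip_size == 128 blocks and
	 * data_disks_per_row == 3, blocks_per_row == 384.  A request at
	 * first_block == 1000 gives first_row == 1000 / 384 == 2,
	 * first_row_offset == 1000 - 768 == 232, and first_column ==
	 * 232 / 128 == 1; a single-strip request therefore maps to row 2,
	 * column 1 of the layout.
	 */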

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;


		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}

/*
 * Submit commands down the "normal" RAID stack path
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand, before (opt.) and after calling cmd_alloc
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static void hpsa_cmd_init(struct ctlr_info *h, int index,
	struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}
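/*
 * Illustrative note (not from the original source): the tag written above
 * encodes the command pool index shifted left by DIRECT_LOOKUP_SHIFT, so
 * the completion path can recover the CommandList directly from the tag
 * by reversing the shift instead of searching the pool; for instance, if
 * the shift were 3 (value assumed here purely for illustration), index 5
 * would yield a tag of 40.
 */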

static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}

static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}
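
/*
 * Why busaddr is restored above: the ioaccel paths can repoint
 * c->busaddr at an accelerated command buffer (see
 * setup_ioaccel2_abort_cmd() below for one example), so a recycled
 * CommandList cannot assume it still points into cmd_pool. The CDB and
 * error info are cleared for similar reasons; everything else set by
 * hpsa_cmd_init() is derived from the index and stays valid across
 * reuse.
 */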

static int hpsa_ioaccel_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)	/* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)	/* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}

static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}

/* Running in struct Scsi_Host->host_lock less mode */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}

static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up. If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}

static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_init_shared_tag_map(sh, sh->can_queue);
	if (error) {
		dev_err(&h->pdev->dev,
			"%s: scsi_init_shared_tag_map failed for controller %d\n",
			__func__, h->ctlr);
		scsi_host_put(sh);
		return error;
	}
	h->scsi_host = sh;
	return 0;
}

static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}

/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request. We use an offset from that value as
 * an index to select our command block. (The offset allows us to reserve the
 * low-numbered entries for our own uses.)
 */
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
}
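
/*
 * For example (values illustrative, not taken from hpsa.h): with
 * HPSA_NRESERVED_CMDS == 16, block-layer tag 0 would select pool entry
 * 16, tag 1 entry 17, and so on; entries 0-15 stay reserved for
 * driver-internal commands handed out by cmd_alloc() below.
 */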

/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
	struct CommandList *c, unsigned char lunaddr[],
	int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}

/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
	struct CommandList *c,
	unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit. Do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}
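
/*
 * Back-of-envelope on the loop above (the constants live in hpsa.h, so
 * the totals here are illustrative): the sleep doubles each pass, 1s,
 * 2s, 4s, ..., capped at HPSA_MAX_WAIT_INTERVAL_SECS, so the worst case
 * is roughly HPSA_TUR_RETRY_LIMIT * HPSA_MAX_WAIT_INTERVAL_SECS seconds
 * once the cap has been reached.
 */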

static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue)
{
	int first_queue;
	int last_queue;
	int rq;
	int rc = 0;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}

/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining. Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	u8 reset_type;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	h->reset_in_progress = 1;

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}

static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}
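
/*
 * The swizzle above byte-reverses each 32-bit half of the 64-bit tag,
 * i.e. it swaps the endianness of the two words independently. A worked
 * example with made-up bytes:
 *
 *	in:  00 11 22 33 44 55 66 77
 *	out: 33 22 11 00 77 66 55 44
 *
 * Some firmware expects the abort tag in this byte order; see the
 * needs_abort_tags_swizzled check in hpsa_send_abort() below.
 */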

static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, __le32 *taglower, __le32 *tagupper)
{
	u64 tag;

	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		tag = le64_to_cpu(cm1->tag);
		*tagupper = cpu_to_le32(tag >> 32);
		*taglower = cpu_to_le32(tag);
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		/* upper tag not used in ioaccel2 mode */
		memset(tagupper, 0, sizeof(*tagupper));
		*taglower = cm2->Tag;
		return;
	}
	tag = le64_to_cpu(c->Header.tag);
	*tagupper = cpu_to_le32(tag >> 32);
	*taglower = cpu_to_le32(tag);
}

static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	__le32 tagupper, taglower;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
		0, 0, scsi3addr, TYPE_MSG);
	if (h->needs_abort_tags_swizzled)
		swizzle_abort_tag(&c->Request.CDB[4]);
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_TMF_STATUS:
		rc = hpsa_evaluate_tmf_status(h, c);
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(h, c);
		rc = -1;
		break;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
		__func__, tagupper, taglower);
	return rc;
}

static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
	struct CommandList *command_to_abort, int reply_queue)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
	struct io_accel2_cmd *c2a =
		&h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
	struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;

	/*
	 * We're overlaying struct hpsa_tmf_struct on top of something which
	 * was allocated as a struct io_accel2_cmd, so we better be sure it
	 * actually fits, and doesn't overrun the error info space.
	 */
	BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
			sizeof(struct io_accel2_cmd));
	BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
			offsetof(struct hpsa_tmf_struct, error_len) +
				sizeof(ac->error_len));

	c->cmd_type = IOACCEL2_TMF;
	c->scsi_cmd = SCSI_CMD_BUSY;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(struct io_accel2_cmd));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(ac, 0, sizeof(*c2)); /* yes this is correct */
	ac->iu_type = IOACCEL2_IU_TMF_TYPE;
	ac->reply_queue = reply_queue;
	ac->tmf = IOACCEL2_TMF_ABORT;
	ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
	memset(ac->lun_id, 0, sizeof(ac->lun_id));
	ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
	ac->error_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	ac->error_len = cpu_to_le32(sizeof(c2->error_data));
}

/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 * -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			"Reset as abort",
			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	if (h->raid_offload_debug > 0)
		dev_info(&h->pdev->dev,
			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
	rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}

static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
	struct CommandList *abort, int reply_queue)
{
	int rc = IO_OK;
	struct CommandList *c;
	__le32 taglower, tagupper;
	struct hpsa_scsi_dev_t *dev;
	struct io_accel2_cmd *c2;

	dev = abort->scsi_cmd->device->hostdata;
	if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
		return -1;

	c = cmd_alloc(h);
	setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
	c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
	(void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	dev_dbg(&h->pdev->dev,
		"%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
		__func__, tagupper, taglower, c2->error_data.serv_response);
	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		rc = 0;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
	case IOACCEL2_SERV_RESPONSE_FAILURE:
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		rc = -1;
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
			__func__, tagupper, taglower,
			c2->error_data.serv_response);
		rc = -1;
	}
	cmd_free(h, c);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}

static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
{
	/*
	 * ioaccel mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these commands,
	 * but not all underlying firmware can handle abort TMF.
	 * Change abort to physical device reset when abort TMF is unsupported.
	 */
	if (abort->cmd_type == CMD_IOACCEL2) {
		if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
			return hpsa_send_abort_ioaccel2(h, abort,
						reply_queue);
		else
			return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
						abort, reply_queue);
	}
	return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
}

/* Find out which reply queue a command was meant to return on */
static int hpsa_extract_reply_queue(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->cmd_type == CMD_IOACCEL2)
		return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
	return c->Header.ReplyQueue;
}

/*
 * Limit concurrency of abort commands to prevent
 * over-subscription of commands
 */
static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
{
#define ABORT_CMD_WAIT_MSECS 5000
	return !wait_event_timeout(h->abort_cmd_wait_queue,
			atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
			msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
}

/* Send an abort for the specified command.
 * If the device and controller support it,
 * send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	__le32 tagupper, taglower;
	int refcount, reply_queue;

	if (sc == NULL)
		return FAILED;

	if (sc->device == NULL)
		return FAILED;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (h == NULL)
		return FAILED;

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				__func__);
		return FAILED;
	}

	/* If controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, lockup detected");
		return FAILED;
	}

	/* This is a good time to check if controller lockup has occurred */
	if (detect_controller_lockup(h)) {
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
					"ABORT FAILED, new lockup detected");
		return FAILED;
	}

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun,
		"Aborting command", sc);

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		/* This can happen if the command already completed. */
		return SUCCESS;
	}
	refcount = atomic_inc_return(&abort->refcount);
	if (refcount == 1) { /* Command is done already. */
		cmd_free(h, abort);
		return SUCCESS;
	}

	/* Don't bother trying the abort if we know it won't work. */
	if (abort->cmd_type != CMD_IOACCEL2 &&
		abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
		cmd_free(h, abort);
		return FAILED;
	}

	/*
	 * Check that we're aborting the right command.
	 * It's possible the CommandList already completed and got re-used.
	 */
	if (abort->scsi_cmd != sc) {
		cmd_free(h, abort);
		return SUCCESS;
	}

	abort->abort_pending = true;
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	reply_queue = hpsa_extract_reply_queue(h, abort);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml,
			"CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
			as->cmd_len, as->cmnd[0], as->cmnd[1],
			as->serial_number);
	dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
	hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which. Send the abort down.
	 */
	if (wait_for_available_abort_cmd(h)) {
		dev_warn(&h->pdev->dev,
			"%s FAILED, timeout waiting for an abort command to become available.\n",
			msg);
		cmd_free(h, abort);
		return FAILED;
	}
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
	atomic_inc(&h->abort_cmds_available);
	wake_up_all(&h->abort_cmd_wait_queue);
	if (rc != 0) {
		dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
		hpsa_show_dev_msg(KERN_WARNING, h, dev,
				"FAILED to abort command");
		cmd_free(h, abort);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
	wait_event(h->event_sync_wait_queue,
		   abort->scsi_cmd != sc || lockup_detected(h));
	cmd_free(h, abort);
	return !lockup_detected(h) ? SUCCESS : FAILED;
}

/*
 * For operations with an associated SCSI command, a command block is allocated
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries. cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
 */
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
{
	int idx = hpsa_get_cmd_index(scmd);
	struct CommandList *c = h->cmd_pool + idx;

	if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
		dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
			idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
		/* The index value comes from the block layer, so if it's out of
		 * bounds, it's probably not our bug.
		 */
		BUG();
	}

	atomic_inc(&c->refcount);
	if (unlikely(!hpsa_is_cmd_idle(c))) {
		/*
		 * We expect that the SCSI layer will hand us a unique tag
		 * value. Thus, there should never be a collision here between
		 * two requests...because if the selected command isn't idle
		 * then someone is going to be very disappointed.
		 */
		dev_err(&h->pdev->dev,
			"tag collision (tag=%d) in cmd_tagged_alloc().\n",
			idx);
		if (c->scsi_cmd != NULL)
			scsi_print_command(c->scsi_cmd);
		scsi_print_command(scmd);
	}

	hpsa_cmd_partial_init(h, idx, c);
	return c;
}

static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
{
	/*
	 * Release our reference to the block. We don't need to do anything
	 * else to free it, because it is accessed by index. (There's no point
	 * in checking the result of the decrement, since we cannot guarantee
	 * that there isn't a concurrent abort which is also accessing it.)
	 */
	(void)atomic_dec(&c->refcount);
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use. Lock must be held when calling this.
 * cmd_free() is the complement.
 * This function never gives up and returns NULL. If it hangs,
 * another thread must call cmd_free() to free some tags.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int refcount, i;
	int offset = 0;

	/*
	 * There is some *extremely* small but non-zero chance that
	 * multiple threads could get in here, and one thread could
	 * be scanning through the list of bits looking for a free
	 * one, but the free ones are always behind him, and other
	 * threads sneak in behind him and eat them before he can
	 * get to them, so that while there is always a free one, a
	 * very unlucky thread might be starved anyway, never able to
	 * beat the other threads. In reality, this happens so
	 * infrequently as to be indistinguishable from never.
	 *
	 * Note that we start allocating commands before the SCSI host structure
	 * is initialized. Since the search starts at bit zero, this
	 * all works, since we have at least one command structure available;
	 * however, it means that the structures with the low indexes have to be
	 * reserved for driver-initiated requests, while requests from the block
	 * layer will use the higher indexes.
	 */

	for (;;) {
		i = find_next_zero_bit(h->cmd_pool_bits,
					HPSA_NRESERVED_CMDS,
					offset);
		if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
			offset = 0;
			continue;
		}
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (unlikely(refcount > 1)) {
			cmd_free(h, c); /* already in use */
			offset = (i + 1) % HPSA_NRESERVED_CMDS;
			continue;
		}
		set_bit(i & (BITS_PER_LONG - 1),
			h->cmd_pool_bits + (i / BITS_PER_LONG));
		break; /* it's ours now. */
	}
	hpsa_cmd_partial_init(h, i, c);
	return c;
}

/*
 * This is the complementary operation to cmd_alloc(). Note, however, in some
 * corner cases it may also be used to free blocks allocated by
 * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
 * the clear-bit is harmless.
 */
static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	if (atomic_dec_and_test(&c->refcount)) {
		int i;

		i = c - h->cmd_pool;
		clear_bit(i & (BITS_PER_LONG - 1),
			  h->cmd_pool_bits + (i / BITS_PER_LONG));
	}
}

#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
	void __user *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void __user *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
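
/*
 * A note on the compat shims above: a 32-bit process hands in the 32-bit
 * ioctl layout, which is unpacked field by field into the native 64-bit
 * layout in a scratch area obtained from compat_alloc_user_space(), with
 * the 32-bit buffer pointer widened via compat_ptr(). The native
 * hpsa_ioctl() then runs unmodified against that repacked copy, and only
 * the error_info is copied back into the caller's 32-bit struct.
 */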

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.",
			HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}
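
/*
 * The version packing above puts one component per byte: major in bits
 * 23-16, minor in bits 15-8, subminor in bits 7-0. As a worked example,
 * a version string of "3.4.10" would pack to 0x03040a; any trailing
 * "-N" suffix is ignored because the sscanf() format stops after three
 * numeric fields.
 */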

static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	IOCTL_Command_struct iocommand;
	struct CommandList *c;
	char *buff = NULL;
	u64 temp64;
	int rc = 0;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
		return -EFAULT;
	if ((iocommand.buf_size < 1) &&
	    (iocommand.Request.Type.Direction != XFER_NONE)) {
		return -EINVAL;
	}
	if (iocommand.buf_size > 0) {
		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (buff == NULL)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			/* Copy the data into the buffer we created */
			if (copy_from_user(buff, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out_kfree;
			}
		} else {
			memset(buff, 0, iocommand.buf_size);
		}
	}
	c = cmd_alloc(h);

	/* Fill in the command type */
	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	/* Fill in Command Header */
	c->Header.ReplyQueue = 0; /* unused in simple mode */
	if (iocommand.buf_size > 0) {	/* buffer to fill */
		c->Header.SGList = 1;
		c->Header.SGTotal = cpu_to_le16(1);
	} else { /* no buffers to fill */
		c->Header.SGList = 0;
		c->Header.SGTotal = cpu_to_le16(0);
	}
	memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));

	/* Fill in Request block */
	memcpy(&c->Request, &iocommand.Request,
		sizeof(c->Request));

	/* Fill in the scatter gather information */
	if (iocommand.buf_size > 0) {
		temp64 = pci_map_single(h->pdev, buff,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
			c->SG[0].Addr = cpu_to_le64(0);
			c->SG[0].Len = cpu_to_le32(0);
			rc = -ENOMEM;
			goto out;
		}
		c->SG[0].Addr = cpu_to_le64(temp64);
		c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
		c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
	}
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (iocommand.buf_size > 0)
		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (rc) {
		rc = -EIO;
		goto out;
	}

	/* Copy the error information out */
	memcpy(&iocommand.error_info, c->err_info,
		sizeof(iocommand.error_info));
	if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}
	if ((iocommand.Request.Type.Direction & XFER_READ) &&
		iocommand.buf_size > 0) {
		/* Copy the data out of the buffer we created */
		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
			rc = -EFAULT;
			goto out;
		}
	}
out:
	cmd_free(h, c);
out_kfree:
	kfree(buff);
	return rc;
}

static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
{
	BIG_IOCTL_Command_struct *ioc;
	struct CommandList *c;
	unsigned char **buff = NULL;
	int *buff_size = NULL;
	u64 temp64;
	BYTE sg_used = 0;
	int status = 0;
	u32 left;
	u32 sz;
	BYTE __user *data_ptr;

	if (!argp)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc) {
		status = -ENOMEM;
		goto cleanup1;
	}
	if (copy_from_user(ioc, argp, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup1;
	}
	if ((ioc->buf_size < 1) &&
	    (ioc->Request.Type.Direction != XFER_NONE)) {
		status = -EINVAL;
		goto cleanup1;
	}
	/* Check kmalloc limits using all SGs */
	if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
		status = -EINVAL;
		goto cleanup1;
	}
	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
		status = -EINVAL;
		goto cleanup1;
	}
	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
	if (!buff) {
		status = -ENOMEM;
		goto cleanup1;
	}
	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
	if (!buff_size) {
		status = -ENOMEM;
		goto cleanup1;
	}
	left = ioc->buf_size;
	data_ptr = ioc->buf;
	while (left) {
		sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
		buff_size[sg_used] = sz;
		buff[sg_used] = kmalloc(sz, GFP_KERNEL);
		if (buff[sg_used] == NULL) {
			status = -ENOMEM;
			goto cleanup1;
		}
		if (ioc->Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(buff[sg_used], data_ptr, sz)) {
				status = -EFAULT;
				goto cleanup1;
			}
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_alloc(h);

	c->cmd_type = CMD_IOCTL_PEND;
	c->scsi_cmd = SCSI_CMD_BUSY;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = (u8) sg_used;
	c->Header.SGTotal = cpu_to_le16(sg_used);
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
			temp64 = pci_map_single(h->pdev, buff[i],
				    buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev,
							(dma_addr_t) temp64)) {
				c->SG[i].Addr = cpu_to_le64(0);
				c->SG[i].Len = cpu_to_le32(0);
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr = cpu_to_le64(temp64);
			c->SG[i].Len = cpu_to_le32(buff_size[i]);
			c->SG[i].Ext = cpu_to_le32(0);
		}
		c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
	}
	status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	if (status) {
		status = -EIO;
		goto cleanup0;
	}

	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
		int i;

		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_free(h, c);
cleanup1:
	if (buff) {
		int i;

		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}
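
/*
 * Sizing note for the big-passthru path above: the user buffer is split
 * into chunks of at most ioc->malloc_size bytes (itself capped at
 * MAX_KMALLOC_SIZE), one scatter-gather entry per chunk, with at most
 * SG_ENTRIES_IN_CMD chunks in total. Purely as illustration, a 1 MB
 * transfer with a 64 KB malloc_size would occupy 16 SG entries.
 */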

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		atomic_inc(&h->passthru_cmds_avail);
		return rc;
	default:
		return -ENOTTY;
	}
}
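
/*
 * The atomic_dec_if_positive()/atomic_inc() pairing above acts as a
 * counting semaphore bounding the number of passthru ioctls in flight:
 * when passthru_cmds_avail would go negative the caller gets -EAGAIN
 * instead of queueing, and the slot is returned when the ioctl finishes.
 */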

static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);

	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete. Don't free
	 * the command either. This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return;
}
6274
a2dac136 6275static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
b7bb24eb 6276 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
edd16368
SC
6277 int cmd_type)
6278{
6279 int pci_dir = XFER_NONE;
9b5c48c2 6280 u64 tag; /* for commands to be aborted */
edd16368
SC
6281
6282 c->cmd_type = CMD_IOCTL_PEND;
a58e7e53 6283 c->scsi_cmd = SCSI_CMD_BUSY;
edd16368
SC
6284 c->Header.ReplyQueue = 0;
6285 if (buff != NULL && size > 0) {
6286 c->Header.SGList = 1;
50a0decf 6287 c->Header.SGTotal = cpu_to_le16(1);
edd16368
SC
6288 } else {
6289 c->Header.SGList = 0;
50a0decf 6290 c->Header.SGTotal = cpu_to_le16(0);
edd16368 6291 }
edd16368
SC
6292 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6293
edd16368
SC
6294 if (cmd_type == TYPE_CMD) {
6295 switch (cmd) {
6296 case HPSA_INQUIRY:
6297 /* are we trying to read a vital product page */
b7bb24eb 6298 if (page_code & VPD_PAGE) {
edd16368 6299 c->Request.CDB[1] = 0x01;
b7bb24eb 6300 c->Request.CDB[2] = (page_code & 0xff);
edd16368
SC
6301 }
6302 c->Request.CDBLen = 6;
a505b86f
SC
6303 c->Request.type_attr_dir =
6304 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
edd16368
SC
6305 c->Request.Timeout = 0;
6306 c->Request.CDB[0] = HPSA_INQUIRY;
6307 c->Request.CDB[4] = size & 0xFF;
6308 break;
6309 case HPSA_REPORT_LOG:
6310 case HPSA_REPORT_PHYS:
6311 /* Talking to controller so It's a physical command
6312 mode = 00 target = 0. Nothing to write.
6313 */
6314 c->Request.CDBLen = 12;
a505b86f
SC
6315 c->Request.type_attr_dir =
6316 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
edd16368
SC
6317 c->Request.Timeout = 0;
6318 c->Request.CDB[0] = cmd;
6319 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6320 c->Request.CDB[7] = (size >> 16) & 0xFF;
6321 c->Request.CDB[8] = (size >> 8) & 0xFF;
6322 c->Request.CDB[9] = size & 0xFF;
6323 break;
edd16368
SC
6324 case HPSA_CACHE_FLUSH:
6325 c->Request.CDBLen = 12;
a505b86f
SC
6326 c->Request.type_attr_dir =
6327 TYPE_ATTR_DIR(cmd_type,
6328 ATTR_SIMPLE, XFER_WRITE);
edd16368
SC
6329 c->Request.Timeout = 0;
6330 c->Request.CDB[0] = BMIC_WRITE;
6331 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
bb158eab
SC
6332 c->Request.CDB[7] = (size >> 8) & 0xFF;
6333 c->Request.CDB[8] = size & 0xFF;
edd16368
SC
6334 break;
6335 case TEST_UNIT_READY:
6336 c->Request.CDBLen = 6;
a505b86f
SC
6337 c->Request.type_attr_dir =
6338 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
edd16368
SC
6339 c->Request.Timeout = 0;
6340 break;
283b4a9b
SC
6341 case HPSA_GET_RAID_MAP:
6342 c->Request.CDBLen = 12;
a505b86f
SC
6343 c->Request.type_attr_dir =
6344 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
283b4a9b
SC
6345 c->Request.Timeout = 0;
6346 c->Request.CDB[0] = HPSA_CISS_READ;
6347 c->Request.CDB[1] = cmd;
6348 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6349 c->Request.CDB[7] = (size >> 16) & 0xFF;
6350 c->Request.CDB[8] = (size >> 8) & 0xFF;
6351 c->Request.CDB[9] = size & 0xFF;
6352 break;
316b221a
SC
6353 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6354 c->Request.CDBLen = 10;
a505b86f
SC
6355 c->Request.type_attr_dir =
6356 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
316b221a
SC
6357 c->Request.Timeout = 0;
6358 c->Request.CDB[0] = BMIC_READ;
6359 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6360 c->Request.CDB[7] = (size >> 16) & 0xFF;
6361 c->Request.CDB[8] = (size >> 8) & 0xFF;
6362 break;
03383736
DB
6363 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6364 c->Request.CDBLen = 10;
6365 c->Request.type_attr_dir =
6366 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6367 c->Request.Timeout = 0;
6368 c->Request.CDB[0] = BMIC_READ;
6369 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6370 c->Request.CDB[7] = (size >> 16) & 0xFF;
6371 c->Request.CDB[8] = (size >> 8) & 0XFF;
6372 break;
edd16368
SC
6373 default:
6374 dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
6375 BUG();
a2dac136 6376 return -1;
edd16368
SC
6377 }
6378 } else if (cmd_type == TYPE_MSG) {
6379 switch (cmd) {
6380
0b9b7b6e
ST
6381 case HPSA_PHYS_TARGET_RESET:
6382 c->Request.CDBLen = 16;
6383 c->Request.type_attr_dir =
6384 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6385 c->Request.Timeout = 0; /* Don't time out */
6386 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6387 c->Request.CDB[0] = HPSA_RESET;
6388 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6389 /* Physical target reset needs no control bytes 4-7*/
6390 c->Request.CDB[4] = 0x00;
6391 c->Request.CDB[5] = 0x00;
6392 c->Request.CDB[6] = 0x00;
6393 c->Request.CDB[7] = 0x00;
6394 break;
edd16368
SC
6395 case HPSA_DEVICE_RESET_MSG:
6396 c->Request.CDBLen = 16;
a505b86f
SC
6397 c->Request.type_attr_dir =
6398 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
edd16368 6399 c->Request.Timeout = 0; /* Don't time out */
64670ac8
SC
6400 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6401 c->Request.CDB[0] = cmd;
21e89afd 6402 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
edd16368
SC
6403 /* If bytes 4-7 are zero, it means reset the */
6404 /* LunID device */
6405 c->Request.CDB[4] = 0x00;
6406 c->Request.CDB[5] = 0x00;
6407 c->Request.CDB[6] = 0x00;
6408 c->Request.CDB[7] = 0x00;
75167d2c
SC
6409 break;
6410 case HPSA_ABORT_MSG:
9b5c48c2 6411 memcpy(&tag, buff, sizeof(tag));
2b08b3e9 6412 dev_dbg(&h->pdev->dev,
9b5c48c2
SC
6413 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6414 tag, c->Header.tag);
75167d2c 6415 c->Request.CDBLen = 16;
a505b86f
SC
6416 c->Request.type_attr_dir =
6417 TYPE_ATTR_DIR(cmd_type,
6418 ATTR_SIMPLE, XFER_WRITE);
75167d2c
SC
6419 c->Request.Timeout = 0; /* Don't time out */
6420 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6421 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6422 c->Request.CDB[2] = 0x00; /* reserved */
6423 c->Request.CDB[3] = 0x00; /* reserved */
6424 /* Tag to abort goes in CDB[4]-CDB[11] */
9b5c48c2 6425 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
75167d2c
SC
6426 c->Request.CDB[12] = 0x00; /* reserved */
6427 c->Request.CDB[13] = 0x00; /* reserved */
6428 c->Request.CDB[14] = 0x00; /* reserved */
6429 c->Request.CDB[15] = 0x00; /* reserved */
edd16368 6430 break;
edd16368
SC
6431 default:
6432 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6433 cmd);
6434 BUG();
6435 }
6436 } else {
6437 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6438 BUG();
6439 }
6440
a505b86f 6441 switch (GET_DIR(c->Request.type_attr_dir)) {
edd16368
SC
6442 case XFER_READ:
6443 pci_dir = PCI_DMA_FROMDEVICE;
6444 break;
6445 case XFER_WRITE:
6446 pci_dir = PCI_DMA_TODEVICE;
6447 break;
6448 case XFER_NONE:
6449 pci_dir = PCI_DMA_NONE;
6450 break;
6451 default:
6452 pci_dir = PCI_DMA_BIDIRECTIONAL;
6453 }
a2dac136
SC
6454 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6455 return -1;
6456 return 0;
edd16368
SC
6457}
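/*
 * Editor's sketch (not part of the driver): the CDB[6..9] shift
 * sequences in the cases above store the transfer size big-endian,
 * which is equivalent to put_unaligned_be32() from <asm/unaligned.h>
 * (already included by this file). example_encode_cdb_size() is a
 * hypothetical helper, shown only to make the byte order explicit.
 */
static inline void example_encode_cdb_size(u8 *cdb, u32 size)
{
	put_unaligned_be32(size, &cdb[6]);	/* CDB[6]=MSB ... CDB[9]=LSB */
}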
6458
6459/*
6460 * Map (physical) PCI mem into (virtual) kernel space
6461 */
6462static void __iomem *remap_pci_mem(ulong base, ulong size)
6463{
6464 ulong page_base = ((ulong) base) & PAGE_MASK;
6465 ulong page_offs = ((ulong) base) - page_base;
088ba34c
SC
6466 void __iomem *page_remapped = ioremap_nocache(page_base,
6467 page_offs + size);
edd16368
SC
6468
6469 return page_remapped ? (page_remapped + page_offs) : NULL;
6470}
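/*
 * Worked example (hypothetical values): base = 0xfebf1040, size = 0x250
 * with 4 KiB pages gives page_base = 0xfebf1000 and page_offs = 0x40;
 * we ioremap 0x290 bytes starting at the page boundary and return
 * page_remapped + 0x40, so callers may treat the result as a mapping of
 * 'base' itself.
 */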
6471
254f796b 6472static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
edd16368 6473{
254f796b 6474 return h->access.command_completed(h, q);
edd16368
SC
6475}
6476
900c5440 6477static inline bool interrupt_pending(struct ctlr_info *h)
edd16368
SC
6478{
6479 return h->access.intr_pending(h);
6480}
6481
6482static inline long interrupt_not_for_us(struct ctlr_info *h)
6483{
10f66018
SC
6484 return (h->access.intr_pending(h) == 0) ||
6485 (h->interrupts_enabled == 0);
edd16368
SC
6486}
6487
01a02ffc
SC
6488static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6489 u32 raw_tag)
edd16368
SC
6490{
6491 if (unlikely(tag_index >= h->nr_cmds)) {
6492 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6493 return 1;
6494 }
6495 return 0;
6496}
6497
5a3d16f5 6498static inline void finish_cmd(struct CommandList *c)
edd16368 6499{
e85c5974 6500 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
c349775e
ST
6501 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6502 || c->cmd_type == CMD_IOACCEL2))
1fb011fb 6503 complete_scsi_command(c);
8be986cc 6504 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
edd16368 6505 complete(c->waiting);
a104c99f
SC
6506}
6507
303932fd 6508/* process completion of an indexed ("direct lookup") command */
1d94f94d 6509static inline void process_indexed_cmd(struct ctlr_info *h,
303932fd
DB
6510 u32 raw_tag)
6511{
6512 u32 tag_index;
6513 struct CommandList *c;
6514
f2405db8 6515 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
1d94f94d
SC
6516 if (!bad_tag(h, tag_index, raw_tag)) {
6517 c = h->cmd_pool + tag_index;
6518 finish_cmd(c);
6519 }
303932fd
DB
6520}
6521
64670ac8
SC
6522/* Some controllers, like p400, will give us one interrupt
6523 * after a soft reset, even if we turned interrupts off.
6524 * Only need to check for this in the hpsa_xxx_discard_completions
6525 * functions.
6526 */
6527static int ignore_bogus_interrupt(struct ctlr_info *h)
6528{
6529 if (likely(!reset_devices))
6530 return 0;
6531
6532 if (likely(h->interrupts_enabled))
6533 return 0;
6534
6535 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6536 "(known firmware bug). Ignoring.\n");
6537
6538 return 1;
6539}
6540
254f796b
MG
6541/*
6542 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6543 * Relies on (h->q[x] == x) being true for x such that
6544 * 0 <= x < MAX_REPLY_QUEUES.
6545 */
6546static struct ctlr_info *queue_to_hba(u8 *queue)
64670ac8 6547{
254f796b
MG
6548 return container_of((queue - *queue), struct ctlr_info, q[0]);
6549}
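/*
 * Editor's sketch (hypothetical helper): the round trip described in
 * the comment above. hpsa_request_irqs() below stores h->q[i] = i, so
 * the single pointer registered with request_irq() yields both the
 * queue index and the owning ctlr_info.
 */
static inline void example_queue_round_trip(struct ctlr_info *h)
{
	u8 *queue = &h->q[3];	/* what request_irq() would be handed */
	u8 q = *queue;		/* == 3, because h->q[3] was set to 3 */

	WARN_ON(q != 3 || queue_to_hba(queue) != h);
}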
6550
6551static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6552{
6553 struct ctlr_info *h = queue_to_hba(queue);
6554 u8 q = *(u8 *) queue;
64670ac8
SC
6555 u32 raw_tag;
6556
6557 if (ignore_bogus_interrupt(h))
6558 return IRQ_NONE;
6559
6560 if (interrupt_not_for_us(h))
6561 return IRQ_NONE;
a0c12413 6562 h->last_intr_timestamp = get_jiffies_64();
64670ac8 6563 while (interrupt_pending(h)) {
254f796b 6564 raw_tag = get_next_completion(h, q);
64670ac8 6565 while (raw_tag != FIFO_EMPTY)
254f796b 6566 raw_tag = next_command(h, q);
64670ac8 6567 }
64670ac8
SC
6568 return IRQ_HANDLED;
6569}
6570
254f796b 6571static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
64670ac8 6572{
254f796b 6573 struct ctlr_info *h = queue_to_hba(queue);
64670ac8 6574 u32 raw_tag;
254f796b 6575 u8 q = *(u8 *) queue;
64670ac8
SC
6576
6577 if (ignore_bogus_interrupt(h))
6578 return IRQ_NONE;
6579
a0c12413 6580 h->last_intr_timestamp = get_jiffies_64();
254f796b 6581 raw_tag = get_next_completion(h, q);
64670ac8 6582 while (raw_tag != FIFO_EMPTY)
254f796b 6583 raw_tag = next_command(h, q);
64670ac8
SC
6584 return IRQ_HANDLED;
6585}
6586
254f796b 6587static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
edd16368 6588{
254f796b 6589 struct ctlr_info *h = queue_to_hba((u8 *) queue);
303932fd 6590 u32 raw_tag;
254f796b 6591 u8 q = *(u8 *) queue;
edd16368
SC
6592
6593 if (interrupt_not_for_us(h))
6594 return IRQ_NONE;
a0c12413 6595 h->last_intr_timestamp = get_jiffies_64();
10f66018 6596 while (interrupt_pending(h)) {
254f796b 6597 raw_tag = get_next_completion(h, q);
10f66018 6598 while (raw_tag != FIFO_EMPTY) {
f2405db8 6599 process_indexed_cmd(h, raw_tag);
254f796b 6600 raw_tag = next_command(h, q);
10f66018
SC
6601 }
6602 }
10f66018
SC
6603 return IRQ_HANDLED;
6604}
6605
254f796b 6606static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
10f66018 6607{
254f796b 6608 struct ctlr_info *h = queue_to_hba(queue);
10f66018 6609 u32 raw_tag;
254f796b 6610 u8 q = *(u8 *) queue;
10f66018 6611
a0c12413 6612 h->last_intr_timestamp = get_jiffies_64();
254f796b 6613 raw_tag = get_next_completion(h, q);
303932fd 6614 while (raw_tag != FIFO_EMPTY) {
f2405db8 6615 process_indexed_cmd(h, raw_tag);
254f796b 6616 raw_tag = next_command(h, q);
edd16368 6617 }
edd16368
SC
6618 return IRQ_HANDLED;
6619}
6620
a9a3a273
SC
6621/* Send a message CDB to the firmware. Careful, this only works
6622 * in simple mode, not performant mode due to the tag lookup.
6623 * We only ever use this immediately after a controller reset.
6624 */
6f039790
GKH
6625static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6626 unsigned char type)
edd16368
SC
6627{
6628 struct Command {
6629 struct CommandListHeader CommandHeader;
6630 struct RequestBlock Request;
6631 struct ErrDescriptor ErrorDescriptor;
6632 };
6633 struct Command *cmd;
6634 static const size_t cmd_sz = sizeof(*cmd) +
6635 sizeof(struct ErrorInfo);
6636 dma_addr_t paddr64;
2b08b3e9
DB
6637 __le32 paddr32;
6638 u32 tag;
edd16368
SC
6639 void __iomem *vaddr;
6640 int i, err;
6641
6642 vaddr = pci_ioremap_bar(pdev, 0);
6643 if (vaddr == NULL)
6644 return -ENOMEM;
6645
6646 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6647 * CCISS commands, so they must be allocated from the lower 4GiB of
6648 * memory.
6649 */
6650 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6651 if (err) {
6652 iounmap(vaddr);
1eaec8f3 6653 return err;
edd16368
SC
6654 }
6655
6656 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6657 if (cmd == NULL) {
6658 iounmap(vaddr);
6659 return -ENOMEM;
6660 }
6661
6662 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6663 * although there's no guarantee, we assume that the address is at
6664 * least 4-byte aligned (most likely, it's page-aligned).
6665 */
2b08b3e9 6666 paddr32 = cpu_to_le32(paddr64);
edd16368
SC
6667
6668 cmd->CommandHeader.ReplyQueue = 0;
6669 cmd->CommandHeader.SGList = 0;
50a0decf 6670 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
2b08b3e9 6671 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
edd16368
SC
6672 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6673
6674 cmd->Request.CDBLen = 16;
a505b86f
SC
6675 cmd->Request.type_attr_dir =
6676 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
edd16368
SC
6677 cmd->Request.Timeout = 0; /* Don't time out */
6678 cmd->Request.CDB[0] = opcode;
6679 cmd->Request.CDB[1] = type;
6680 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
50a0decf 6681 cmd->ErrorDescriptor.Addr =
2b08b3e9 6682 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
50a0decf 6683 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
edd16368 6684
2b08b3e9 6685 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
edd16368
SC
6686
6687 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6688 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
2b08b3e9 6689 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
edd16368
SC
6690 break;
6691 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6692 }
6693
6694 iounmap(vaddr);
6695
6696 /* we leak the DMA buffer here ... no choice since the controller could
6697 * still complete the command.
6698 */
6699 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6700 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6701 opcode, type);
6702 return -ETIMEDOUT;
6703 }
6704
6705 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6706
6707 if (tag & HPSA_ERROR_BIT) {
6708 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6709 opcode, type);
6710 return -EIO;
6711 }
6712
6713 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6714 opcode, type);
6715 return 0;
6716}
6717
edd16368
SC
6718#define hpsa_noop(p) hpsa_message(p, 3, 0)
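/*
 * Usage sketch (hypothetical helper): probing whether the controller
 * answers after a reset. The driver's real retry loop, including the
 * warning messages, lives in hpsa_init_reset_devices() further below.
 */
static int __maybe_unused example_probe_noop(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++)
		if (hpsa_noop(pdev) == 0)
			return 0;	/* controller responded */
	return -ETIMEDOUT;
}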
6719
1df8552a 6720static int hpsa_controller_hard_reset(struct pci_dev *pdev,
42a91641 6721 void __iomem *vaddr, u32 use_doorbell)
1df8552a 6722{
1df8552a
SC
6723
6724 if (use_doorbell) {
6725 /* For everything after the P600, the PCI power state method
6726 * of resetting the controller doesn't work, so we have this
6727 * other way using the doorbell register.
6728 */
6729 dev_info(&pdev->dev, "using doorbell to reset controller\n");
cf0b08d0 6730 writel(use_doorbell, vaddr + SA5_DOORBELL);
85009239 6731
00701a96 6732 /* PMC hardware guys tell us we need a 10 second delay after
85009239
SC
6733 * doorbell reset and before any attempt to talk to the board
6734 * at all to ensure that this actually works and doesn't fall
6735 * over in some weird corner cases.
6736 */
00701a96 6737 msleep(10000);
1df8552a
SC
6738 } else { /* Try to do it the PCI power state way */
6739
6740 /* Quoting from the Open CISS Specification: "The Power
6741 * Management Control/Status Register (CSR) controls the power
6742 * state of the device. The normal operating state is D0,
6743 * CSR=00h. The software off state is D3, CSR=03h. To reset
6744 * the controller, place the interface device in D3 then to D0,
6745 * this causes a secondary PCI reset which will reset the
6746 * controller." */
2662cab8
DB
6747
6748 int rc = 0;
6749
1df8552a 6750 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
2662cab8 6751
1df8552a 6752 /* enter the D3hot power management state */
2662cab8
DB
6753 rc = pci_set_power_state(pdev, PCI_D3hot);
6754 if (rc)
6755 return rc;
1df8552a
SC
6756
6757 msleep(500);
6758
6759 /* enter the D0 power management state */
2662cab8
DB
6760 rc = pci_set_power_state(pdev, PCI_D0);
6761 if (rc)
6762 return rc;
c4853efe
MM
6763
6764 /*
6765 * The P600 requires a small delay when changing states.
6766 * Otherwise we may think the board did not reset and we bail.
6767 * This for kdump only and is particular to the P600.
6768 */
6769 msleep(500);
1df8552a
SC
6770 }
6771 return 0;
6772}
6773
6f039790 6774static void init_driver_version(char *driver_version, int len)
580ada3c
SC
6775{
6776 memset(driver_version, 0, len);
f79cfec6 6777 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
580ada3c
SC
6778}
6779
6f039790 6780static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6781{
6782 char *driver_version;
6783 int i, size = sizeof(cfgtable->driver_version);
6784
6785 driver_version = kmalloc(size, GFP_KERNEL);
6786 if (!driver_version)
6787 return -ENOMEM;
6788
6789 init_driver_version(driver_version, size);
6790 for (i = 0; i < size; i++)
6791 writeb(driver_version[i], &cfgtable->driver_version[i]);
6792 kfree(driver_version);
6793 return 0;
6794}
6795
6f039790
GKH
6796static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6797 unsigned char *driver_ver)
580ada3c
SC
6798{
6799 int i;
6800
6801 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6802 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6803}
6804
6f039790 6805static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
580ada3c
SC
6806{
6807
6808 char *driver_ver, *old_driver_ver;
6809 int rc, size = sizeof(cfgtable->driver_version);
6810
6811 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6812 if (!old_driver_ver)
6813 return -ENOMEM;
6814 driver_ver = old_driver_ver + size;
6815
6816 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6817 * should have been changed, otherwise we know the reset failed.
6818 */
6819 init_driver_version(old_driver_ver, size);
6820 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6821 rc = !memcmp(driver_ver, old_driver_ver, size);
6822 kfree(old_driver_ver);
6823 return rc;
6824}
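/*
 * Sequence sketch (hypothetical helper): how the stamp-and-compare
 * trick above detects a failed hard reset. Argument values mirror
 * hpsa_kdump_hard_reset_controller() below; a surviving driver-version
 * stamp means the firmware never restarted.
 */
static int __maybe_unused example_verify_hard_reset(struct pci_dev *pdev,
	void __iomem *vaddr, struct CfgTable __iomem *cfgtable,
	u32 use_doorbell)
{
	int rc;

	rc = write_driver_ver_to_cfgtable(cfgtable);	/* stamp version */
	if (rc)
		return rc;
	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		return rc;
	return controller_reset_failed(cfgtable) ? -ENOTSUPP : 0;
}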
edd16368 6825/* This does a hard reset of the controller using PCI power management
1df8552a 6826 * states or the doorbell register.
edd16368 6827 */
6b6c1cd7 6828static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
edd16368 6829{
1df8552a
SC
6830 u64 cfg_offset;
6831 u32 cfg_base_addr;
6832 u64 cfg_base_addr_index;
6833 void __iomem *vaddr;
6834 unsigned long paddr;
580ada3c 6835 u32 misc_fw_support;
270d05de 6836 int rc;
1df8552a 6837 struct CfgTable __iomem *cfgtable;
cf0b08d0 6838 u32 use_doorbell;
270d05de 6839 u16 command_register;
edd16368 6840
1df8552a
SC
6841 /* For controllers as old as the P600, this is very nearly
6842 * the same thing as
edd16368
SC
6843 *
6844 * pci_save_state(pci_dev);
6845 * pci_set_power_state(pci_dev, PCI_D3hot);
6846 * pci_set_power_state(pci_dev, PCI_D0);
6847 * pci_restore_state(pci_dev);
6848 *
1df8552a
SC
6849 * For controllers newer than the P600, the pci power state
6850 * method of resetting doesn't work so we have another way
6851 * using the doorbell register.
edd16368 6852 */
18867659 6853
60f923b9
RE
6854 if (!ctlr_is_resettable(board_id)) {
6855 dev_warn(&pdev->dev, "Controller not resettable\n");
25c1e56a
SC
6856 return -ENODEV;
6857 }
46380786
SC
6858
6859 /* if controller is soft- but not hard resettable... */
6860 if (!ctlr_is_hard_resettable(board_id))
6861 return -ENOTSUPP; /* try soft reset later. */
18867659 6862
270d05de
SC
6863 /* Save the PCI command register */
6864 pci_read_config_word(pdev, 4, &command_register);
270d05de 6865 pci_save_state(pdev);
edd16368 6866
1df8552a
SC
6867 /* find the first memory BAR, so we can find the cfg table */
6868 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6869 if (rc)
6870 return rc;
6871 vaddr = remap_pci_mem(paddr, 0x250);
6872 if (!vaddr)
6873 return -ENOMEM;
edd16368 6874
1df8552a
SC
6875 /* find cfgtable in order to check if reset via doorbell is supported */
6876 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6877 &cfg_base_addr_index, &cfg_offset);
6878 if (rc)
6879 goto unmap_vaddr;
6880 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6881 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6882 if (!cfgtable) {
6883 rc = -ENOMEM;
6884 goto unmap_vaddr;
6885 }
580ada3c
SC
6886 rc = write_driver_ver_to_cfgtable(cfgtable);
6887 if (rc)
03741d95 6888 goto unmap_cfgtable;
edd16368 6889
cf0b08d0
SC
6890 /* If reset via doorbell register is supported, use that.
6891 * There are two such methods. Favor the newest method.
6892 */
1df8552a 6893 misc_fw_support = readl(&cfgtable->misc_fw_support);
cf0b08d0
SC
6894 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6895 if (use_doorbell) {
6896 use_doorbell = DOORBELL_CTLR_RESET2;
6897 } else {
6898 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6899 if (use_doorbell) {
050f7147
SC
6900 dev_warn(&pdev->dev,
6901 "Soft reset not supported. Firmware update is required.\n");
64670ac8 6902 rc = -ENOTSUPP; /* try soft reset */
cf0b08d0
SC
6903 goto unmap_cfgtable;
6904 }
6905 }
edd16368 6906
1df8552a
SC
6907 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6908 if (rc)
6909 goto unmap_cfgtable;
edd16368 6910
270d05de 6911 pci_restore_state(pdev);
270d05de 6912 pci_write_config_word(pdev, 4, command_register);
edd16368 6913
1df8552a
SC
6914 /* Some devices (notably the HP Smart Array 5i Controller)
6915 need a little pause here */
6916 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6917
fe5389c8
SC
6918 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6919 if (rc) {
6920 dev_warn(&pdev->dev,
050f7147 6921 "Failed waiting for board to become ready after hard reset\n");
fe5389c8
SC
6922 goto unmap_cfgtable;
6923 }
fe5389c8 6924
580ada3c
SC
6925 rc = controller_reset_failed(cfgtable);
6926 if (rc < 0)
6927 goto unmap_cfgtable;
6928 if (rc) {
64670ac8
SC
6929 dev_warn(&pdev->dev, "Unable to successfully reset "
6930 "controller. Will try soft reset.\n");
6931 rc = -ENOTSUPP;
580ada3c 6932 } else {
64670ac8 6933 dev_info(&pdev->dev, "board ready after hard reset.\n");
1df8552a
SC
6934 }
6935
6936unmap_cfgtable:
6937 iounmap(cfgtable);
6938
6939unmap_vaddr:
6940 iounmap(vaddr);
6941 return rc;
edd16368
SC
6942}
6943
6944/*
6945 * We cannot read the structure directly; for portability we must use
6946 * the I/O functions.
6947 * This is for debug only.
6948 */
42a91641 6949static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
edd16368 6950{
58f8665c 6951#ifdef HPSA_DEBUG
edd16368
SC
6952 int i;
6953 char temp_name[17];
6954
6955 dev_info(dev, "Controller Configuration information\n");
6956 dev_info(dev, "------------------------------------\n");
6957 for (i = 0; i < 4; i++)
6958 temp_name[i] = readb(&(tb->Signature[i]));
6959 temp_name[4] = '\0';
6960 dev_info(dev, " Signature = %s\n", temp_name);
6961 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
6962 dev_info(dev, " Transport methods supported = 0x%x\n",
6963 readl(&(tb->TransportSupport)));
6964 dev_info(dev, " Transport methods active = 0x%x\n",
6965 readl(&(tb->TransportActive)));
6966 dev_info(dev, " Requested transport Method = 0x%x\n",
6967 readl(&(tb->HostWrite.TransportRequest)));
6968 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
6969 readl(&(tb->HostWrite.CoalIntDelay)));
6970 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
6971 readl(&(tb->HostWrite.CoalIntCount)));
69d6e33d 6972 dev_info(dev, " Max outstanding commands = %d\n",
edd16368
SC
6973 readl(&(tb->CmdsOutMax)));
6974 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6975 for (i = 0; i < 16; i++)
6976 temp_name[i] = readb(&(tb->ServerName[i]));
6977 temp_name[16] = '\0';
6978 dev_info(dev, " Server Name = %s\n", temp_name);
6979 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6980 readl(&(tb->HeartBeat)));
edd16368 6981#endif /* HPSA_DEBUG */
58f8665c 6982}
edd16368
SC
6983
6984static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6985{
6986 int i, offset, mem_type, bar_type;
6987
6988 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6989 return 0;
6990 offset = 0;
6991 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6992 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6993 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6994 offset += 4;
6995 else {
6996 mem_type = pci_resource_flags(pdev, i) &
6997 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6998 switch (mem_type) {
6999 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7000 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7001 offset += 4; /* 32 bit */
7002 break;
7003 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7004 offset += 8;
7005 break;
7006 default: /* reserved in PCI 2.2 */
7007 dev_warn(&pdev->dev,
7008 "base address is invalid\n");
7009 return -1;
7010 break;
7011 }
7012 }
7013 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7014 return i + 1;
7015 }
7016 return -1;
7017}
7018
cc64c817
RE
7019static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7020{
7021 if (h->msix_vector) {
7022 if (h->pdev->msix_enabled)
7023 pci_disable_msix(h->pdev);
105a3dbc 7024 h->msix_vector = 0;
cc64c817
RE
7025 } else if (h->msi_vector) {
7026 if (h->pdev->msi_enabled)
7027 pci_disable_msi(h->pdev);
105a3dbc 7028 h->msi_vector = 0;
cc64c817
RE
7029 }
7030}
7031
edd16368 7032/* If MSI/MSI-X is supported by the kernel we will try to enable it on
050f7147 7033 * controllers that are capable. If not, we use legacy INTx mode.
edd16368 7034 */
6f039790 7035static void hpsa_interrupt_mode(struct ctlr_info *h)
edd16368
SC
7036{
7037#ifdef CONFIG_PCI_MSI
254f796b
MG
7038 int err, i;
7039 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7040
7041 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7042 hpsa_msix_entries[i].vector = 0;
7043 hpsa_msix_entries[i].entry = i;
7044 }
edd16368
SC
7045
7046 /* Some boards advertise MSI but don't really support it */
6b3f4c52
SC
7047 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7048 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
edd16368 7049 goto default_int_mode;
55c06c71 7050 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
050f7147 7051 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
eee0f03a 7052 h->msix_vector = MAX_REPLY_QUEUES;
f89439bc
SC
7053 if (h->msix_vector > num_online_cpus())
7054 h->msix_vector = num_online_cpus();
18fce3c4
AG
7055 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7056 1, h->msix_vector);
7057 if (err < 0) {
7058 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7059 h->msix_vector = 0;
7060 goto single_msi_mode;
7061 } else if (err < h->msix_vector) {
55c06c71 7062 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
edd16368 7063 "available\n", err);
edd16368 7064 }
18fce3c4
AG
7065 h->msix_vector = err;
7066 for (i = 0; i < h->msix_vector; i++)
7067 h->intr[i] = hpsa_msix_entries[i].vector;
7068 return;
edd16368 7069 }
18fce3c4 7070single_msi_mode:
55c06c71 7071 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
050f7147 7072 dev_info(&h->pdev->dev, "MSI capable controller\n");
55c06c71 7073 if (!pci_enable_msi(h->pdev))
edd16368
SC
7074 h->msi_vector = 1;
7075 else
55c06c71 7076 dev_warn(&h->pdev->dev, "MSI init failed\n");
edd16368
SC
7077 }
7078default_int_mode:
7079#endif /* CONFIG_PCI_MSI */
7080 /* if we get here we're going to use the default interrupt mode */
a9a3a273 7081 h->intr[h->intr_mode] = h->pdev->irq;
edd16368
SC
7082}
7083
6f039790 7084static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
e5c880d1
SC
7085{
7086 int i;
7087 u32 subsystem_vendor_id, subsystem_device_id;
7088
7089 subsystem_vendor_id = pdev->subsystem_vendor;
7090 subsystem_device_id = pdev->subsystem_device;
7091 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7092 subsystem_vendor_id;
7093
7094 for (i = 0; i < ARRAY_SIZE(products); i++)
7095 if (*board_id == products[i].board_id)
7096 return i;
7097
6798cc0a
SC
7098 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7099 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7100 !hpsa_allow_any) {
e5c880d1
SC
7101 dev_warn(&pdev->dev, "unrecognized board ID: "
7102 "0x%08x, ignoring.\n", *board_id);
7103 return -ENODEV;
7104 }
7105 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7106}
7107
6f039790
GKH
7108static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7109 unsigned long *memory_bar)
3a7774ce
SC
7110{
7111 int i;
7112
7113 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
12d2cd47 7114 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3a7774ce 7115 /* addressing mode bits already removed */
12d2cd47
SC
7116 *memory_bar = pci_resource_start(pdev, i);
7117 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3a7774ce
SC
7118 *memory_bar);
7119 return 0;
7120 }
12d2cd47 7121 dev_warn(&pdev->dev, "no memory BAR found\n");
3a7774ce
SC
7122 return -ENODEV;
7123}
7124
6f039790
GKH
7125static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7126 int wait_for_ready)
2c4c8c8b 7127{
fe5389c8 7128 int i, iterations;
2c4c8c8b 7129 u32 scratchpad;
fe5389c8
SC
7130 if (wait_for_ready)
7131 iterations = HPSA_BOARD_READY_ITERATIONS;
7132 else
7133 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
2c4c8c8b 7134
fe5389c8
SC
7135 for (i = 0; i < iterations; i++) {
7136 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7137 if (wait_for_ready) {
7138 if (scratchpad == HPSA_FIRMWARE_READY)
7139 return 0;
7140 } else {
7141 if (scratchpad != HPSA_FIRMWARE_READY)
7142 return 0;
7143 }
2c4c8c8b
SC
7144 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7145 }
fe5389c8 7146 dev_warn(&pdev->dev, "board not ready, timed out.\n");
2c4c8c8b
SC
7147 return -ENODEV;
7148}
7149
6f039790
GKH
7150static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7151 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7152 u64 *cfg_offset)
a51fd47f
SC
7153{
7154 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7155 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7156 *cfg_base_addr &= (u32) 0x0000ffff;
7157 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7158 if (*cfg_base_addr_index == -1) {
7159 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7160 return -ENODEV;
7161 }
7162 return 0;
7163}
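/*
 * Layout note: SA5_CTCFG_OFFSET yields the config-space BAR register
 * hosting the config table and SA5_CTMEM_OFFSET the offset within that
 * BAR; find_PCI_BAR_index() above translates the register offset into a
 * Linux resource index so callers can apply pci_resource_start().
 */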
7164
195f2c65
RE
7165static void hpsa_free_cfgtables(struct ctlr_info *h)
7166{
105a3dbc 7167 if (h->transtable) {
195f2c65 7168 iounmap(h->transtable);
105a3dbc
RE
7169 h->transtable = NULL;
7170 }
7171 if (h->cfgtable) {
195f2c65 7172 iounmap(h->cfgtable);
105a3dbc
RE
7173 h->cfgtable = NULL;
7174 }
195f2c65
RE
7175}
7176
7177/* Find and map CISS config table and transfer table;
7178 * several items must be unmapped (freed) later.
7179 */
6f039790 7180static int hpsa_find_cfgtables(struct ctlr_info *h)
edd16368 7181{
01a02ffc
SC
7182 u64 cfg_offset;
7183 u32 cfg_base_addr;
7184 u64 cfg_base_addr_index;
303932fd 7185 u32 trans_offset;
a51fd47f 7186 int rc;
77c4495c 7187
a51fd47f
SC
7188 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7189 &cfg_base_addr_index, &cfg_offset);
7190 if (rc)
7191 return rc;
77c4495c 7192 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
a51fd47f 7193 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
cd3c81c4
RE
7194 if (!h->cfgtable) {
7195 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
77c4495c 7196 return -ENOMEM;
cd3c81c4 7197 }
580ada3c
SC
7198 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7199 if (rc)
7200 return rc;
77c4495c 7201 /* Find performant mode table. */
a51fd47f 7202 trans_offset = readl(&h->cfgtable->TransMethodOffset);
77c4495c
SC
7203 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7204 cfg_base_addr_index)+cfg_offset+trans_offset,
7205 sizeof(*h->transtable));
195f2c65
RE
7206 if (!h->transtable) {
7207 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7208 hpsa_free_cfgtables(h);
77c4495c 7209 return -ENOMEM;
195f2c65 7210 }
77c4495c
SC
7211 return 0;
7212}
7213
6f039790 7214static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
cba3d38b 7215{
41ce4c35
SC
7216#define MIN_MAX_COMMANDS 16
7217 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7218
7219 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
72ceeaec
SC
7220
7221 /* Limit commands in memory limited kdump scenario. */
7222 if (reset_devices && h->max_commands > 32)
7223 h->max_commands = 32;
7224
41ce4c35
SC
7225 if (h->max_commands < MIN_MAX_COMMANDS) {
7226 dev_warn(&h->pdev->dev,
7227 "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
7228 h->max_commands,
7229 MIN_MAX_COMMANDS);
7230 h->max_commands = MIN_MAX_COMMANDS;
cba3d38b
SC
7231 }
7232}
7233
c7ee65b3
WS
7234/* If the controller reports that the total max sg entries is greater than 512,
7235 * then we know that chained SG blocks work. (Original smart arrays did not
7236 * support chained SG blocks and would return zero for max sg entries.)
7237 */
7238static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7239{
7240 return h->maxsgentries > 512;
7241}
7242
b93d7536
SC
7243/* Interrogate the hardware for some limits:
7244 * max commands, max SG elements without chaining, and with chaining,
7245 * SG chain block size, etc.
7246 */
6f039790 7247static void hpsa_find_board_params(struct ctlr_info *h)
b93d7536 7248{
cba3d38b 7249 hpsa_get_max_perf_mode_cmds(h);
45fcb86e 7250 h->nr_cmds = h->max_commands;
b93d7536 7251 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
283b4a9b 7252 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
c7ee65b3
WS
7253 if (hpsa_supports_chained_sg_blocks(h)) {
7254 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
b93d7536 7255 h->max_cmd_sg_entries = 32;
1a63ea6f 7256 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
b93d7536
SC
7257 h->maxsgentries--; /* save one for chain pointer */
7258 } else {
c7ee65b3
WS
7259 /*
7260 * Original smart arrays supported at most 31 s/g entries
7261 * embedded inline in the command (trying to use more
7262 * would lock up the controller)
7263 */
7264 h->max_cmd_sg_entries = 31;
1a63ea6f 7265 h->maxsgentries = 31; /* default to traditional values */
c7ee65b3 7266 h->chainsize = 0;
b93d7536 7267 }
75167d2c
SC
7268
7269 /* Find out what task management functions are supported and cache */
7270 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
0e7a7fce
ST
7271 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7272 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7273 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7274 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
8be986cc
SC
7275 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7276 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
b93d7536
SC
7277}
7278
76c46e49
SC
7279static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7280{
0fc9fd40 7281 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
050f7147 7282 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
76c46e49
SC
7283 return false;
7284 }
7285 return true;
7286}
7287
97a5e98c 7288static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
f7c39101 7289{
97a5e98c 7290 u32 driver_support;
f7c39101 7291
97a5e98c 7292 driver_support = readl(&(h->cfgtable->driver_support));
0b9e7b74
AB
7293 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7294#ifdef CONFIG_X86
97a5e98c 7295 driver_support |= ENABLE_SCSI_PREFETCH;
f7c39101 7296#endif
28e13446
SC
7297 driver_support |= ENABLE_UNIT_ATTN;
7298 writel(driver_support, &(h->cfgtable->driver_support));
f7c39101
SC
7299}
7300
3d0eab67
SC
7301/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7302 * in a prefetch beyond physical memory.
7303 */
7304static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7305{
7306 u32 dma_prefetch;
7307
7308 if (h->board_id != 0x3225103C)
7309 return;
7310 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
7311 dma_prefetch |= 0x8000;
7312 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7313}
7314
c706a795 7315static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
76438d08
SC
7316{
7317 int i;
7318 u32 doorbell_value;
7319 unsigned long flags;
7320 /* wait until the clear_event_notify bit 6 is cleared by controller. */
007e7aa9 7321 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
76438d08
SC
7322 spin_lock_irqsave(&h->lock, flags);
7323 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7324 spin_unlock_irqrestore(&h->lock, flags);
7325 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
c706a795 7326 goto done;
76438d08 7327 /* delay and try again */
007e7aa9 7328 msleep(CLEAR_EVENT_WAIT_INTERVAL);
76438d08 7329 }
c706a795
RE
7330 return -ENODEV;
7331done:
7332 return 0;
76438d08
SC
7333}
7334
c706a795 7335static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
eb6b2ae9
SC
7336{
7337 int i;
6eaf46fd
SC
7338 u32 doorbell_value;
7339 unsigned long flags;
eb6b2ae9
SC
7340
7341 /* under certain very rare conditions, this can take a while.
7342 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7343 * as we enter this code.)
7344 */
007e7aa9 7345 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
25163bd5
WS
7346 if (h->remove_in_progress)
7347 goto done;
6eaf46fd
SC
7348 spin_lock_irqsave(&h->lock, flags);
7349 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7350 spin_unlock_irqrestore(&h->lock, flags);
382be668 7351 if (!(doorbell_value & CFGTBL_ChangeReq))
c706a795 7352 goto done;
eb6b2ae9 7353 /* delay and try again */
007e7aa9 7354 msleep(MODE_CHANGE_WAIT_INTERVAL);
eb6b2ae9 7355 }
c706a795
RE
7356 return -ENODEV;
7357done:
7358 return 0;
3f4336f3
SC
7359}
7360
c706a795 7361/* return -ENODEV or other reason on error, 0 on success */
6f039790 7362static int hpsa_enter_simple_mode(struct ctlr_info *h)
3f4336f3
SC
7363{
7364 u32 trans_support;
7365
7366 trans_support = readl(&(h->cfgtable->TransportSupport));
7367 if (!(trans_support & SIMPLE_MODE))
7368 return -ENOTSUPP;
7369
7370 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
283b4a9b 7371
3f4336f3
SC
7372 /* Update the field, and then ring the doorbell */
7373 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
b9af4937 7374 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
3f4336f3 7375 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
7376 if (hpsa_wait_for_mode_change_ack(h))
7377 goto error;
eb6b2ae9 7378 print_cfg_table(&h->pdev->dev, h->cfgtable);
283b4a9b
SC
7379 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7380 goto error;
960a30e7 7381 h->transMethod = CFGTBL_Trans_Simple;
eb6b2ae9 7382 return 0;
283b4a9b 7383error:
050f7147 7384 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
283b4a9b 7385 return -ENODEV;
eb6b2ae9
SC
7386}
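/*
 * Handshake summary: every transport-mode change in this driver follows
 * the same four steps seen above -- write HostWrite.TransportRequest,
 * ring CFGTBL_ChangeReq on the SA5 doorbell, poll until the controller
 * clears the bit (hpsa_wait_for_mode_change_ack()), then read back
 * TransportActive to confirm the switch took effect.
 */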
7387
195f2c65
RE
7388/* free items allocated or mapped by hpsa_pci_init */
7389static void hpsa_free_pci_init(struct ctlr_info *h)
7390{
7391 hpsa_free_cfgtables(h); /* pci_init 4 */
7392 iounmap(h->vaddr); /* pci_init 3 */
105a3dbc 7393 h->vaddr = NULL;
195f2c65 7394 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
943a7021
RE
7395 /*
7396 * call pci_disable_device before pci_release_regions per
7397 * Documentation/PCI/pci.txt
7398 */
195f2c65 7399 pci_disable_device(h->pdev); /* pci_init 1 */
943a7021 7400 pci_release_regions(h->pdev); /* pci_init 2 */
195f2c65
RE
7401}
7402
7403/* several items must be freed later */
6f039790 7404static int hpsa_pci_init(struct ctlr_info *h)
77c4495c 7405{
eb6b2ae9 7406 int prod_index, err;
edd16368 7407
e5c880d1
SC
7408 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7409 if (prod_index < 0)
60f923b9 7410 return prod_index;
e5c880d1
SC
7411 h->product_name = products[prod_index].product_name;
7412 h->access = *(products[prod_index].access);
edd16368 7413
9b5c48c2
SC
7414 h->needs_abort_tags_swizzled =
7415 ctlr_needs_abort_tags_swizzled(h->board_id);
7416
e5a44df8
MG
7417 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7418 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7419
55c06c71 7420 err = pci_enable_device(h->pdev);
edd16368 7421 if (err) {
195f2c65 7422 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
943a7021 7423 pci_disable_device(h->pdev);
edd16368
SC
7424 return err;
7425 }
7426
f79cfec6 7427 err = pci_request_regions(h->pdev, HPSA);
edd16368 7428 if (err) {
55c06c71 7429 dev_err(&h->pdev->dev,
195f2c65 7430 "failed to obtain PCI resources\n");
943a7021
RE
7431 pci_disable_device(h->pdev);
7432 return err;
edd16368 7433 }
4fa604e1
RE
7434
7435 pci_set_master(h->pdev);
7436
6b3f4c52 7437 hpsa_interrupt_mode(h);
12d2cd47 7438 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3a7774ce 7439 if (err)
195f2c65 7440 goto clean2; /* intmode+region, pci */
edd16368 7441 h->vaddr = remap_pci_mem(h->paddr, 0x250);
204892e9 7442 if (!h->vaddr) {
195f2c65 7443 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
204892e9 7444 err = -ENOMEM;
195f2c65 7445 goto clean2; /* intmode+region, pci */
204892e9 7446 }
fe5389c8 7447 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
2c4c8c8b 7448 if (err)
195f2c65 7449 goto clean3; /* vaddr, intmode+region, pci */
77c4495c
SC
7450 err = hpsa_find_cfgtables(h);
7451 if (err)
195f2c65 7452 goto clean3; /* vaddr, intmode+region, pci */
b93d7536 7453 hpsa_find_board_params(h);
edd16368 7454
76c46e49 7455 if (!hpsa_CISS_signature_present(h)) {
edd16368 7456 err = -ENODEV;
195f2c65 7457 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368 7458 }
97a5e98c 7459 hpsa_set_driver_support_bits(h);
3d0eab67 7460 hpsa_p600_dma_prefetch_quirk(h);
eb6b2ae9
SC
7461 err = hpsa_enter_simple_mode(h);
7462 if (err)
195f2c65 7463 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
edd16368
SC
7464 return 0;
7465
195f2c65
RE
7466clean4: /* cfgtables, vaddr, intmode+region, pci */
7467 hpsa_free_cfgtables(h);
7468clean3: /* vaddr, intmode+region, pci */
7469 iounmap(h->vaddr);
105a3dbc 7470 h->vaddr = NULL;
195f2c65
RE
7471clean2: /* intmode+region, pci */
7472 hpsa_disable_interrupt_mode(h);
943a7021
RE
7473 /*
7474 * call pci_disable_device before pci_release_regions per
7475 * Documentation/PCI/pci.txt
7476 */
195f2c65 7477 pci_disable_device(h->pdev);
943a7021 7478 pci_release_regions(h->pdev);
edd16368
SC
7479 return err;
7480}
7481
6f039790 7482static void hpsa_hba_inquiry(struct ctlr_info *h)
339b2b14
SC
7483{
7484 int rc;
7485
7486#define HBA_INQUIRY_BYTE_COUNT 64
7487 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7488 if (!h->hba_inquiry_data)
7489 return;
7490 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7491 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7492 if (rc != 0) {
7493 kfree(h->hba_inquiry_data);
7494 h->hba_inquiry_data = NULL;
7495 }
7496}
7497
6b6c1cd7 7498static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
4c2a8c40 7499{
1df8552a 7500 int rc, i;
3b747298 7501 void __iomem *vaddr;
4c2a8c40
SC
7502
7503 if (!reset_devices)
7504 return 0;
7505
132aa220
TH
7506 /* The kdump kernel is loading; we don't know what state the
7507 * pci interface is in. dev->enable_cnt is zero, so we call
7508 * enable+disable, wait a while, and switch it on.
7509 */
7510 rc = pci_enable_device(pdev);
7511 if (rc) {
7512 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7513 return -ENODEV;
7514 }
7515 pci_disable_device(pdev);
7516 msleep(260); /* a randomly chosen number */
7517 rc = pci_enable_device(pdev);
7518 if (rc) {
7519 dev_warn(&pdev->dev, "failed to enable device.\n");
7520 return -ENODEV;
7521 }
4fa604e1 7522
859c75ab 7523 pci_set_master(pdev);
4fa604e1 7524
3b747298
TH
7525 vaddr = pci_ioremap_bar(pdev, 0);
7526 if (vaddr == NULL) {
7527 rc = -ENOMEM;
7528 goto out_disable;
7529 }
7530 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7531 iounmap(vaddr);
7532
1df8552a 7533 /* Reset the controller with a PCI power-cycle or via doorbell */
6b6c1cd7 7534 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
4c2a8c40 7535
1df8552a
SC
7536 /* -ENOTSUPP here means we cannot reset the controller
7537 * but it's already (and still) up and running in
18867659
SC
7538 * "performant mode". Or, it might be 640x, which can't reset
7539 * due to concerns about shared bbwc between 6402/6404 pair.
1df8552a 7540 */
adf1b3a3 7541 if (rc)
132aa220 7542 goto out_disable;
4c2a8c40
SC
7543
7544 /* Now try to get the controller to respond to a no-op */
1ba66c9c 7545 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
4c2a8c40
SC
7546 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7547 if (hpsa_noop(pdev) == 0)
7548 break;
7549 else
7550 dev_warn(&pdev->dev, "no-op failed%s\n",
7551 (i < 11 ? "; re-trying" : ""));
7552 }
132aa220
TH
7553
7554out_disable:
7555
7556 pci_disable_device(pdev);
7557 return rc;
4c2a8c40
SC
7558}
7559
1fb7c98a
RE
7560static void hpsa_free_cmd_pool(struct ctlr_info *h)
7561{
7562 kfree(h->cmd_pool_bits);
105a3dbc
RE
7563 h->cmd_pool_bits = NULL;
7564 if (h->cmd_pool) {
1fb7c98a
RE
7565 pci_free_consistent(h->pdev,
7566 h->nr_cmds * sizeof(struct CommandList),
7567 h->cmd_pool,
7568 h->cmd_pool_dhandle);
105a3dbc
RE
7569 h->cmd_pool = NULL;
7570 h->cmd_pool_dhandle = 0;
7571 }
7572 if (h->errinfo_pool) {
1fb7c98a
RE
7573 pci_free_consistent(h->pdev,
7574 h->nr_cmds * sizeof(struct ErrorInfo),
7575 h->errinfo_pool,
7576 h->errinfo_pool_dhandle);
105a3dbc
RE
7577 h->errinfo_pool = NULL;
7578 h->errinfo_pool_dhandle = 0;
7579 }
1fb7c98a
RE
7580}
7581
d37ffbe4 7582static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
2e9d1b36
SC
7583{
7584 h->cmd_pool_bits = kzalloc(
7585 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7586 sizeof(unsigned long), GFP_KERNEL);
7587 h->cmd_pool = pci_alloc_consistent(h->pdev,
7588 h->nr_cmds * sizeof(*h->cmd_pool),
7589 &(h->cmd_pool_dhandle));
7590 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7591 h->nr_cmds * sizeof(*h->errinfo_pool),
7592 &(h->errinfo_pool_dhandle));
7593 if ((h->cmd_pool_bits == NULL)
7594 || (h->cmd_pool == NULL)
7595 || (h->errinfo_pool == NULL)) {
7596 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
2c143342 7597 goto clean_up;
2e9d1b36 7598 }
360c73bd 7599 hpsa_preinitialize_commands(h);
2e9d1b36 7600 return 0;
2c143342
RE
7601clean_up:
7602 hpsa_free_cmd_pool(h);
7603 return -ENOMEM;
2e9d1b36
SC
7604}
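/*
 * Sizing note (illustrative arithmetic): cmd_pool_bits holds one bit
 * per command, rounded up to whole longs; e.g. nr_cmds = 1040 on a
 * 64-bit kernel needs DIV_ROUND_UP(1040, 64) = 17 longs (136 bytes).
 */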
7605
41b3cf08
SC
7606static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7607{
ec429952 7608 int i, cpu;
41b3cf08
SC
7609
7610 cpu = cpumask_first(cpu_online_mask);
7611 for (i = 0; i < h->msix_vector; i++) {
ec429952 7612 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
41b3cf08
SC
7613 cpu = cpumask_next(cpu, cpu_online_mask);
7614 }
7615}
7616
ec501a18
RE
7617/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7618static void hpsa_free_irqs(struct ctlr_info *h)
7619{
7620 int i;
7621
7622 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7623 /* Single reply queue, only one irq to free */
7624 i = h->intr_mode;
7625 irq_set_affinity_hint(h->intr[i], NULL);
7626 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7627 h->q[i] = 0;
ec501a18
RE
7628 return;
7629 }
7630
7631 for (i = 0; i < h->msix_vector; i++) {
7632 irq_set_affinity_hint(h->intr[i], NULL);
7633 free_irq(h->intr[i], &h->q[i]);
105a3dbc 7634 h->q[i] = 0;
ec501a18 7635 }
a4e17fc1
RE
7636 for (; i < MAX_REPLY_QUEUES; i++)
7637 h->q[i] = 0;
ec501a18
RE
7638}
7639
9ee61794
RE
7640/* returns 0 on success; cleans up and returns -Enn on error */
7641static int hpsa_request_irqs(struct ctlr_info *h,
0ae01a32
SC
7642 irqreturn_t (*msixhandler)(int, void *),
7643 irqreturn_t (*intxhandler)(int, void *))
7644{
254f796b 7645 int rc, i;
0ae01a32 7646
254f796b
MG
7647 /*
7648 * initialize h->q[x] = x so that interrupt handlers know which
7649 * queue to process.
7650 */
7651 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7652 h->q[i] = (u8) i;
7653
eee0f03a 7654 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
254f796b 7655 /* If performant mode and MSI-X, use multiple reply queues */
a4e17fc1 7656 for (i = 0; i < h->msix_vector; i++) {
8b47004a 7657 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
254f796b 7658 rc = request_irq(h->intr[i], msixhandler,
8b47004a 7659 0, h->intrname[i],
254f796b 7660 &h->q[i]);
a4e17fc1
RE
7661 if (rc) {
7662 int j;
7663
7664 dev_err(&h->pdev->dev,
7665 "failed to get irq %d for %s\n",
7666 h->intr[i], h->devname);
7667 for (j = 0; j < i; j++) {
7668 free_irq(h->intr[j], &h->q[j]);
7669 h->q[j] = 0;
7670 }
7671 for (; j < MAX_REPLY_QUEUES; j++)
7672 h->q[j] = 0;
7673 return rc;
7674 }
7675 }
41b3cf08 7676 hpsa_irq_affinity_hints(h);
254f796b
MG
7677 } else {
7678 /* Use single reply pool */
eee0f03a 7679 if (h->msix_vector > 0 || h->msi_vector) {
8b47004a
RE
7680 if (h->msix_vector)
7681 sprintf(h->intrname[h->intr_mode],
7682 "%s-msix", h->devname);
7683 else
7684 sprintf(h->intrname[h->intr_mode],
7685 "%s-msi", h->devname);
254f796b 7686 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7687 msixhandler, 0,
7688 h->intrname[h->intr_mode],
254f796b
MG
7689 &h->q[h->intr_mode]);
7690 } else {
8b47004a
RE
7691 sprintf(h->intrname[h->intr_mode],
7692 "%s-intx", h->devname);
254f796b 7693 rc = request_irq(h->intr[h->intr_mode],
8b47004a
RE
7694 intxhandler, IRQF_SHARED,
7695 h->intrname[h->intr_mode],
254f796b
MG
7696 &h->q[h->intr_mode]);
7697 }
105a3dbc 7698 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
254f796b 7699 }
0ae01a32 7700 if (rc) {
195f2c65 7701 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
0ae01a32 7702 h->intr[h->intr_mode], h->devname);
195f2c65 7703 hpsa_free_irqs(h);
0ae01a32
SC
7704 return -ENODEV;
7705 }
7706 return 0;
7707}
7708
6f039790 7709static int hpsa_kdump_soft_reset(struct ctlr_info *h)
64670ac8 7710{
39c53f55 7711 int rc;
bf43caf3 7712 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
64670ac8
SC
7713
7714 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
39c53f55
RE
7715 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7716 if (rc) {
64670ac8 7717 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
39c53f55 7718 return rc;
64670ac8
SC
7719 }
7720
7721 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
39c53f55
RE
7722 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7723 if (rc) {
64670ac8
SC
7724 dev_warn(&h->pdev->dev, "Board failed to become ready "
7725 "after soft reset.\n");
39c53f55 7726 return rc;
64670ac8
SC
7727 }
7728
7729 return 0;
7730}
7731
072b0518
SC
7732static void hpsa_free_reply_queues(struct ctlr_info *h)
7733{
7734 int i;
7735
7736 for (i = 0; i < h->nreply_queues; i++) {
7737 if (!h->reply_queue[i].head)
7738 continue;
1fb7c98a
RE
7739 pci_free_consistent(h->pdev,
7740 h->reply_queue_size,
7741 h->reply_queue[i].head,
7742 h->reply_queue[i].busaddr);
072b0518
SC
7743 h->reply_queue[i].head = NULL;
7744 h->reply_queue[i].busaddr = 0;
7745 }
105a3dbc 7746 h->reply_queue_size = 0;
072b0518
SC
7747}
7748
0097f0f4
SC
7749static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7750{
105a3dbc
RE
7751 hpsa_free_performant_mode(h); /* init_one 7 */
7752 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7753 hpsa_free_cmd_pool(h); /* init_one 5 */
7754 hpsa_free_irqs(h); /* init_one 4 */
2946e82b
RE
7755 scsi_host_put(h->scsi_host); /* init_one 3 */
7756 h->scsi_host = NULL; /* init_one 3 */
7757 hpsa_free_pci_init(h); /* init_one 2_5 */
9ecd953a
RE
7758 free_percpu(h->lockup_detected); /* init_one 2 */
7759 h->lockup_detected = NULL; /* init_one 2 */
7760 if (h->resubmit_wq) {
7761 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7762 h->resubmit_wq = NULL;
7763 }
7764 if (h->rescan_ctlr_wq) {
7765 destroy_workqueue(h->rescan_ctlr_wq);
7766 h->rescan_ctlr_wq = NULL;
7767 }
105a3dbc 7768 kfree(h); /* init_one 1 */
64670ac8
SC
7769}
7770
a0c12413 7771/* Called when controller lockup detected. */
f2405db8 7772static void fail_all_outstanding_cmds(struct ctlr_info *h)
a0c12413 7773{
281a7fd0
WS
7774 int i, refcount;
7775 struct CommandList *c;
25163bd5 7776 int failcount = 0;
a0c12413 7777
080ef1cc 7778 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
f2405db8 7779 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 7780 c = h->cmd_pool + i;
281a7fd0
WS
7781 refcount = atomic_inc_return(&c->refcount);
7782 if (refcount > 1) {
25163bd5 7783 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
281a7fd0 7784 finish_cmd(c);
433b5f4d 7785 atomic_dec(&h->commands_outstanding);
25163bd5 7786 failcount++;
281a7fd0
WS
7787 }
7788 cmd_free(h, c);
a0c12413 7789 }
25163bd5
WS
7790 dev_warn(&h->pdev->dev,
7791 "failed %d commands in fail_all\n", failcount);
a0c12413
SC
7792}
7793
094963da
SC
7794static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7795{
c8ed0010 7796 int cpu;
094963da 7797
c8ed0010 7798 for_each_online_cpu(cpu) {
094963da
SC
7799 u32 *lockup_detected;
7800 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7801 *lockup_detected = value;
094963da
SC
7802 }
7803 wmb(); /* be sure the per-cpu variables are out to memory */
7804}
7805
a0c12413
SC
7806static void controller_lockup_detected(struct ctlr_info *h)
7807{
7808 unsigned long flags;
094963da 7809 u32 lockup_detected;
a0c12413 7810
a0c12413
SC
7811 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7812 spin_lock_irqsave(&h->lock, flags);
094963da
SC
7813 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7814 if (!lockup_detected) {
7815 /* no heartbeat, but controller gave us a zero. */
7816 dev_warn(&h->pdev->dev,
25163bd5
WS
7817 "lockup detected after %d seconds but scratchpad register is zero\n",
7818 h->heartbeat_sample_interval / HZ);
094963da
SC
7819 lockup_detected = 0xffffffff;
7820 }
7821 set_lockup_detected_for_all_cpus(h, lockup_detected);
a0c12413 7822 spin_unlock_irqrestore(&h->lock, flags);
25163bd5
WS
7823 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
7824 lockup_detected, h->heartbeat_sample_interval / HZ);
a0c12413 7825 pci_disable_device(h->pdev);
f2405db8 7826 fail_all_outstanding_cmds(h);
a0c12413
SC
7827}
7828
25163bd5 7829static int detect_controller_lockup(struct ctlr_info *h)
a0c12413
SC
7830{
7831 u64 now;
7832 u32 heartbeat;
7833 unsigned long flags;
7834
a0c12413
SC
7835 now = get_jiffies_64();
7836 /* If we've received an interrupt recently, we're ok. */
7837 if (time_after64(h->last_intr_timestamp +
e85c5974 7838 (h->heartbeat_sample_interval), now))
25163bd5 7839 return false;
a0c12413
SC
7840
7841 /*
7842 * If we've already checked the heartbeat recently, we're ok.
7843 * This could happen if someone sends us a signal. We
7844 * otherwise don't care about signals in this thread.
7845 */
7846 if (time_after64(h->last_heartbeat_timestamp +
e85c5974 7847 (h->heartbeat_sample_interval), now))
25163bd5 7848 return false;
a0c12413
SC
7849
7850 /* If heartbeat has not changed since we last looked, we're not ok. */
7851 spin_lock_irqsave(&h->lock, flags);
7852 heartbeat = readl(&h->cfgtable->HeartBeat);
7853 spin_unlock_irqrestore(&h->lock, flags);
7854 if (h->last_heartbeat == heartbeat) {
7855 controller_lockup_detected(h);
25163bd5 7856 return true;
a0c12413
SC
7857 }
7858
7859 /* We're ok. */
7860 h->last_heartbeat = heartbeat;
7861 h->last_heartbeat_timestamp = now;
25163bd5 7862 return false;
a0c12413
SC
7863}
7864
9846590e 7865static void hpsa_ack_ctlr_events(struct ctlr_info *h)
76438d08
SC
7866{
7867 int i;
7868 char *event_type;
7869
e4aa3e6a
SC
7870 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7871 return;
7872
76438d08 7873 /* Ask the controller to clear the events we're handling. */
1f7cee8c
SC
7874 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7875 | CFGTBL_Trans_io_accel2)) &&
76438d08
SC
7876 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7877 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7878
7879 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7880 event_type = "state change";
7881 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7882 event_type = "configuration change";
7883 /* Stop sending new RAID offload reqs via the IO accelerator */
7884 scsi_block_requests(h->scsi_host);
7885 for (i = 0; i < h->ndevices; i++)
7886 h->dev[i]->offload_enabled = 0;
23100dd9 7887 hpsa_drain_accel_commands(h);
76438d08
SC
7888 /* Set 'accelerator path config change' bit */
7889 dev_warn(&h->pdev->dev,
7890 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7891 h->events, event_type);
7892 writel(h->events, &(h->cfgtable->clear_event_notify));
7893 /* Set the "clear event notify field update" bit 6 */
7894 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7895 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7896 hpsa_wait_for_clear_event_notify_ack(h);
7897 scsi_unblock_requests(h->scsi_host);
7898 } else {
7899 /* Acknowledge controller notification events. */
7900 writel(h->events, &(h->cfgtable->clear_event_notify));
7901 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7902 hpsa_wait_for_clear_event_notify_ack(h);
7903#if 0
7904 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7905 hpsa_wait_for_mode_change_ack(h);
7906#endif
7907 }
9846590e 7908 return;
76438d08
SC
7909}
7910
7911/* Check a register on the controller to see if there are configuration
7912 * changes (added/changed/removed logical drives, etc.) which mean that
e863d68e
ST
7913 * we should rescan the controller for devices.
7914 * Also check flag for driver-initiated rescan.
76438d08 7915 */
9846590e 7916static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
76438d08 7917{
853633e8
DB
7918 if (h->drv_req_rescan) {
7919 h->drv_req_rescan = 0;
7920 return 1;
7921 }
7922
76438d08 7923 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
9846590e 7924 return 0;
76438d08
SC
7925
7926 h->events = readl(&(h->cfgtable->event_notify));
9846590e
SC
7927 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7928}
76438d08 7929
9846590e
SC
7930/*
7931 * Check if any of the offline devices have become ready
7932 */
7933static int hpsa_offline_devices_ready(struct ctlr_info *h)
7934{
7935 unsigned long flags;
7936 struct offline_device_entry *d;
7937 struct list_head *this, *tmp;
7938
7939 spin_lock_irqsave(&h->offline_device_lock, flags);
7940 list_for_each_safe(this, tmp, &h->offline_device_list) {
7941 d = list_entry(this, struct offline_device_entry,
7942 offline_list);
7943 spin_unlock_irqrestore(&h->offline_device_lock, flags);
d1fea47c
SC
7944 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7945 spin_lock_irqsave(&h->offline_device_lock, flags);
7946 list_del(&d->offline_list);
7947 spin_unlock_irqrestore(&h->offline_device_lock, flags);
9846590e 7948 return 1;
d1fea47c 7949 }
9846590e
SC
7950 spin_lock_irqsave(&h->offline_device_lock, flags);
7951 }
7952 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7953 return 0;
76438d08
SC
7954}
7955
6636e7f4 7956static void hpsa_rescan_ctlr_worker(struct work_struct *work)
a0c12413
SC
7957{
7958 unsigned long flags;
8a98db73 7959 struct ctlr_info *h = container_of(to_delayed_work(work),
6636e7f4
DB
7960 struct ctlr_info, rescan_ctlr_work);
7961
7962
7963 if (h->remove_in_progress)
8a98db73 7964 return;
9846590e
SC
7965
7966 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
7967 scsi_host_get(h->scsi_host);
9846590e
SC
7968 hpsa_ack_ctlr_events(h);
7969 hpsa_scan_start(h->scsi_host);
7970 scsi_host_put(h->scsi_host);
7971 }
8a98db73 7972 spin_lock_irqsave(&h->lock, flags);
6636e7f4
DB
7973 if (!h->remove_in_progress)
7974 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
7975 h->heartbeat_sample_interval);
7976 spin_unlock_irqrestore(&h->lock, flags);
7977}
7978
7979static void hpsa_monitor_ctlr_worker(struct work_struct *work)
7980{
7981 unsigned long flags;
7982 struct ctlr_info *h = container_of(to_delayed_work(work),
7983 struct ctlr_info, monitor_ctlr_work);
7984
7985 detect_controller_lockup(h);
7986 if (lockup_detected(h))
a0c12413 7987 return;
6636e7f4
DB
7988
7989 spin_lock_irqsave(&h->lock, flags);
7990 if (!h->remove_in_progress)
7991 schedule_delayed_work(&h->monitor_ctlr_work,
8a98db73
SC
7992 h->heartbeat_sample_interval);
7993 spin_unlock_irqrestore(&h->lock, flags);
a0c12413
SC
7994}
7995
6636e7f4
DB
7996static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
7997 char *name)
7998{
7999 struct workqueue_struct *wq = NULL;
6636e7f4 8000
397ea9cb 8001 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
6636e7f4
DB
8002 if (!wq)
8003 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8004
8005 return wq;
8006}
8007
6f039790 8008static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
edd16368 8009{
4c2a8c40 8010 int dac, rc;
edd16368 8011 struct ctlr_info *h;
64670ac8
SC
8012 int try_soft_reset = 0;
8013 unsigned long flags;
6b6c1cd7 8014 u32 board_id;
edd16368
SC
8015
8016 if (number_of_controllers == 0)
8017 printk(KERN_INFO DRIVER_NAME "\n");
edd16368 8018
6b6c1cd7
TH
8019 rc = hpsa_lookup_board_id(pdev, &board_id);
8020 if (rc < 0) {
8021 dev_warn(&pdev->dev, "Board ID not found\n");
8022 return rc;
8023 }
8024
8025 rc = hpsa_init_reset_devices(pdev, board_id);
64670ac8
SC
8026 if (rc) {
8027 if (rc != -ENOTSUPP)
8028 return rc;
8029 /* If the reset fails in a particular way (it has no way to do
8030 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8031 * a soft reset once we get the controller configured up to the
8032 * point that it can accept a command.
8033 */
8034 try_soft_reset = 1;
8035 rc = 0;
8036 }
8037
8038reinit_after_soft_reset:
edd16368 8039
303932fd
DB
8040 /* Command structures must be aligned on a 32-byte boundary because
8041 * the 5 lower bits of the address are used by the hardware and by
8042 * the driver. See comments in hpsa.h for more info.
8043 */
303932fd 8044 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
edd16368 8045 h = kzalloc(sizeof(*h), GFP_KERNEL);
105a3dbc
RE
8046 if (!h) {
8047 dev_err(&pdev->dev, "Failed to allocate controller head\n");
ecd9aad4 8048 return -ENOMEM;
105a3dbc 8049 }
edd16368 8050
55c06c71 8051 h->pdev = pdev;
105a3dbc 8052
a9a3a273 8053 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
9846590e 8054 INIT_LIST_HEAD(&h->offline_device_list);
6eaf46fd 8055 spin_lock_init(&h->lock);
9846590e 8056 spin_lock_init(&h->offline_device_lock);
6eaf46fd 8057 spin_lock_init(&h->scan_lock);
34f0c627 8058 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
9b5c48c2 8059 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
094963da
SC
8060
8061 /* Allocate and clear per-cpu variable lockup_detected */
8062 h->lockup_detected = alloc_percpu(u32);
2a5ac326 8063 if (!h->lockup_detected) {
105a3dbc 8064 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
2a5ac326 8065 rc = -ENOMEM;
2efa5929 8066 goto clean1; /* aer/h */
2a5ac326 8067 }
094963da
SC
8068 set_lockup_detected_for_all_cpus(h, 0);
8069
55c06c71 8070 rc = hpsa_pci_init(h);
105a3dbc 8071 if (rc)
2946e82b
RE
8072 goto clean2; /* lu, aer/h */
8073
8074 /* relies on h-> settings made by hpsa_pci_init, including
8075 * interrupt_mode h->intr */
8076 rc = hpsa_scsi_host_alloc(h);
8077 if (rc)
8078 goto clean2_5; /* pci, lu, aer/h */
edd16368 8079
2946e82b 8080 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
edd16368
SC
8081 h->ctlr = number_of_controllers;
8082 number_of_controllers++;
edd16368
SC
8083
8084 /* configure PCI DMA stuff */
ecd9aad4
SC
8085 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8086 if (rc == 0) {
edd16368 8087 dac = 1;
ecd9aad4
SC
8088 } else {
8089 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8090 if (rc == 0) {
8091 dac = 0;
8092 } else {
8093 dev_err(&pdev->dev, "no suitable DMA available\n");
2946e82b 8094 goto clean3; /* shost, pci, lu, aer/h */
ecd9aad4 8095 }
edd16368
SC
8096 }
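/*
 * The two-step fallback above is the usual pattern: try a 64-bit
 * streaming DMA mask first and settle for 32-bit only if the platform
 * rejects it. As a sketch, the same probe with the generic DMA API
 * (assuming a kernel that has it) would read:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 */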
8097
8098 /* make sure the board interrupts are off */
8099 h->access.set_intr_mask(h, HPSA_INTR_OFF);
10f66018 8100
105a3dbc
RE
8101 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8102 if (rc)
2946e82b 8103 goto clean3; /* shost, pci, lu, aer/h */
d37ffbe4 8104 rc = hpsa_alloc_cmd_pool(h);
8947fd10 8105 if (rc)
2946e82b 8106 goto clean4; /* irq, shost, pci, lu, aer/h */
105a3dbc
RE
8107 rc = hpsa_alloc_sg_chain_blocks(h);
8108 if (rc)
2946e82b 8109 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
a08a8471 8110 init_waitqueue_head(&h->scan_wait_queue);
9b5c48c2 8111 init_waitqueue_head(&h->abort_cmd_wait_queue);
d604f533
WS
8112 init_waitqueue_head(&h->event_sync_wait_queue);
8113 mutex_init(&h->reset_mutex);
a08a8471 8114 h->scan_finished = 1; /* no scan currently in progress */
edd16368
SC
8115
8116 pci_set_drvdata(pdev, h);
9a41338e 8117 h->ndevices = 0;
2946e82b 8118
9a41338e 8119 spin_lock_init(&h->devlock);
105a3dbc
RE
8120 rc = hpsa_put_ctlr_into_performant_mode(h);
8121 if (rc)
2946e82b
RE
8122 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8123
8124 /* hook into SCSI subsystem */
8125 rc = hpsa_scsi_add_host(h);
8126 if (rc)
8127 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
2efa5929
RE
8128
8129 /* create the rescan and resubmit workqueues */
8130 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8131 if (!h->rescan_ctlr_wq) {
8132 rc = -ENOMEM;
8133 goto clean7;
8134 }
8135
8136 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8137 if (!h->resubmit_wq) {
8138 rc = -ENOMEM;
8139 goto clean7; /* aer/h */
8140 }
64670ac8 8141
105a3dbc
RE
8142 /*
8143 * At this point, the controller is ready to take commands.
64670ac8
SC
8144 * Now, if reset_devices and the hard reset didn't work, try
8145 * the soft reset and see if that works.
8146 */
8147 if (try_soft_reset) {
8148
8149 /* This is kind of gross. We may or may not get a completion
8150 * from the soft reset command, and if we do, then the value
8151 * from the fifo may or may not be valid. So, we wait 10 secs
8152 * after the reset, throwing away any completions we get during
8153 * that time. Unregister the interrupt handler and register
8154 * fake ones to scoop up any residual completions.
8155 */
8156 spin_lock_irqsave(&h->lock, flags);
8157 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8158 spin_unlock_irqrestore(&h->lock, flags);
ec501a18 8159 hpsa_free_irqs(h);
9ee61794 8160 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
64670ac8
SC
8161 hpsa_intx_discard_completions);
8162 if (rc) {
9ee61794
RE
8163 dev_warn(&h->pdev->dev,
8164 "Failed to request_irq after soft reset.\n");
d498757c 8165 /*
b2ef480c
RE
8166 * cannot goto clean7 or free_irqs will be called
8167 * again. Instead, do its work
8168 */
8169 hpsa_free_performant_mode(h); /* clean7 */
8170 hpsa_free_sg_chain_blocks(h); /* clean6 */
8171 hpsa_free_cmd_pool(h); /* clean5 */
8172 /*
8173 * skip hpsa_free_irqs(h) clean4 since that
8174 * was just called before request_irqs failed
d498757c
RE
8175 */
8176 goto clean3;
64670ac8
SC
8177 }
8178
8179 rc = hpsa_kdump_soft_reset(h);
8180 if (rc)
8181 /* Neither hard nor soft reset worked, we're hosed. */
7ef7323f 8182 goto clean7;
64670ac8
SC
8183
8184 dev_info(&h->pdev->dev, "Board READY.\n");
8185 dev_info(&h->pdev->dev,
8186 "Waiting for stale completions to drain.\n");
8187 h->access.set_intr_mask(h, HPSA_INTR_ON);
8188 msleep(10000);
8189 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8190
8191 rc = controller_reset_failed(h->cfgtable);
8192 if (rc)
8193 dev_info(&h->pdev->dev,
8194 "Soft reset appears to have failed.\n");
8195
8196 /* since the controller's reset, we have to go back and re-init
8197 * everything. Easiest to just forget what we've done and do it
8198 * all over again.
8199 */
8200 hpsa_undo_allocations_after_kdump_soft_reset(h);
8201 try_soft_reset = 0;
8202 if (rc)
b2ef480c 8203 /* don't goto clean, we already unallocated */
64670ac8
SC
8204 return -ENODEV;
8205
8206 goto reinit_after_soft_reset;
8207 }
edd16368 8208
105a3dbc
RE
8209 /* Enable Accelerated IO path at driver layer */
8210 h->acciopath_status = 1;
da0697bd 8211
e863d68e 8212
edd16368
SC
8213 /* Turn the interrupts on so we can service requests */
8214 h->access.set_intr_mask(h, HPSA_INTR_ON);
8215
339b2b14 8216 hpsa_hba_inquiry(h);
8a98db73
SC
8217
8218 /* Monitor the controller for firmware lockups */
8219 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8220 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8221 schedule_delayed_work(&h->monitor_ctlr_work,
8222 h->heartbeat_sample_interval);
6636e7f4
DB
8223 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8224 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8225 h->heartbeat_sample_interval);
88bf6d62 8226 return 0;
edd16368 8227
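/*
 * Error unwinding: each cleanN label below frees what stage N of
 * initialization allocated and then falls through to the labels before
 * it, so a failure at any stage releases resources in reverse
 * allocation order. The comment on each label lists what is still held
 * when control arrives there.
 */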
2946e82b 8228clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
105a3dbc
RE
8229 hpsa_free_performant_mode(h);
8230 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8231clean6: /* sg, cmd, irq, shost, pci, lu, aer/h */
33a2ffce 8232 hpsa_free_sg_chain_blocks(h);
2946e82b 8233clean5: /* cmd, irq, shost, pci, lu, aer/h */
2e9d1b36 8234 hpsa_free_cmd_pool(h);
2946e82b 8235clean4: /* irq, shost, pci, lu, aer/h */
ec501a18 8236 hpsa_free_irqs(h);
2946e82b
RE
8237clean3: /* shost, pci, lu, aer/h */
8238 scsi_host_put(h->scsi_host);
8239 h->scsi_host = NULL;
8240clean2_5: /* pci, lu, aer/h */
195f2c65 8241 hpsa_free_pci_init(h);
2946e82b 8242clean2: /* lu, aer/h */
105a3dbc
RE
8243 if (h->lockup_detected) {
8244 free_percpu(h->lockup_detected);
8245 h->lockup_detected = NULL;
8246 }
8247clean1: /* wq/aer/h */
8248 if (h->resubmit_wq) {
080ef1cc 8249 destroy_workqueue(h->resubmit_wq);
105a3dbc
RE
8250 h->resubmit_wq = NULL;
8251 }
8252 if (h->rescan_ctlr_wq) {
6636e7f4 8253 destroy_workqueue(h->rescan_ctlr_wq);
105a3dbc
RE
8254 h->rescan_ctlr_wq = NULL;
8255 }
edd16368 8256 kfree(h);
ecd9aad4 8257 return rc;
edd16368
SC
8258}
8259
8260static void hpsa_flush_cache(struct ctlr_info *h)
8261{
8262 char *flush_buf;
8263 struct CommandList *c;
25163bd5 8264 int rc;
702890e3 8265
094963da 8266 if (unlikely(lockup_detected(h)))
702890e3 8267 return;
edd16368
SC
8268 flush_buf = kzalloc(4, GFP_KERNEL);
8269 if (!flush_buf)
8270 return;
8271
45fcb86e 8272 c = cmd_alloc(h);
bf43caf3 8273
a2dac136
SC
8274 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
8275 RAID_CTLR_LUNID, TYPE_CMD)) {
8276 goto out;
8277 }
25163bd5
WS
8278 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
8279 PCI_DMA_TODEVICE, NO_TIMEOUT);
8280 if (rc)
8281 goto out;
edd16368 8282 if (c->err_info->CommandStatus == 0)
a2dac136 8283 goto done;
out:
edd16368
SC
8284 dev_warn(&h->pdev->dev,
8285 "error flushing cache on controller\n");
done:
45fcb86e 8286 cmd_free(h, c);
edd16368
SC
8287 kfree(flush_buf);
8288}
8289
8290static void hpsa_shutdown(struct pci_dev *pdev)
8291{
8292 struct ctlr_info *h;
8293
8294 h = pci_get_drvdata(pdev);
8295 /* Turn board interrupts off and send the flush cache command:
8296 * sendcmd will turn off interrupts and send the flush,
8297 * writing all data in the battery-backed cache to disk.
8298 */
8299 hpsa_flush_cache(h);
8300 h->access.set_intr_mask(h, HPSA_INTR_OFF);
105a3dbc 8301 hpsa_free_irqs(h); /* init_one 4 */
cc64c817 8302 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
edd16368
SC
8303}
8304
6f039790 8305static void hpsa_free_device_info(struct ctlr_info *h)
55e14e76
SC
8306{
8307 int i;
8308
105a3dbc 8309 for (i = 0; i < h->ndevices; i++) {
55e14e76 8310 kfree(h->dev[i]);
105a3dbc
RE
8311 h->dev[i] = NULL;
8312 }
55e14e76
SC
8313}
8314
6f039790 8315static void hpsa_remove_one(struct pci_dev *pdev)
edd16368
SC
8316{
8317 struct ctlr_info *h;
8a98db73 8318 unsigned long flags;
edd16368
SC
8319
8320 if (pci_get_drvdata(pdev) == NULL) {
a0c12413 8321 dev_err(&pdev->dev, "unable to remove device\n");
edd16368
SC
8322 return;
8323 }
8324 h = pci_get_drvdata(pdev);
8a98db73
SC
8325
8326 /* Get rid of any controller monitoring work items */
8327 spin_lock_irqsave(&h->lock, flags);
8328 h->remove_in_progress = 1;
8a98db73 8329 spin_unlock_irqrestore(&h->lock, flags);
6636e7f4
DB
8330 cancel_delayed_work_sync(&h->monitor_ctlr_work);
8331 cancel_delayed_work_sync(&h->rescan_ctlr_work);
8332 destroy_workqueue(h->rescan_ctlr_wq);
8333 destroy_workqueue(h->resubmit_wq);
cc64c817 8334
2d041306
DB
8335 /*
8336 * Call before disabling interrupts.
8337 * scsi_remove_host can trigger I/O operations, especially
8338 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8339 * operations which cannot complete and will hang the system.
8340 */
8341 if (h->scsi_host)
8342 scsi_remove_host(h->scsi_host); /* init_one 8 */
105a3dbc 8343 /* includes hpsa_free_irqs - init_one 4 */
195f2c65 8344 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
edd16368 8345 hpsa_shutdown(pdev);
cc64c817 8346
105a3dbc
RE
8347 hpsa_free_device_info(h); /* scan */
8348
2946e82b
RE
8349 kfree(h->hba_inquiry_data); /* init_one 10 */
8350 h->hba_inquiry_data = NULL; /* init_one 10 */
2946e82b 8351 hpsa_free_ioaccel2_sg_chain_blocks(h);
105a3dbc
RE
8352 hpsa_free_performant_mode(h); /* init_one 7 */
8353 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
8354 hpsa_free_cmd_pool(h); /* init_one 5 */
8355
8356 /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
195f2c65 8357
2946e82b
RE
8358 scsi_host_put(h->scsi_host); /* init_one 3 */
8359 h->scsi_host = NULL; /* init_one 3 */
8360
195f2c65 8361 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
2946e82b 8362 hpsa_free_pci_init(h); /* init_one 2.5 */
195f2c65 8363
105a3dbc
RE
8364 free_percpu(h->lockup_detected); /* init_one 2 */
8365 h->lockup_detected = NULL; /* init_one 2 */
8366 /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
8367 kfree(h); /* init_one 1 */
edd16368
SC
8368}
8369
8370static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
8371 __attribute__((unused)) pm_message_t state)
8372{
8373 return -ENOSYS;
8374}
8375
8376static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
8377{
8378 return -ENOSYS;
8379}
8380
8381static struct pci_driver hpsa_pci_driver = {
f79cfec6 8382 .name = HPSA,
edd16368 8383 .probe = hpsa_init_one,
6f039790 8384 .remove = hpsa_remove_one,
edd16368
SC
8385 .id_table = hpsa_pci_device_id, /* id_table */
8386 .shutdown = hpsa_shutdown,
8387 .suspend = hpsa_suspend,
8388 .resume = hpsa_resume,
8389};
8390
303932fd
DB
8391/* Fill in bucket_map[], given nsgs (the max number of
8392 * scatter gather elements supported) and bucket[],
8393 * which is an array of 8 integers. The bucket[] array
8394 * contains 8 different DMA transfer sizes (in 16-byte
8395 * increments) which the controller uses to fetch
8396 * commands. This function fills in bucket_map[], which
8397 * maps a given number of scatter gather elements to one of
8398 * the 8 DMA transfer sizes. The point of it is to allow the
8399 * controller to only do as much DMA as needed to fetch the
8400 * command, with the DMA transfer size encoded in the lower
8401 * bits of the command address.
8402 */
8403static void calc_bucket_map(int bucket[], int num_buckets,
2b08b3e9 8404 int nsgs, int min_blocks, u32 *bucket_map)
303932fd
DB
8405{
8406 int i, j, b, size;
8407
303932fd
DB
8408 /* Note, bucket_map must have nsgs+1 entries. */
8409 for (i = 0; i <= nsgs; i++) {
8410 /* Compute size of a command with i SG entries */
e1f7de0c 8411 size = i + min_blocks;
303932fd
DB
8412 b = num_buckets; /* Assume the biggest bucket */
8413 /* Find the bucket that is just big enough */
e1f7de0c 8414 for (j = 0; j < num_buckets; j++) {
303932fd
DB
8415 if (bucket[j] >= size) {
8416 b = j;
8417 break;
8418 }
8419 }
8420 /* for a command with i SG entries, use bucket b. */
8421 bucket_map[i] = b;
8422 }
8423}
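/*
 * Worked example (numbers from the performant-mode call below): with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks; the first bucket >= 7 is
 * bucket[2] = 8, so bucket_map[3] = 2 and the controller fetches
 * 8 * 16 = 128 bytes for that command.
 */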
8424
105a3dbc
RE
8425/*
8426 * return -ENODEV on err, 0 on success (or no action)
8427 * allocates numerous items that must be freed later
8428 */
c706a795 8429static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
303932fd 8430{
6c311b57
SC
8431 int i;
8432 unsigned long register_value;
e1f7de0c
MG
8433 unsigned long transMethod = CFGTBL_Trans_Performant |
8434 (trans_support & CFGTBL_Trans_use_short_tags) |
b9af4937
SC
8435 CFGTBL_Trans_enable_directed_msix |
8436 (trans_support & (CFGTBL_Trans_io_accel1 |
8437 CFGTBL_Trans_io_accel2));
e1f7de0c 8438 struct access_method access = SA5_performant_access;
def342bd
SC
8439
8440 /* This is a bit complicated. There are 8 registers on
8441 * the controller to which we write the 8 different command
8442 * sizes that may occur. It's a way of
8443 * reducing the DMA done to fetch each command. Encoded into
8444 * each command's tag are 3 bits which communicate to the controller
8445 * which of the eight sizes that command fits within. The size of
8446 * each command depends on how many scatter gather entries there are.
8447 * Each SG entry requires 16 bytes. The eight registers are programmed
8448 * with the number of 16-byte blocks a command of that size requires.
8449 * The smallest possible command requires 5 such 16-byte blocks;
8450 * the largest requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd
SC
8451 * blocks. Note, this only extends to the SG entries contained
8452 * within the command block, and does not extend to chained blocks
8453 * of SG elements. bft[] contains the eight values we write to
8454 * the registers. They are not evenly distributed, but have more
8455 * sizes for small commands, and fewer sizes for larger commands.
8456 */
d66ae08b 8457 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
b9af4937
SC
8458#define MIN_IOACCEL2_BFT_ENTRY 5
8459#define HPSA_IOACCEL2_HEADER_SZ 4
8460 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
8461 13, 14, 15, 16, 17, 18, 19,
8462 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
8463 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
8464 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
8465 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
8466 16 * MIN_IOACCEL2_BFT_ENTRY);
8467 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
d66ae08b 8468 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
303932fd
DB
8469 /* 5 = 1 s/g entry or 4k
8470 * 6 = 2 s/g entry or 8k
8471 * 8 = 4 s/g entry or 16k
8472 * 10 = 6 s/g entry or 24k
8473 */
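/* For instance, a command with 10 SG entries needs 10 + 4 = 14
 * blocks; the smallest bucket that fits is bft[5] = 20, so the
 * 3 tag bits carry the value 5 and the controller DMAs
 * 20 * 16 = 320 bytes instead of a maximum-sized command block.
 */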
303932fd 8474
b3a52e79
SC
8475 /* If the controller supports either ioaccel method then
8476 * we can also use the RAID stack submit path that does not
8477 * perform the superfluous readl() after each command submission.
8478 */
8479 if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
8480 access = SA5_performant_access_no_read;
8481
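/* Skipping that readl() avoids a non-posted PCI read (a full bus
 * round trip) on every submission; per the comment above, the posted
 * command write alone is sufficient on these controllers.
 */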
303932fd 8482 /* Controller spec: zero out this buffer. */
072b0518
SC
8483 for (i = 0; i < h->nreply_queues; i++)
8484 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
303932fd 8485
d66ae08b
SC
8486 bft[7] = SG_ENTRIES_IN_CMD + 4;
8487 calc_bucket_map(bft, ARRAY_SIZE(bft),
e1f7de0c 8488 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
303932fd
DB
8489 for (i = 0; i < 8; i++)
8490 writel(bft[i], &h->transtable->BlockFetch[i]);
8491
8492 /* size of controller ring buffer */
8493 writel(h->max_commands, &h->transtable->RepQSize);
254f796b 8494 writel(h->nreply_queues, &h->transtable->RepQCount);
303932fd
DB
8495 writel(0, &h->transtable->RepQCtrAddrLow32);
8496 writel(0, &h->transtable->RepQCtrAddrHigh32);
254f796b
MG
8497
8498 for (i = 0; i < h->nreply_queues; i++) {
8499 writel(0, &h->transtable->RepQAddr[i].upper);
072b0518 8500 writel(h->reply_queue[i].busaddr,
254f796b
MG
8501 &h->transtable->RepQAddr[i].lower);
8502 }
8503
b9af4937 8504 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
e1f7de0c
MG
8505 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
8506 /*
8507 * Enable outbound interrupt coalescing in accelerator mode.
8508 */
8509 if (trans_support & CFGTBL_Trans_io_accel1) {
8510 access = SA5_ioaccel_mode1_access;
8511 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8512 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
c349775e
ST
8513 } else {
8514 if (trans_support & CFGTBL_Trans_io_accel2) {
8515 access = SA5_ioaccel_mode2_access;
8516 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
8517 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
8518 }
e1f7de0c 8519 }
303932fd 8520 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8521 if (hpsa_wait_for_mode_change_ack(h)) {
8522 dev_err(&h->pdev->dev,
8523 "performant mode problem - doorbell timeout\n");
8524 return -ENODEV;
8525 }
303932fd
DB
8526 register_value = readl(&(h->cfgtable->TransportActive));
8527 if (!(register_value & CFGTBL_Trans_Performant)) {
050f7147
SC
8528 dev_err(&h->pdev->dev,
8529 "performant mode problem - transport not active\n");
c706a795 8530 return -ENODEV;
303932fd 8531 }
960a30e7 8532 /* Change the access methods to the performant access methods */
e1f7de0c
MG
8533 h->access = access;
8534 h->transMethod = transMethod;
8535
b9af4937
SC
8536 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
8537 (trans_support & CFGTBL_Trans_io_accel2)))
c706a795 8538 return 0;
e1f7de0c 8539
b9af4937
SC
8540 if (trans_support & CFGTBL_Trans_io_accel1) {
8541 /* Set up I/O accelerator mode */
8542 for (i = 0; i < h->nreply_queues; i++) {
8543 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
8544 h->reply_queue[i].current_entry =
8545 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
8546 }
8547 bft[7] = h->ioaccel_maxsg + 8;
8548 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
8549 h->ioaccel1_blockFetchTable);
e1f7de0c 8550
b9af4937 8551 /* initialize all reply queue entries to unused */
072b0518
SC
8552 for (i = 0; i < h->nreply_queues; i++)
8553 memset(h->reply_queue[i].head,
8554 (u8) IOACCEL_MODE1_REPLY_UNUSED,
8555 h->reply_queue_size);
e1f7de0c 8556
b9af4937
SC
8557 /* set all the constant fields in the accelerator command
8558 * frames once at init time to save CPU cycles later.
8559 */
8560 for (i = 0; i < h->nr_cmds; i++) {
8561 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
8562
8563 cp->function = IOACCEL1_FUNCTION_SCSIIO;
8564 cp->err_info = (u32) (h->errinfo_pool_dhandle +
8565 (i * sizeof(struct ErrorInfo)));
8566 cp->err_info_len = sizeof(struct ErrorInfo);
8567 cp->sgl_offset = IOACCEL1_SGLOFFSET;
2b08b3e9
DB
8568 cp->host_context_flags =
8569 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
b9af4937
SC
8570 cp->timeout_sec = 0;
8571 cp->ReplyQueue = 0;
50a0decf 8572 cp->tag =
f2405db8 8573 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
50a0decf
SC
8574 cp->host_addr =
8575 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
b9af4937 8576 (i * sizeof(struct io_accel1_cmd)));
b9af4937
SC
8577 }
8578 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8579 u64 cfg_offset, cfg_base_addr_index;
8580 u32 bft2_offset, cfg_base_addr;
8581 int rc;
8582
8583 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
8584 &cfg_base_addr_index, &cfg_offset);
8585 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
8586 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
8587 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
8588 4, h->ioaccel2_blockFetchTable);
8589 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
8590 BUILD_BUG_ON(offsetof(struct CfgTable,
8591 io_accel_request_size_offset) != 0xb8);
8592 h->ioaccel2_bft2_regs =
8593 remap_pci_mem(pci_resource_start(h->pdev,
8594 cfg_base_addr_index) +
8595 cfg_offset + bft2_offset,
8596 ARRAY_SIZE(bft2) *
8597 sizeof(*h->ioaccel2_bft2_regs));
8598 for (i = 0; i < ARRAY_SIZE(bft2); i++)
8599 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
e1f7de0c 8600 }
b9af4937 8601 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
c706a795
RE
8602 if (hpsa_wait_for_mode_change_ack(h)) {
8603 dev_err(&h->pdev->dev,
8604 "performant mode problem - enabling ioaccel mode\n");
8605 return -ENODEV;
8606 }
8607 return 0;
e1f7de0c
MG
8608}
8609
1fb7c98a
RE
8610/* Free ioaccel1 mode command blocks and block fetch table */
8611static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
8612{
105a3dbc 8613 if (h->ioaccel_cmd_pool) {
1fb7c98a
RE
8614 pci_free_consistent(h->pdev,
8615 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8616 h->ioaccel_cmd_pool,
8617 h->ioaccel_cmd_pool_dhandle);
105a3dbc
RE
8618 h->ioaccel_cmd_pool = NULL;
8619 h->ioaccel_cmd_pool_dhandle = 0;
8620 }
1fb7c98a 8621 kfree(h->ioaccel1_blockFetchTable);
105a3dbc 8622 h->ioaccel1_blockFetchTable = NULL;
1fb7c98a
RE
8623}
8624
d37ffbe4
RE
8625/* Allocate ioaccel1 mode command blocks and block fetch table */
8626static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
e1f7de0c 8627{
283b4a9b
SC
8628 h->ioaccel_maxsg =
8629 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8630 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
8631 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
8632
e1f7de0c
MG
8633 /* Command structures must be aligned on a 128-byte boundary
8634 * because the 7 lower bits of the address are used by the
8635 * hardware.
8636 */
e1f7de0c
MG
8637 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
8638 IOACCEL1_COMMANDLIST_ALIGNMENT);
8639 h->ioaccel_cmd_pool =
8640 pci_alloc_consistent(h->pdev,
8641 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
8642 &(h->ioaccel_cmd_pool_dhandle));
8643
8644 h->ioaccel1_blockFetchTable =
283b4a9b 8645 kmalloc(((h->ioaccel_maxsg + 1) *
e1f7de0c
MG
8646 sizeof(u32)), GFP_KERNEL);
8647
8648 if ((h->ioaccel_cmd_pool == NULL) ||
8649 (h->ioaccel1_blockFetchTable == NULL))
8650 goto clean_up;
8651
8652 memset(h->ioaccel_cmd_pool, 0,
8653 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
8654 return 0;
8655
8656clean_up:
1fb7c98a 8657 hpsa_free_ioaccel1_cmd_and_bft(h);
2dd02d74 8658 return -ENOMEM;
6c311b57
SC
8659}
8660
1fb7c98a
RE
8661/* Free ioaccel2 mode command blocks and block fetch table */
8662static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
8663{
d9a729f3
WS
8664 hpsa_free_ioaccel2_sg_chain_blocks(h);
8665
105a3dbc 8666 if (h->ioaccel2_cmd_pool) {
1fb7c98a
RE
8667 pci_free_consistent(h->pdev,
8668 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8669 h->ioaccel2_cmd_pool,
8670 h->ioaccel2_cmd_pool_dhandle);
105a3dbc
RE
8671 h->ioaccel2_cmd_pool = NULL;
8672 h->ioaccel2_cmd_pool_dhandle = 0;
8673 }
1fb7c98a 8674 kfree(h->ioaccel2_blockFetchTable);
105a3dbc 8675 h->ioaccel2_blockFetchTable = NULL;
1fb7c98a
RE
8676}
8677
d37ffbe4
RE
8678/* Allocate ioaccel2 mode command blocks and block fetch table */
8679static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
aca9012a 8680{
d9a729f3
WS
8681 int rc;
8682
aca9012a
SC
8683 /* Allocate ioaccel2 mode command blocks and block fetch table */
8684
8685 h->ioaccel_maxsg =
8686 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
8687 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
8688 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
8689
aca9012a
SC
8690 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
8691 IOACCEL2_COMMANDLIST_ALIGNMENT);
8692 h->ioaccel2_cmd_pool =
8693 pci_alloc_consistent(h->pdev,
8694 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
8695 &(h->ioaccel2_cmd_pool_dhandle));
8696
8697 h->ioaccel2_blockFetchTable =
8698 kmalloc(((h->ioaccel_maxsg + 1) *
8699 sizeof(u32)), GFP_KERNEL);
8700
8701 if ((h->ioaccel2_cmd_pool == NULL) ||
d9a729f3
WS
8702 (h->ioaccel2_blockFetchTable == NULL)) {
8703 rc = -ENOMEM;
8704 goto clean_up;
8705 }
8706
8707 rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
8708 if (rc)
aca9012a
SC
8709 goto clean_up;
8710
8711 memset(h->ioaccel2_cmd_pool, 0,
8712 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
8713 return 0;
8714
8715clean_up:
1fb7c98a 8716 hpsa_free_ioaccel2_cmd_and_bft(h);
d9a729f3 8717 return rc;
aca9012a
SC
8718}
8719
105a3dbc
RE
8720/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
8721static void hpsa_free_performant_mode(struct ctlr_info *h)
8722{
8723 kfree(h->blockFetchTable);
8724 h->blockFetchTable = NULL;
8725 hpsa_free_reply_queues(h);
8726 hpsa_free_ioaccel1_cmd_and_bft(h);
8727 hpsa_free_ioaccel2_cmd_and_bft(h);
8728}
8729
8730/* return -ENODEV on error, 0 on success (or no action)
8731 * allocates numerous items that must be freed later
8732 */
8733static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
6c311b57
SC
8734{
8735 u32 trans_support;
e1f7de0c
MG
8736 unsigned long transMethod = CFGTBL_Trans_Performant |
8737 CFGTBL_Trans_use_short_tags;
105a3dbc 8738 int i, rc;
6c311b57 8739
02ec19c8 8740 if (hpsa_simple_mode)
105a3dbc 8741 return 0;
02ec19c8 8742
67c99a72 8743 trans_support = readl(&(h->cfgtable->TransportSupport));
8744 if (!(trans_support & PERFORMANT_MODE))
105a3dbc 8745 return 0;
67c99a72 8746
e1f7de0c
MG
8747 /* Check for I/O accelerator mode support */
8748 if (trans_support & CFGTBL_Trans_io_accel1) {
8749 transMethod |= CFGTBL_Trans_io_accel1 |
8750 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8751 rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
8752 if (rc)
8753 return rc;
8754 } else if (trans_support & CFGTBL_Trans_io_accel2) {
8755 transMethod |= CFGTBL_Trans_io_accel2 |
aca9012a 8756 CFGTBL_Trans_enable_directed_msix;
105a3dbc
RE
8757 rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
8758 if (rc)
8759 return rc;
e1f7de0c
MG
8760 }
8761
eee0f03a 8762 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
cba3d38b 8763 hpsa_get_max_perf_mode_cmds(h);
6c311b57 8764 /* Performant mode ring buffer and supporting data structures */
072b0518 8765 h->reply_queue_size = h->max_commands * sizeof(u64);
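/* Each reply-queue element is a single 8-byte completion tag, so a
 * queue with max_commands entries is always deep enough: there can
 * never be more outstanding commands than slots to complete them into.
 */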
6c311b57 8766
254f796b 8767 for (i = 0; i < h->nreply_queues; i++) {
072b0518
SC
8768 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
8769 h->reply_queue_size,
8770 &(h->reply_queue[i].busaddr));
105a3dbc
RE
8771 if (!h->reply_queue[i].head) {
8772 rc = -ENOMEM;
8773 goto clean1; /* rq, ioaccel */
8774 }
254f796b
MG
8775 h->reply_queue[i].size = h->max_commands;
8776 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
8777 h->reply_queue[i].current_entry = 0;
8778 }
8779
6c311b57 8780 /* Need a block fetch table for performant mode */
d66ae08b 8781 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
6c311b57 8782 sizeof(u32)), GFP_KERNEL);
105a3dbc
RE
8783 if (!h->blockFetchTable) {
8784 rc = -ENOMEM;
8785 goto clean1; /* rq, ioaccel */
8786 }
6c311b57 8787
105a3dbc
RE
8788 rc = hpsa_enter_performant_mode(h, trans_support);
8789 if (rc)
8790 goto clean2; /* bft, rq, ioaccel */
8791 return 0;
303932fd 8792
105a3dbc 8793clean2: /* bft, rq, ioaccel */
303932fd 8794 kfree(h->blockFetchTable);
105a3dbc
RE
8795 h->blockFetchTable = NULL;
8796clean1: /* rq, ioaccel */
8797 hpsa_free_reply_queues(h);
8798 hpsa_free_ioaccel1_cmd_and_bft(h);
8799 hpsa_free_ioaccel2_cmd_and_bft(h);
8800 return rc;
303932fd
DB
8801}
8802
23100dd9 8803static int is_accelerated_cmd(struct CommandList *c)
76438d08 8804{
23100dd9
SC
8805 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
8806}
8807
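/*
 * Poll the command pool until no accelerated commands remain in
 * flight. The refcount probe below relies on cmd_alloc() holding one
 * reference: if atomic_inc_return() yields > 1 the slot is currently
 * allocated, and the cmd_free() that follows drops only the extra
 * reference this probe took.
 */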
8808static void hpsa_drain_accel_commands(struct ctlr_info *h)
8809{
8810 struct CommandList *c = NULL;
f2405db8 8811 int i, accel_cmds_out;
281a7fd0 8812 int refcount;
76438d08 8813
f2405db8 8814 do { /* wait for all outstanding ioaccel commands to drain out */
23100dd9 8815 accel_cmds_out = 0;
f2405db8 8816 for (i = 0; i < h->nr_cmds; i++) {
f2405db8 8817 c = h->cmd_pool + i;
281a7fd0
WS
8818 refcount = atomic_inc_return(&c->refcount);
8819 if (refcount > 1) /* Command is allocated */
8820 accel_cmds_out += is_accelerated_cmd(c);
8821 cmd_free(h, c);
f2405db8 8822 }
23100dd9 8823 if (accel_cmds_out <= 0)
281a7fd0 8824 break;
76438d08
SC
8825 msleep(100);
8826 } while (1);
8827}
8828
edd16368
SC
8829/*
8830 * This is it. Register the PCI driver information for the cards we control;
8831 * the OS will call our registered routines when it finds one of our cards.
8832 */
8833static int __init hpsa_init(void)
8834{
31468401 8835 return pci_register_driver(&hpsa_pci_driver);
edd16368
SC
8836}
8837
8838static void __exit hpsa_cleanup(void)
8839{
8840 pci_unregister_driver(&hpsa_pci_driver);
edd16368
SC
8841}
8842
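/*
 * verify_offsets() is never called; it exists only so that the
 * BUILD_BUG_ON() checks below break the build at compile time if a
 * structure member ever drifts from the offset the controller
 * firmware expects.
 */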
e1f7de0c
MG
8843static void __attribute__((unused)) verify_offsets(void)
8844{
dd0e19f3
ST
8845#define VERIFY_OFFSET(member, offset) \
8846 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
8847
8848 VERIFY_OFFSET(structure_size, 0);
8849 VERIFY_OFFSET(volume_blk_size, 4);
8850 VERIFY_OFFSET(volume_blk_cnt, 8);
8851 VERIFY_OFFSET(phys_blk_shift, 16);
8852 VERIFY_OFFSET(parity_rotation_shift, 17);
8853 VERIFY_OFFSET(strip_size, 18);
8854 VERIFY_OFFSET(disk_starting_blk, 20);
8855 VERIFY_OFFSET(disk_blk_cnt, 28);
8856 VERIFY_OFFSET(data_disks_per_row, 36);
8857 VERIFY_OFFSET(metadata_disks_per_row, 38);
8858 VERIFY_OFFSET(row_cnt, 40);
8859 VERIFY_OFFSET(layout_map_count, 42);
8860 VERIFY_OFFSET(flags, 44);
8861 VERIFY_OFFSET(dekindex, 46);
8862 /* VERIFY_OFFSET(reserved, 48); */
8863 VERIFY_OFFSET(data, 64);
8864
8865#undef VERIFY_OFFSET
8866
b66cc250
MM
8867#define VERIFY_OFFSET(member, offset) \
8868 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
8869
8870 VERIFY_OFFSET(IU_type, 0);
8871 VERIFY_OFFSET(direction, 1);
8872 VERIFY_OFFSET(reply_queue, 2);
8873 /* VERIFY_OFFSET(reserved1, 3); */
8874 VERIFY_OFFSET(scsi_nexus, 4);
8875 VERIFY_OFFSET(Tag, 8);
8876 VERIFY_OFFSET(cdb, 16);
8877 VERIFY_OFFSET(cciss_lun, 32);
8878 VERIFY_OFFSET(data_len, 40);
8879 VERIFY_OFFSET(cmd_priority_task_attr, 44);
8880 VERIFY_OFFSET(sg_count, 45);
8881 /* VERIFY_OFFSET(reserved3 */
8882 VERIFY_OFFSET(err_ptr, 48);
8883 VERIFY_OFFSET(err_len, 56);
8884 /* VERIFY_OFFSET(reserved4 */
8885 VERIFY_OFFSET(sg, 64);
8886
8887#undef VERIFY_OFFSET
8888
e1f7de0c
MG
8889#define VERIFY_OFFSET(member, offset) \
8890 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
8891
8892 VERIFY_OFFSET(dev_handle, 0x00);
8893 VERIFY_OFFSET(reserved1, 0x02);
8894 VERIFY_OFFSET(function, 0x03);
8895 VERIFY_OFFSET(reserved2, 0x04);
8896 VERIFY_OFFSET(err_info, 0x0C);
8897 VERIFY_OFFSET(reserved3, 0x10);
8898 VERIFY_OFFSET(err_info_len, 0x12);
8899 VERIFY_OFFSET(reserved4, 0x13);
8900 VERIFY_OFFSET(sgl_offset, 0x14);
8901 VERIFY_OFFSET(reserved5, 0x15);
8902 VERIFY_OFFSET(transfer_len, 0x1C);
8903 VERIFY_OFFSET(reserved6, 0x20);
8904 VERIFY_OFFSET(io_flags, 0x24);
8905 VERIFY_OFFSET(reserved7, 0x26);
8906 VERIFY_OFFSET(LUN, 0x34);
8907 VERIFY_OFFSET(control, 0x3C);
8908 VERIFY_OFFSET(CDB, 0x40);
8909 VERIFY_OFFSET(reserved8, 0x50);
8910 VERIFY_OFFSET(host_context_flags, 0x60);
8911 VERIFY_OFFSET(timeout_sec, 0x62);
8912 VERIFY_OFFSET(ReplyQueue, 0x64);
8913 VERIFY_OFFSET(reserved9, 0x65);
50a0decf 8914 VERIFY_OFFSET(tag, 0x68);
e1f7de0c
MG
8915 VERIFY_OFFSET(host_addr, 0x70);
8916 VERIFY_OFFSET(CISS_LUN, 0x78);
8917 VERIFY_OFFSET(SG, 0x78 + 8);
8918#undef VERIFY_OFFSET
8919}
8920
edd16368
SC
8921module_init(hpsa_init);
8922module_exit(hpsa_cleanup);