drivers/scsi/hpsa.c
[SCSI] hpsa: do not read from controller unnecessarily in completion code

/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
        "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
        {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324a103C, "Smart Array P712m", &SA5_access},
        {0x324b103C, "Smart Array P711m", &SA5_access},
        {0x3350103C, "Smart Array", &SA5_access},
        {0x3351103C, "Smart Array", &SA5_access},
        {0x3352103C, "Smart Array", &SA5_access},
        {0x3353103C, "Smart Array", &SA5_access},
        {0x3354103C, "Smart Array", &SA5_access},
        {0x3355103C, "Smart Array", &SA5_access},
        {0x3356103C, "Smart Array", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list);
static spinlock_t lockup_detector_lock;
static struct task_struct *hpsa_lockup_detector;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
        int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
        int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
        void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
        u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
        unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
        void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
                return 0;

        switch (c->err_info->SenseInfo[12]) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev, HPSA "%d: a state change "
                        "detected, command retried\n", h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
                        "detected, action required\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
                        "changed, action required\n", h->ctlr);
                /*
                 * Note: this REPORT_LUNS_CHANGED condition only occurs on
                 * the external target (array) devices.
                 */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, HPSA "%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
                        "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, HPSA "%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

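/*
 * A note on the offsets used above: in fixed-format SCSI sense data the
 * low nibble of byte 2 carries the sense key and byte 12 carries the
 * additional sense code (ASC). So SenseInfo[2] != UNIT_ATTENTION tests
 * the sense key, and the switch on SenseInfo[12] discriminates among the
 * unit-attention ASC values (STATE_CHANGED, LUN_FAILED, and so on).
 */
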
static ssize_t host_store_rescan(struct device *dev,
        struct device_attribute *attr,
        const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
                if (unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
                if (soft_unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

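/*
 * The test above looks at the top two bits of byte 3 of the 8-byte CISS
 * LUN address, which encode the addressing mode: the bit pattern 01
 * (0x40 after masking with 0xC0) marks a logical-volume address, as
 * opposed to a physical/peripheral address. For example, a byte-3 value
 * of 0x40 or 0x41 tests as logical, while 0x00 or 0xC0 does not.
 */
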
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
        "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        NULL,
};

static struct scsi_host_template hpsa_driver_template = {
        .module = THIS_MODULE,
        .name = HPSA,
        .proc_name = HPSA,
        .queuecommand = hpsa_scsi_queue_command,
        .scan_start = hpsa_scan_start,
        .scan_finished = hpsa_scan_finished,
        .change_queue_depth = hpsa_change_queue_depth,
        .this_id = -1,
        .use_clustering = ENABLE_CLUSTERING,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl = hpsa_ioctl,
        .slave_alloc = hpsa_slave_alloc,
        .slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
        .max_sectors = 8192,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
        list_add_tail(&c->list, list);
}

static inline u32 next_command(struct ctlr_info *h)
{
        u32 a;

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h);

        if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
                a = *(h->reply_pool_head); /* Next cmd in ring buffer */
                (h->reply_pool_head)++;
                h->commands_outstanding--;
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
                h->reply_pool_head = h->reply_pool;
                h->reply_pool_wraparound ^= 1;
        }
        return a;
}

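/*
 * How the reply ring above works: in performant mode the controller DMAs
 * each completed command tag into the host-memory reply_pool ring and, in
 * this scheme, stamps bit 0 of the entry with the current cycle parity.
 * An entry is treated as valid only while its low bit matches
 * h->reply_pool_wraparound; whenever the head pointer wraps, the expected
 * parity flips, so entries left over from the previous pass read back as
 * FIFO_EMPTY. This lets the completion path consume tags without touching
 * a controller register -- the point of the commit named at the top of
 * this file.
 */
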
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant))
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

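/*
 * Worked example of the tag encoding above (values hypothetical): if a
 * command embeds 4 scatter-gather entries and h->blockFetchTable[4] == 2,
 * busaddr is OR'ed with 1 | (2 << 1) == 0x5. Bit 0 selects the pull
 * model, and bits 3-1 tell the controller which block fetch count to use
 * when it DMAs the command, so it fetches just enough of the CommandList.
 */
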
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        set_performant_mode(h, c);
        spin_lock_irqsave(&h->lock, flags);
        addQ(&h->reqQ, c);
        h->Qdepth++;
        start_io(h);
        spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
        if (WARN_ON(list_empty(&c->list)))
                return;
        list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

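/*
 * is_scsi_rev_5() checks the version field in byte 2 of the controller's
 * standard INQUIRY data (only the low three bits are examined here); a
 * version code of 5 corresponds to SPC-3. Later code uses this to
 * special-case newer controllers such as the P1210m.
 */
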
static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

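/*
 * Example of the bitmap allocation above: if targets 0, 1 and 3 on this
 * bus are already taken, lun_taken has bits 0, 1 and 3 set, so
 * find_first_zero_bit() returns 2 and the new device becomes target 2,
 * lun 0. The function returns 0 on success and 1 (i.e. !found) when all
 * HPSA_MAX_DEVICES target slots are in use.
 */
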
/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *device,
        struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;

        /* initially, (before registering with scsi layer) we don't
         * know our hostno and we don't want to print anything first
         * time anyway (the scsi layer's inquiries will show that info)
         */
        /* if (hostno != -1) */
                dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
                        scsi_device_type(device->devtype), hostno,
                        device->bus, device->target, device->lun);
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
                scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
                sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* Device attributes that can change, but don't mean
         * that the device is a different device, nor that the OS
         * needs to be told anything about the change.
         */
        if (dev1->raid_level != dev2->raid_level)
                return 1;
        return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i])) {
                                if (device_updated(needle, haystack[i]))
                                        return DEVICE_UPDATED;
                                return DEVICE_SAME;
                        } else {
                                return DEVICE_CHANGED;
                        }
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
        struct hpsa_scsi_dev_t *sd[], int nsds)
{
        /* sd contains scsi3 addresses and devtypes, and inquiry
         * data.  This function takes what's in sd to be the current
         * reality and updates h->dev[] to reflect that reality.
         */
        int i, entry, device_change, changes = 0;
        struct hpsa_scsi_dev_t *csd;
        unsigned long flags;
        struct hpsa_scsi_dev_t **added, **removed;
        int nadded, nremoved;
        struct Scsi_Host *sh = NULL;

        added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
        removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

        if (!added || !removed) {
                dev_warn(&h->pdev->dev, "out of memory in "
                        "adjust_hpsa_scsi_table\n");
                goto free_and_out;
        }

        spin_lock_irqsave(&h->devlock, flags);

        /* find any devices in h->dev[] that are not in
         * sd[] and remove them from h->dev[], and for any
         * devices which have changed, remove the old device
         * info and add the new device info.
         * If minor device attributes change, just update
         * the existing device structure.
         */
        i = 0;
        nremoved = 0;
        nadded = 0;
        while (i < h->ndevices) {
                csd = h->dev[i];
                device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        hpsa_scsi_remove_entry(h, hostno, i,
                                removed, &nremoved);
                        continue; /* remove ^^^, hence i not incremented */
                } else if (device_change == DEVICE_CHANGED) {
                        changes++;
                        hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
                                added, &nadded, removed, &nremoved);
                        /* Set it to NULL to prevent it from being freed
                         * at the bottom of hpsa_update_scsi_devices()
                         */
                        sd[entry] = NULL;
                } else if (device_change == DEVICE_UPDATED) {
                        hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
                }
                i++;
        }

        /* Now, make sure every device listed in sd[] is also
         * listed in h->dev[], adding them if they aren't found
         */

        for (i = 0; i < nsds; i++) {
                if (!sd[i]) /* if already added above. */
                        continue;
                device_change = hpsa_scsi_find_entry(sd[i], h->dev,
                        h->ndevices, &entry);
                if (device_change == DEVICE_NOT_FOUND) {
                        changes++;
                        if (hpsa_scsi_add_entry(h, hostno, sd[i],
                                added, &nadded) != 0)
                                break;
                        sd[i] = NULL; /* prevent from being freed later. */
                } else if (device_change == DEVICE_CHANGED) {
                        /* should never happen... */
                        changes++;
                        dev_warn(&h->pdev->dev,
                                "device unexpectedly changed.\n");
                        /* but if it does happen, we just ignore that device */
                }
        }
        spin_unlock_irqrestore(&h->devlock, flags);

        /* Don't notify scsi mid layer of any changes the first time through
         * (or if there are no changes) scsi_scan_host will do it later the
         * first time through.
         */
        if (hostno == -1 || !changes)
                goto free_and_out;

        sh = h->scsi_host;
        /* Notify scsi mid layer of any removed devices */
        for (i = 0; i < nremoved; i++) {
                struct scsi_device *sdev =
                        scsi_device_lookup(sh, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                if (sdev != NULL) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else {
                        /* We don't expect to get here.
                         * future cmds to this device will get selection
                         * timeout as if the device was gone.
                         */
                        dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
                                " for removal.", hostno, removed[i]->bus,
                                removed[i]->target, removed[i]->lun);
                }
                kfree(removed[i]);
                removed[i] = NULL;
        }

        /* Notify scsi mid layer of any added devices */
        for (i = 0; i < nadded; i++) {
                if (scsi_add_device(sh, added[i]->bus,
                        added[i]->target, added[i]->lun) == 0)
                        continue;
                dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
                        "device not added.\n", hostno, added[i]->bus,
                        added[i]->target, added[i]->lun);
                /* now we have to remove it from h->dev,
                 * since it didn't get added to scsi mid layer
                 */
                fixup_botched_add(h, added[i]);
        }

free_and_out:
        kfree(added);
        kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
        int bus, int target, int lun)
{
        int i;
        struct hpsa_scsi_dev_t *sd;

        for (i = 0; i < h->ndevices; i++) {
                sd = h->dev[i];
                if (sd->bus == bus && sd->target == target && sd->lun == lun)
                        return sd;
        }
        return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
        struct hpsa_scsi_dev_t *sd;
        unsigned long flags;
        struct ctlr_info *h;

        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->devlock, flags);
        sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
                sdev_id(sdev), sdev->lun);
        if (sd != NULL)
                sdev->hostdata = sd;
        spin_unlock_irqrestore(&h->devlock, flags);
        return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
        /* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
        int i;

        if (!h->cmd_sg_list)
                return;
        for (i = 0; i < h->nr_cmds; i++) {
                kfree(h->cmd_sg_list[i]);
                h->cmd_sg_list[i] = NULL;
        }
        kfree(h->cmd_sg_list);
        h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
        int i;

        if (h->chainsize <= 0)
                return 0;

        h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
                GFP_KERNEL);
        if (!h->cmd_sg_list)
                return -ENOMEM;
        for (i = 0; i < h->nr_cmds; i++) {
                h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
                        h->chainsize, GFP_KERNEL);
                if (!h->cmd_sg_list[i])
                        goto clean;
        }
        return 0;

clean:
        hpsa_free_sg_chain_blocks(h);
        return -ENOMEM;
}

static void hpsa_map_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
{
        struct SGDescriptor *chain_sg, *chain_block;
        u64 temp64;

        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
        chain_block = h->cmd_sg_list[c->cmdindex];
        chain_sg->Ext = HPSA_SG_CHAIN;
        chain_sg->Len = sizeof(*chain_sg) *
                (c->Header.SGTotal - h->max_cmd_sg_entries);
        temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
                PCI_DMA_TODEVICE);
        chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
        chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
        struct CommandList *c)
{
        struct SGDescriptor *chain_sg;
        union u64bit temp64;

        if (c->Header.SGTotal <= h->max_cmd_sg_entries)
                return;

        chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
        temp64.val32.lower = chain_sg->Addr.lower;
        temp64.val32.upper = chain_sg->Addr.upper;
        pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

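/*
 * Scatter-gather chaining, in brief: a CommandList can embed only
 * h->max_cmd_sg_entries SG descriptors. When a request needs more, the
 * last embedded descriptor is repurposed as a chain pointer (Ext set to
 * HPSA_SG_CHAIN) whose address/length describe the per-command chain
 * block (h->cmd_sg_list[c->cmdindex], sized by h->chainsize at
 * allocation time) holding the remaining SGTotal - max_cmd_sg_entries
 * descriptors. The chain block itself must be DMA-mapped toward the
 * device, and unmapped on completion, which is what the map/unmap
 * helpers above do.
 */
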
static void complete_scsi_command(struct CommandList *cp)
{
        struct scsi_cmnd *cmd;
        struct ctlr_info *h;
        struct ErrorInfo *ei;

        unsigned char sense_key;
        unsigned char asc;      /* additional sense code */
        unsigned char ascq;     /* additional sense code qualifier */
        unsigned long sense_data_size;

        ei = cp->err_info;
        cmd = (struct scsi_cmnd *) cp->scsi_cmd;
        h = cp->h;

        scsi_dma_unmap(cmd); /* undo the DMA mappings */
        if (cp->Header.SGTotal > h->max_cmd_sg_entries)
                hpsa_unmap_sg_chain_block(h, cp);

        cmd->result = (DID_OK << 16);           /* host byte */
        cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
        cmd->result |= ei->ScsiStatus;

        /* copy the sense data whether we need to or not. */
        if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
                sense_data_size = SCSI_SENSE_BUFFERSIZE;
        else
                sense_data_size = sizeof(ei->SenseInfo);
        if (ei->SenseLen < sense_data_size)
                sense_data_size = ei->SenseLen;

        memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
        scsi_set_resid(cmd, ei->ResidualCnt);

        if (ei->CommandStatus == 0) {
                cmd->scsi_done(cmd);
                cmd_free(h, cp);
                return;
        }

        /* an error has occurred */
        switch (ei->CommandStatus) {

        case CMD_TARGET_STATUS:
                if (ei->ScsiStatus) {
                        /* Get sense key */
                        sense_key = 0xf & ei->SenseInfo[2];
                        /* Get additional sense code */
                        asc = ei->SenseInfo[12];
                        /* Get additional sense code qualifier */
                        ascq = ei->SenseInfo[13];
                }

                if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
                        if (check_for_unit_attention(h, cp)) {
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        if (sense_key == ILLEGAL_REQUEST) {
                                /*
                                 * SCSI REPORT_LUNS is commonly unsupported on
                                 * Smart Array.  Suppress noisy complaint.
                                 */
                                if (cp->Request.CDB[0] == REPORT_LUNS)
                                        break;

                                /* If ASC/ASCQ indicate Logical Unit
                                 * Not Supported condition,
                                 */
                                if ((asc == 0x25) && (ascq == 0x0)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition\n", cp);
                                        break;
                                }
                        }

                        if (sense_key == NOT_READY) {
                                /* If Sense is Not Ready, Logical Unit
                                 * Not ready, Manual Intervention
                                 * required
                                 */
                                if ((asc == 0x04) && (ascq == 0x03)) {
                                        dev_warn(&h->pdev->dev, "cp %p "
                                                "has check condition: unit "
                                                "not ready, manual "
                                                "intervention required\n", cp);
                                        break;
                                }
                        }
                        if (sense_key == ABORTED_COMMAND) {
                                /* Aborted command is retryable */
                                dev_warn(&h->pdev->dev, "cp %p "
                                        "has check condition: aborted command: "
                                        "ASC: 0x%x, ASCQ: 0x%x\n",
                                        cp, asc, ascq);
                                cmd->result = DID_SOFT_ERROR << 16;
                                break;
                        }
                        /* Must be some other type of check condition */
                        dev_dbg(&h->pdev->dev, "cp %p has check condition: "
                                        "unknown type: "
                                        "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                        "Returning result: 0x%x, "
                                        "cmd=[%02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x %02x "
                                        "%02x %02x %02x %02x %02x]\n",
                                        cp, sense_key, asc, ascq,
                                        cmd->result,
                                        cmd->cmnd[0], cmd->cmnd[1],
                                        cmd->cmnd[2], cmd->cmnd[3],
                                        cmd->cmnd[4], cmd->cmnd[5],
                                        cmd->cmnd[6], cmd->cmnd[7],
                                        cmd->cmnd[8], cmd->cmnd[9],
                                        cmd->cmnd[10], cmd->cmnd[11],
                                        cmd->cmnd[12], cmd->cmnd[13],
                                        cmd->cmnd[14], cmd->cmnd[15]);
                        break;
                }


                /* Problem was not a check condition
                 * Pass it up to the upper layers...
                 */
                if (ei->ScsiStatus) {
                        dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
                                "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
                                "Returning result: 0x%x\n",
                                cp, ei->ScsiStatus,
                                sense_key, asc, ascq,
                                cmd->result);
                } else {  /* scsi status is zero??? How??? */
                        dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
                                "Returning no connection.\n", cp);

                        /* Ordinarily, this case should never happen,
                         * but there is a bug in some released firmware
                         * revisions that allows it to happen if, for
                         * example, a 4100 backplane loses power and
                         * the tape drive is in it.  We assume that
                         * it's a fatal error of some kind because we
                         * can't show that it wasn't.  We will make it
                         * look like selection timeout since that is
                         * the most common reason for this to occur,
                         * and it's severe enough.
                         */

                        cmd->result = DID_NO_CONNECT << 16;
                }
                break;

        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(&h->pdev->dev, "cp %p has"
                        " completed with data overrun "
                        "reported\n", cp);
                break;
        case CMD_INVALID: {
                /* print_bytes(cp, sizeof(*cp), 1, 0);
                print_cmd(cp); */
                /* We get CMD_INVALID if you address a non-existent device
                 * instead of a selection timeout (no response).  You will
                 * see this if you yank out a drive, then try to access it.
                 * This is kind of a shame because it means that any other
                 * CMD_INVALID (e.g. driver bug) will get interpreted as a
                 * missing target. */
                cmd->result = DID_NO_CONNECT << 16;
        }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(&h->pdev->dev, "cp %p has "
                        "protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                cmd->result = DID_ABORT << 16;
                dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
                        cp, ei->ScsiStatus);
                break;
        case CMD_ABORT_FAILED:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
                dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
                        "abort\n", cp);
                break;
        case CMD_TIMEOUT:
                cmd->result = DID_TIME_OUT << 16;
                dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
                break;
        case CMD_UNABORTABLE:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "Command unabortable\n");
                break;
        default:
                cmd->result = DID_ERROR << 16;
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                        cp, ei->CommandStatus);
        }
        cmd->scsi_done(cmd);
        cmd_free(h, cp);
}

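/*
 * On the result encoding used throughout complete_scsi_command(): the
 * Linux SCSI midlayer packs cmd->result as (host byte << 16) |
 * (message byte << 8) | SCSI status byte. That is why host-level
 * dispositions such as DID_NO_CONNECT and DID_SOFT_ERROR are shifted
 * left by 16, COMMAND_COMPLETE is shifted left by 8, and ei->ScsiStatus
 * is OR'ed in unshifted.
 */
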
static void hpsa_pci_unmap(struct pci_dev *pdev,
        struct CommandList *c, int sg_used, int data_direction)
{
        int i;
        union u64bit addr64;

        for (i = 0; i < sg_used; i++) {
                addr64.val32.lower = c->SG[i].Addr.lower;
                addr64.val32.upper = c->SG[i].Addr.upper;
                pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
                        data_direction);
        }
}

static void hpsa_map_one(struct pci_dev *pdev,
        struct CommandList *cp,
        unsigned char *buf,
        size_t buflen,
        int data_direction)
{
        u64 addr64;

        if (buflen == 0 || data_direction == PCI_DMA_NONE) {
                cp->Header.SGList = 0;
                cp->Header.SGTotal = 0;
                return;
        }

        addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
        cp->SG[0].Addr.lower =
                (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Addr.upper =
                (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
        cp->SG[0].Len = buflen;
        cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
        cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

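/*
 * The SG descriptors carry DMA addresses as two 32-bit halves rather
 * than one 64-bit field, matching the controller's command format, so
 * hpsa_map_one() splits the dma_addr_t returned by pci_map_single():
 * e.g. 0x0000001234567890 is stored as lower = 0x34567890,
 * upper = 0x00000012.
 */
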
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
        struct CommandList *c)
{
        DECLARE_COMPLETION_ONSTACK(wait);

        c->waiting = &wait;
        enqueue_cmd_and_start_io(h, c);
        wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
        struct CommandList *c)
{
        unsigned long flags;

        /* If controller lockup detected, fake a hardware error. */
        spin_lock_irqsave(&h->lock, flags);
        if (unlikely(h->lockup_detected)) {
                spin_unlock_irqrestore(&h->lock, flags);
                c->err_info->CommandStatus = CMD_HARDWARE_ERR;
        } else {
                spin_unlock_irqrestore(&h->lock, flags);
                hpsa_scsi_do_simple_cmd_core(h, c);
        }
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
        struct CommandList *c, int data_direction)
{
        int retry_count = 0;

        do {
                memset(c->err_info, 0, sizeof(*c->err_info));
                hpsa_scsi_do_simple_cmd_core(h, c);
                retry_count++;
        } while (check_for_unit_attention(h, c) && retry_count <= 3);
        hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

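/*
 * The retry loop above reissues the command as long as the target keeps
 * reporting a unit attention (clearing err_info before each attempt so
 * stale sense data is not re-evaluated), giving up after at most four
 * issues in total (the initial attempt plus retries while
 * retry_count <= 3). Unit attentions are transient by nature -- they
 * report an event such as a reset or a LUN inventory change, not a
 * failure of the command itself -- so an immediate retry is appropriate.
 */
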
static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
        struct ErrorInfo *ei;
        struct device *d = &cp->h->pdev->dev;

        ei = cp->err_info;
        switch (ei->CommandStatus) {
        case CMD_TARGET_STATUS:
                dev_warn(d, "cmd %p has completed with errors\n", cp);
                dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
                        ei->ScsiStatus);
                if (ei->ScsiStatus == 0)
                        dev_warn(d, "SCSI status is abnormally zero.  "
                                "(probably indicates selection timeout "
                                "reported incorrectly due to a known "
                                "firmware bug, circa July, 2001.)\n");
                break;
        case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
                dev_info(d, "UNDERRUN\n");
                break;
        case CMD_DATA_OVERRUN:
                dev_warn(d, "cp %p has completed with data overrun\n", cp);
                break;
        case CMD_INVALID: {
                /* controller unfortunately reports SCSI passthru's
                 * to non-existent targets as invalid commands.
                 */
                dev_warn(d, "cp %p is reported invalid (probably means "
                        "target device no longer present)\n", cp);
                /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
                print_cmd(cp);  */
                }
                break;
        case CMD_PROTOCOL_ERR:
                dev_warn(d, "cp %p has protocol error\n", cp);
                break;
        case CMD_HARDWARE_ERR:
                /* cmd->result = DID_ERROR << 16; */
                dev_warn(d, "cp %p had hardware error\n", cp);
                break;
        case CMD_CONNECTION_LOST:
                dev_warn(d, "cp %p had connection lost\n", cp);
                break;
        case CMD_ABORTED:
                dev_warn(d, "cp %p was aborted\n", cp);
                break;
        case CMD_ABORT_FAILED:
                dev_warn(d, "cp %p reports abort failed\n", cp);
                break;
        case CMD_UNSOLICITED_ABORT:
                dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
                break;
        case CMD_TIMEOUT:
                dev_warn(d, "cp %p timed out\n", cp);
                break;
        case CMD_UNABORTABLE:
                dev_warn(d, "Command unabortable\n");
                break;
        default:
                dev_warn(d, "cp %p returned unknown status %x\n", cp,
                        ei->CommandStatus);
        }
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
        unsigned char page, unsigned char *buf,
        unsigned char bufsize)
{
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);

        if (c == NULL) {                        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -ENOMEM;
        }

        fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
        int rc = IO_OK;
        struct CommandList *c;
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);

        if (c == NULL) {                        /* trouble... */
                dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -ENOMEM;
        }

        fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
        hpsa_scsi_do_simple_cmd_core(h, c);
        /* no unmap needed here because no data xfer. */

        ei = c->err_info;
        if (ei->CommandStatus != 0) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
        unsigned char *scsi3addr, unsigned char *raid_level)
{
        int rc;
        unsigned char *buf;

        *raid_level = RAID_UNKNOWN;
        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return;
        rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
        if (rc == 0)
                *raid_level = buf[8];
        if (*raid_level > RAID_UNKNOWN)
                *raid_level = RAID_UNKNOWN;
        kfree(buf);
        return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
        unsigned char *device_id, int buflen)
{
        int rc;
        unsigned char *buf;

        if (buflen > 16)
                buflen = 16;
        buf = kzalloc(64, GFP_KERNEL);
        if (!buf)
                return -1;
        rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
        if (rc == 0)
                memcpy(device_id, &buf[8], buflen);
        kfree(buf);
        return rc != 0;
}

static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
                struct ReportLUNdata *buf, int bufsize,
                int extended_response)
{
        int rc = IO_OK;
        struct CommandList *c;
        unsigned char scsi3addr[8];
        struct ErrorInfo *ei;

        c = cmd_special_alloc(h);
        if (c == NULL) {                        /* trouble... */
                dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
                return -1;
        }
        /* address the controller */
        memset(scsi3addr, 0, sizeof(scsi3addr));
        fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
                buf, bufsize, 0, scsi3addr, TYPE_CMD);
        if (extended_response)
                c->Request.CDB[1] = extended_response;
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
        ei = c->err_info;
        if (ei->CommandStatus != 0 &&
            ei->CommandStatus != CMD_DATA_UNDERRUN) {
                hpsa_scsi_interpret_error(c);
                rc = -1;
        }
        cmd_special_free(h, c);
        return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
                struct ReportLUNdata *buf,
                int bufsize, int extended_response)
{
        return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
                struct ReportLUNdata *buf, int bufsize)
{
        return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

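/*
 * A note on the addressing in hpsa_scsi_do_report_luns(): the all-zero
 * scsi3addr is the 8-byte address of the controller itself, since
 * CISS_REPORT_LOGICAL/PHYSICAL LUNS are controller commands rather than
 * commands to a particular disk. When a nonzero extended_response is
 * requested it is passed in byte 1 of the CDB, presumably selecting a
 * more detailed per-LUN reply format from the firmware.
 */
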
static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
        int bus, int target, int lun)
{
        device->bus = bus;
        device->target = target;
        device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
        unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
        unsigned char *is_OBDR_device)
{

#define OBDR_SIG_OFFSET 43
#define OBDR_TAPE_SIG "$DR-10"
#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)

        unsigned char *inq_buff;
        unsigned char *obdr_sig;

        inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
        if (!inq_buff)
                goto bail_out;

        /* Do an inquiry to the device to see what it is. */
        if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
                (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
                /* Inquiry failed (msg printed already) */
                dev_err(&h->pdev->dev,
                        "hpsa_update_device_info: inquiry failed\n");
                goto bail_out;
        }

        this_device->devtype = (inq_buff[0] & 0x1f);
        memcpy(this_device->scsi3addr, scsi3addr, 8);
        memcpy(this_device->vendor, &inq_buff[8],
                sizeof(this_device->vendor));
        memcpy(this_device->model, &inq_buff[16],
                sizeof(this_device->model));
        memset(this_device->device_id, 0,
                sizeof(this_device->device_id));
        hpsa_get_device_id(h, scsi3addr, this_device->device_id,
                sizeof(this_device->device_id));

        if (this_device->devtype == TYPE_DISK &&
                is_logical_dev_addr_mode(scsi3addr))
                hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
        else
                this_device->raid_level = RAID_UNKNOWN;

        if (is_OBDR_device) {
                /* See if this is a One-Button-Disaster-Recovery device
                 * by looking for "$DR-10" at offset 43 in inquiry data.
                 */
                obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
                *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
                                        strncmp(obdr_sig, OBDR_TAPE_SIG,
                                                OBDR_SIG_LEN) == 0);
        }

        kfree(inq_buff);
        return 0;

bail_out:
        kfree(inq_buff);
        return 1;
}

static unsigned char *ext_target_model[] = {
        "MSA2012",
        "MSA2024",
        "MSA2312",
        "MSA2324",
        "P2000 G3 SAS",
        NULL,
};

static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
{
        int i;

        for (i = 0; ext_target_model[i]; i++)
                if (strncmp(device->model, ext_target_model[i],
                        strlen(ext_target_model[i])) == 0)
                        return 1;
        return 0;
}

/* Helper function to assign bus, target, lun mapping of devices.
 * Puts non-external target logical volumes on bus 0, external target logical
 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
 * Logical drive target and lun are assigned at this time, but
 * physical device lun and target assignment are deferred (assigned
 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
 */
static void figure_bus_target_lun(struct ctlr_info *h,
        u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
{
        u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));

        if (!is_logical_dev_addr_mode(lunaddrbytes)) {
                /* physical device, target and lun filled in later */
                if (is_hba_lunid(lunaddrbytes))
                        hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
                else
                        /* defer target, lun assignment for physical devices */
                        hpsa_set_bus_target_lun(device, 2, -1, -1);
                return;
        }
        /* It's a logical device */
        if (is_ext_target(h, device)) {
                /* external target way, put logicals on bus 1
                 * and match target/lun numbers box
                 * reports, other smart array, bus 0, target 0, match lunid
                 */
                hpsa_set_bus_target_lun(device,
                        1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
                return;
        }
        hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
}

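/*
 * Worked example for figure_bus_target_lun(): the first four bytes of
 * the LUN address are decoded as a little-endian u32. For a logical
 * volume behind an external (MSA-class) target with lunid 0x00120034,
 * the device lands on bus 1, target (0x00120034 >> 16) & 0x3fff == 0x12,
 * lun 0x34. A plain Smart Array logical volume with lunid 0x0005 lands
 * on bus 0, target 0, lun 5; the controller itself (the all-zero "HBA"
 * lunid) goes on bus 3.
 */
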
1707/*
1708 * If there is no lun 0 on a target, linux won't find any devices.
1709 * For the external targets (arrays), we have to manually detect the enclosure
1710 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1711 * it for some reason. *tmpdevice is the target we're adding,
1712 * this_device is a pointer into the current element of currentsd[]
1713 * that we're building up in update_scsi_devices(), below.
1714 * lunzerobits is a bitmap that tracks which targets already have a
1715 * lun 0 assigned.
1716 * Returns 1 if an enclosure was added, 0 if not.
1717 */
1718static int add_ext_target_dev(struct ctlr_info *h,
1719	struct hpsa_scsi_dev_t *tmpdevice,
1720	struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1721	unsigned long lunzerobits[], int *n_ext_target_devs)
1722{
1723 unsigned char scsi3addr[8];
1724
1725	if (test_bit(tmpdevice->target, lunzerobits))
1726 return 0; /* There is already a lun 0 on this target. */
1727
1728 if (!is_logical_dev_addr_mode(lunaddrbytes))
1729 return 0; /* It's the logical targets that may lack lun 0. */
1730
1731 if (!is_ext_target(h, tmpdevice))
1732 return 0; /* Only external target devices have this problem. */
1733
1734	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
1735 return 0;
1736
1737	memset(scsi3addr, 0, 8);
1738	scsi3addr[3] = tmpdevice->target;
1739 if (is_hba_lunid(scsi3addr))
1740 return 0; /* Don't add the RAID controller here. */
1741
1742 if (is_scsi_rev_5(h))
1743 return 0; /* p1210m doesn't need to do this. */
1744
1745	if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
1746 dev_warn(&h->pdev->dev, "Maximum number of external "
1747 "target devices exceeded. Check your hardware "
1748			"configuration.\n");
1749 return 0;
1750 }
1751
1752	if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1753		return 0;
1754	(*n_ext_target_devs)++;
1755 hpsa_set_bus_target_lun(this_device,
1756 tmpdevice->bus, tmpdevice->target, 0);
1757 set_bit(tmpdevice->target, lunzerobits);
1758 return 1;
1759}
1760
1761/*
1762 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
1763 * logdev. The number of luns in physdev and logdev are returned in
1764 * *nphysicals and *nlogicals, respectively.
1765 * Returns 0 on success, -1 otherwise.
1766 */
1767static int hpsa_gather_lun_info(struct ctlr_info *h,
1768 int reportlunsize,
1769 struct ReportLUNdata *physdev, u32 *nphysicals,
1770 struct ReportLUNdata *logdev, u32 *nlogicals)
1771{
1772 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1773 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
1774 return -1;
1775 }
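	/* LUNListLength is a big-endian byte count and each LUN entry
	 * is 8 bytes wide, so dividing by 8 yields the number of LUNs.
	 */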
1776	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8;
1777 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
1778 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
1779 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1780 *nphysicals - HPSA_MAX_PHYS_LUN);
1781 *nphysicals = HPSA_MAX_PHYS_LUN;
1782 }
1783 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1784 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
1785 return -1;
1786 }
1787	*nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
1788 /* Reject Logicals in excess of our max capability. */
1789 if (*nlogicals > HPSA_MAX_LUN) {
1790 dev_warn(&h->pdev->dev,
1791 "maximum logical LUNs (%d) exceeded. "
1792 "%d LUNs ignored.\n", HPSA_MAX_LUN,
1793 *nlogicals - HPSA_MAX_LUN);
1794 *nlogicals = HPSA_MAX_LUN;
1795 }
1796 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
1797 dev_warn(&h->pdev->dev,
1798 "maximum logical + physical LUNs (%d) exceeded. "
1799 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
1800 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
1801 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
1802 }
1803 return 0;
1804}
1805
1806u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
1807 int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list,
1808 struct ReportLUNdata *logdev_list)
1809{
1810 /* Helper function, figure out where the LUN ID info is coming from
1811 * given index i, lists of physical and logical devices, where in
1812 * the list the raid controller is supposed to appear (first or last)
1813 */
1814
1815 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1816 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
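	/* Illustrative example: with 2 physicals, 3 logicals and the
	 * controller reported last (raid_ctlr_position == 5), i = 0..1
	 * indexes physdev_list, i = 2..4 indexes logdev_list, and
	 * i == 5 returns RAID_CTLR_LUNID.
	 */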
1817
1818 if (i == raid_ctlr_position)
1819 return RAID_CTLR_LUNID;
1820
1821 if (i < logicals_start)
1822 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
1823
1824 if (i < last_device)
1825 return &logdev_list->LUN[i - nphysicals -
1826 (raid_ctlr_position == 0)][0];
1827 BUG();
1828 return NULL;
1829}
1830
1831static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1832{
1833 /* the idea here is we could get notified
1834 * that some devices have changed, so we do a report
1835 * physical luns and report logical luns cmd, and adjust
1836 * our list of devices accordingly.
1837 *
1838 * The scsi3addr's of devices won't change so long as the
1839 * adapter is not reset. That means we can rescan and
1840 * tell which devices we already know about, vs. new
1841 * devices, vs. disappearing devices.
1842 */
1843 struct ReportLUNdata *physdev_list = NULL;
1844 struct ReportLUNdata *logdev_list = NULL;
1845 u32 nphysicals = 0;
1846 u32 nlogicals = 0;
1847 u32 ndev_allocated = 0;
1848 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1849 int ncurrent = 0;
1850 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1851	int i, n_ext_target_devs, ndevs_to_allocate;
1852	int raid_ctlr_position;
1853	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
1854
1855	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
1856 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1857 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
1858 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
1859
1860	if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1861 dev_err(&h->pdev->dev, "out of memory\n");
1862 goto out;
1863 }
1864 memset(lunzerobits, 0, sizeof(lunzerobits));
1865
1866 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1867 logdev_list, &nlogicals))
1868 goto out;
1869
1870 /* We might see up to the maximum number of logical and physical disks
1871 * plus external target devices, and a device for the local RAID
1872 * controller.
1873	 */
1874	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
1875
1876 /* Allocate the per device structures */
1877 for (i = 0; i < ndevs_to_allocate; i++) {
1878 if (i >= HPSA_MAX_DEVICES) {
1879 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
1880 " %d devices ignored.\n", HPSA_MAX_DEVICES,
1881 ndevs_to_allocate - HPSA_MAX_DEVICES);
1882 break;
1883 }
1884
1885 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
1886 if (!currentsd[i]) {
1887 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
1888 __FILE__, __LINE__);
1889 goto out;
1890 }
1891 ndev_allocated++;
1892 }
1893
1894 if (unlikely(is_scsi_rev_5(h)))
1895 raid_ctlr_position = 0;
1896 else
1897 raid_ctlr_position = nphysicals + nlogicals;
1898
1899	/* adjust our table of devices */
1900	n_ext_target_devs = 0;
1901	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1902		u8 *lunaddrbytes, is_OBDR = 0;
1903
1904 /* Figure out where the LUN ID info is coming from */
1905 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
1906 i, nphysicals, nlogicals, physdev_list, logdev_list);
1907		/* skip masked physical devices. */
1908 if (lunaddrbytes[3] & 0xC0 &&
1909 i < nphysicals + (raid_ctlr_position == 0))
1910 continue;
1911
1912 /* Get device type, vendor, model, device id */
1913 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1914 &is_OBDR))
1915			continue; /* skip it if we can't talk to it. */
1916		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
1917 this_device = currentsd[ncurrent];
1918
1919 /*
1920		 * For external target devices, we have to insert a LUN 0 which
1921 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1922 * is nonetheless an enclosure device there. We have to
1923 * present that otherwise linux won't find anything if
1924 * there is no lun 0.
1925 */
1926		if (add_ext_target_dev(h, tmpdevice, this_device,
1927				lunaddrbytes, lunzerobits,
1928				&n_ext_target_devs)) {
1929 ncurrent++;
1930 this_device = currentsd[ncurrent];
1931 }
1932
1933 *this_device = *tmpdevice;
1934
1935 switch (this_device->devtype) {
1936		case TYPE_ROM:
1937 /* We don't *really* support actual CD-ROM devices,
1938 * just "One Button Disaster Recovery" tape drive
1939 * which temporarily pretends to be a CD-ROM drive.
1940 * So we check that the device is really an OBDR tape
1941 * device by checking for "$DR-10" in bytes 43-48 of
1942 * the inquiry data.
1943 */
1944 if (is_OBDR)
1945 ncurrent++;
1946 break;
1947 case TYPE_DISK:
1948 if (i < nphysicals)
1949 break;
1950 ncurrent++;
1951 break;
1952 case TYPE_TAPE:
1953 case TYPE_MEDIUM_CHANGER:
1954 ncurrent++;
1955 break;
1956 case TYPE_RAID:
1957 /* Only present the Smartarray HBA as a RAID controller.
1958 * If it's a RAID controller other than the HBA itself
1959 * (an external RAID controller, MSA500 or similar)
1960 * don't present it.
1961 */
1962 if (!is_hba_lunid(lunaddrbytes))
1963 break;
1964 ncurrent++;
1965 break;
1966 default:
1967 break;
1968 }
1969		if (ncurrent >= HPSA_MAX_DEVICES)
1970 break;
1971 }
1972 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
1973out:
1974 kfree(tmpdevice);
1975 for (i = 0; i < ndev_allocated; i++)
1976 kfree(currentsd[i]);
1977 kfree(currentsd);
1978 kfree(physdev_list);
1979 kfree(logdev_list);
1980}
1981
1982/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
1983 * dma mapping and fills in the scatter gather entries of the
1984 * hpsa command, cp.
1985 */
1986static int hpsa_scatter_gather(struct ctlr_info *h,
1987 struct CommandList *cp,
1988 struct scsi_cmnd *cmd)
1989{
1990 unsigned int len;
1991 struct scatterlist *sg;
1992	u64 addr64;
1993 int use_sg, i, sg_index, chained;
1994 struct SGDescriptor *curr_sg;
1995
1996	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
1997
1998 use_sg = scsi_dma_map(cmd);
1999 if (use_sg < 0)
2000 return use_sg;
2001
2002 if (!use_sg)
2003 goto sglist_finished;
2004
2005 curr_sg = cp->SG;
2006 chained = 0;
2007 sg_index = 0;
2008	scsi_for_each_sg(cmd, sg, use_sg, i) {
2009 if (i == h->max_cmd_sg_entries - 1 &&
2010 use_sg > h->max_cmd_sg_entries) {
2011 chained = 1;
2012 curr_sg = h->cmd_sg_list[cp->cmdindex];
2013 sg_index = 0;
2014 }
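		/* Past the first max_cmd_sg_entries - 1 descriptors, the
		 * remaining entries are written into this command's
		 * preallocated chain block rather than the command itself.
		 */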
2015		addr64 = (u64) sg_dma_address(sg);
2016		len = sg_dma_len(sg);
2017 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2018 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2019 curr_sg->Len = len;
2020 curr_sg->Ext = 0; /* we are not chaining */
2021 curr_sg++;
2022 }
2023
2024 if (use_sg + chained > h->maxSG)
2025 h->maxSG = use_sg + chained;
2026
2027 if (chained) {
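		/* The header advertises h->max_cmd_sg_entries entries in
		 * this command and use_sg + 1 in total; the extra entry
		 * accounts for the chain descriptor mapped just below.
		 */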
2028 cp->Header.SGList = h->max_cmd_sg_entries;
2029 cp->Header.SGTotal = (u16) (use_sg + 1);
2030 hpsa_map_sg_chain_block(h, cp);
2031 return 0;
2032 }
2033
2034sglist_finished:
2035
2036 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
2037 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
2038 return 0;
2039}
2040
2041
2042static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
2043 void (*done)(struct scsi_cmnd *))
2044{
2045 struct ctlr_info *h;
2046 struct hpsa_scsi_dev_t *dev;
2047 unsigned char scsi3addr[8];
2048 struct CommandList *c;
2049 unsigned long flags;
2050
2051 /* Get the ptr to our adapter structure out of cmd->host. */
2052 h = sdev_to_hba(cmd->device);
2053 dev = cmd->device->hostdata;
2054 if (!dev) {
2055 cmd->result = DID_NO_CONNECT << 16;
2056 done(cmd);
2057 return 0;
2058 }
2059 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
2060
2061	spin_lock_irqsave(&h->lock, flags);
2062 if (unlikely(h->lockup_detected)) {
2063 spin_unlock_irqrestore(&h->lock, flags);
2064 cmd->result = DID_ERROR << 16;
2065 done(cmd);
2066 return 0;
2067 }
2068 /* Need a lock as this is being allocated from the pool */
2069 c = cmd_alloc(h);
2070 spin_unlock_irqrestore(&h->lock, flags);
2071 if (c == NULL) { /* trouble... */
2072 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2073 return SCSI_MLQUEUE_HOST_BUSY;
2074 }
2075
2076 /* Fill in the command list header */
2077
2078 cmd->scsi_done = done; /* save this for use by completion code */
2079
2080 /* save c in case we have to abort it */
2081 cmd->host_scribble = (unsigned char *) c;
2082
2083 c->cmd_type = CMD_SCSI;
2084 c->scsi_cmd = cmd;
2085 c->Header.ReplyQueue = 0; /* unused in simple mode */
2086 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2087 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
2088 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
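	/* The tag encodes the command's pool index plus DIRECT_LOOKUP_BIT,
	 * so the completion path can find this command without searching
	 * (see hpsa_tag_to_index() and process_indexed_cmd()).
	 */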
2089
2090 /* Fill in the request block... */
2091
2092 c->Request.Timeout = 0;
2093 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
2094 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
2095 c->Request.CDBLen = cmd->cmd_len;
2096 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
2097 c->Request.Type.Type = TYPE_CMD;
2098 c->Request.Type.Attribute = ATTR_SIMPLE;
2099 switch (cmd->sc_data_direction) {
2100 case DMA_TO_DEVICE:
2101 c->Request.Type.Direction = XFER_WRITE;
2102 break;
2103 case DMA_FROM_DEVICE:
2104 c->Request.Type.Direction = XFER_READ;
2105 break;
2106 case DMA_NONE:
2107 c->Request.Type.Direction = XFER_NONE;
2108 break;
2109 case DMA_BIDIRECTIONAL:
2110 /* This can happen if a buggy application does a scsi passthru
2111 * and sets both inlen and outlen to non-zero. ( see
2112 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
2113 */
2114
2115 c->Request.Type.Direction = XFER_RSVD;
2116 /* This is technically wrong, and hpsa controllers should
2117 * reject it with CMD_INVALID, which is the most correct
2118 * response, but non-fibre backends appear to let it
2119 * slide by, and give the same results as if this field
2120 * were set correctly. Either way is acceptable for
2121 * our purposes here.
2122 */
2123
2124 break;
2125
2126 default:
2127 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
2128 cmd->sc_data_direction);
2129 BUG();
2130 break;
2131 }
2132
2133	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
2134 cmd_free(h, c);
2135 return SCSI_MLQUEUE_HOST_BUSY;
2136 }
2137 enqueue_cmd_and_start_io(h, c);
2138 /* the cmd'll come back via intr handler in complete_scsi_command() */
2139 return 0;
2140}
2141
2142static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
2143
2144static void hpsa_scan_start(struct Scsi_Host *sh)
2145{
2146 struct ctlr_info *h = shost_to_hba(sh);
2147 unsigned long flags;
2148
2149 /* wait until any scan already in progress is finished. */
2150 while (1) {
2151 spin_lock_irqsave(&h->scan_lock, flags);
2152 if (h->scan_finished)
2153 break;
2154 spin_unlock_irqrestore(&h->scan_lock, flags);
2155 wait_event(h->scan_wait_queue, h->scan_finished);
2156 /* Note: We don't need to worry about a race between this
2157 * thread and driver unload because the midlayer will
2158 * have incremented the reference count, so unload won't
2159 * happen if we're in here.
2160 */
2161 }
2162 h->scan_finished = 0; /* mark scan as in progress */
2163 spin_unlock_irqrestore(&h->scan_lock, flags);
2164
2165 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
2166
2167 spin_lock_irqsave(&h->scan_lock, flags);
2168 h->scan_finished = 1; /* mark scan as finished. */
2169 wake_up_all(&h->scan_wait_queue);
2170 spin_unlock_irqrestore(&h->scan_lock, flags);
2171}
2172
2173static int hpsa_scan_finished(struct Scsi_Host *sh,
2174 unsigned long elapsed_time)
2175{
2176 struct ctlr_info *h = shost_to_hba(sh);
2177 unsigned long flags;
2178 int finished;
2179
2180 spin_lock_irqsave(&h->scan_lock, flags);
2181 finished = h->scan_finished;
2182 spin_unlock_irqrestore(&h->scan_lock, flags);
2183 return finished;
2184}
2185
2186static int hpsa_change_queue_depth(struct scsi_device *sdev,
2187 int qdepth, int reason)
2188{
2189 struct ctlr_info *h = sdev_to_hba(sdev);
2190
2191 if (reason != SCSI_QDEPTH_DEFAULT)
2192 return -ENOTSUPP;
2193
2194 if (qdepth < 1)
2195 qdepth = 1;
2196 else
2197 if (qdepth > h->nr_cmds)
2198 qdepth = h->nr_cmds;
2199 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2200 return sdev->queue_depth;
2201}
2202
2203static void hpsa_unregister_scsi(struct ctlr_info *h)
2204{
2205 /* we are being forcibly unloaded, and may not refuse. */
2206 scsi_remove_host(h->scsi_host);
2207 scsi_host_put(h->scsi_host);
2208 h->scsi_host = NULL;
2209}
2210
2211static int hpsa_register_scsi(struct ctlr_info *h)
2212{
2213 struct Scsi_Host *sh;
2214 int error;
2215
2216 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2217 if (sh == NULL)
2218 goto fail;
2219
2220 sh->io_port = 0;
2221 sh->n_io_port = 0;
2222 sh->this_id = -1;
2223 sh->max_channel = 3;
2224 sh->max_cmd_len = MAX_COMMAND_SIZE;
2225 sh->max_lun = HPSA_MAX_LUN;
2226 sh->max_id = HPSA_MAX_LUN;
2227 sh->can_queue = h->nr_cmds;
2228 sh->cmd_per_lun = h->nr_cmds;
2229 sh->sg_tablesize = h->maxsgentries;
2230 h->scsi_host = sh;
2231 sh->hostdata[0] = (unsigned long) h;
2232 sh->irq = h->intr[h->intr_mode];
2233 sh->unique_id = sh->irq;
2234 error = scsi_add_host(sh, &h->pdev->dev);
2235 if (error)
2236 goto fail_host_put;
2237 scsi_scan_host(sh);
2238 return 0;
2239
2240 fail_host_put:
2241 dev_err(&h->pdev->dev, "%s: scsi_add_host"
2242 " failed for controller %d\n", __func__, h->ctlr);
2243 scsi_host_put(sh);
2244 return error;
2245 fail:
2246 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
2247 " failed for controller %d\n", __func__, h->ctlr);
2248 return -ENOMEM;
2249}
2250
2251static int wait_for_device_to_become_ready(struct ctlr_info *h,
2252 unsigned char lunaddr[])
2253{
2254 int rc = 0;
2255 int count = 0;
2256 int waittime = 1; /* seconds */
2257 struct CommandList *c;
2258
2259 c = cmd_special_alloc(h);
2260 if (!c) {
2261 dev_warn(&h->pdev->dev, "out of memory in "
2262 "wait_for_device_to_become_ready.\n");
2263 return IO_ERROR;
2264 }
2265
2266 /* Send test unit ready until device ready, or give up. */
2267 while (count < HPSA_TUR_RETRY_LIMIT) {
2268
2269		/* Wait for a bit. Do this first, because if we send
2270 * the TUR right away, the reset will just abort it.
2271 */
2272 msleep(1000 * waittime);
2273 count++;
2274
2275 /* Increase wait time with each try, up to a point. */
2276 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
2277 waittime = waittime * 2;
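		/* i.e. wait 1s, 2s, 4s, ... doubling while still below
		 * HPSA_MAX_WAIT_INTERVAL_SECS. */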
2278
2279 /* Send the Test Unit Ready */
2280 fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
2281 hpsa_scsi_do_simple_cmd_core(h, c);
2282 /* no unmap needed here because no data xfer. */
2283
2284 if (c->err_info->CommandStatus == CMD_SUCCESS)
2285 break;
2286
2287 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2288 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
2289 (c->err_info->SenseInfo[2] == NO_SENSE ||
2290 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
2291 break;
2292
2293 dev_warn(&h->pdev->dev, "waiting %d secs "
2294 "for device to become ready.\n", waittime);
2295 rc = 1; /* device not ready. */
2296 }
2297
2298 if (rc)
2299 dev_warn(&h->pdev->dev, "giving up on device.\n");
2300 else
2301 dev_warn(&h->pdev->dev, "device is ready.\n");
2302
2303 cmd_special_free(h, c);
2304 return rc;
2305}
2306
2307/* Need at least one of these error handlers to keep ../scsi/hosts.c from
2308 * complaining. Doing a host- or bus-reset can't do anything good here.
2309 */
2310static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
2311{
2312 int rc;
2313 struct ctlr_info *h;
2314 struct hpsa_scsi_dev_t *dev;
2315
2316 /* find the controller to which the command to be aborted was sent */
2317 h = sdev_to_hba(scsicmd->device);
2318 if (h == NULL) /* paranoia */
2319 return FAILED;
2320 dev = scsicmd->device->hostdata;
2321 if (!dev) {
2322 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
2323 "device lookup failed.\n");
2324 return FAILED;
2325 }
2326 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
2327 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
2328 /* send a reset to the SCSI LUN which the command was sent to */
2329 rc = hpsa_send_reset(h, dev->scsi3addr);
2330 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
2331 return SUCCESS;
2332
2333 dev_warn(&h->pdev->dev, "resetting device failed.\n");
2334 return FAILED;
2335}
2336
2337/*
2338 * For operations that cannot sleep, a command block is allocated at init,
2339 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
2340 * which ones are free or in use. Lock must be held when calling this.
2341 * cmd_free() is the complement.
2342 */
2343static struct CommandList *cmd_alloc(struct ctlr_info *h)
2344{
2345 struct CommandList *c;
2346 int i;
2347 union u64bit temp64;
2348 dma_addr_t cmd_dma_handle, err_dma_handle;
2349
2350 do {
2351 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
2352 if (i == h->nr_cmds)
2353 return NULL;
2354 } while (test_and_set_bit
2355 (i & (BITS_PER_LONG - 1),
2356 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
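	/* test_and_set_bit() claims slot i atomically; if another caller
	 * raced us to the same bit, loop and look for the next free one.
	 */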
2357 c = h->cmd_pool + i;
2358 memset(c, 0, sizeof(*c));
2359 cmd_dma_handle = h->cmd_pool_dhandle
2360 + i * sizeof(*c);
2361 c->err_info = h->errinfo_pool + i;
2362 memset(c->err_info, 0, sizeof(*c->err_info));
2363 err_dma_handle = h->errinfo_pool_dhandle
2364 + i * sizeof(*c->err_info);
2365 h->nr_allocs++;
2366
2367 c->cmdindex = i;
2368
2369	INIT_LIST_HEAD(&c->list);
2370 c->busaddr = (u32) cmd_dma_handle;
2371 temp64.val = (u64) err_dma_handle;
2372 c->ErrDesc.Addr.lower = temp64.val32.lower;
2373 c->ErrDesc.Addr.upper = temp64.val32.upper;
2374 c->ErrDesc.Len = sizeof(*c->err_info);
2375
2376 c->h = h;
2377 return c;
2378}
2379
2380/* For operations that can wait for kmalloc to possibly sleep,
2381 * this routine can be called. Lock need not be held to call
2382 * cmd_special_alloc. cmd_special_free() is the complement.
2383 */
2384static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
2385{
2386 struct CommandList *c;
2387 union u64bit temp64;
2388 dma_addr_t cmd_dma_handle, err_dma_handle;
2389
2390 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
2391 if (c == NULL)
2392 return NULL;
2393 memset(c, 0, sizeof(*c));
2394
2395 c->cmdindex = -1;
2396
2397 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
2398 &err_dma_handle);
2399
2400 if (c->err_info == NULL) {
2401 pci_free_consistent(h->pdev,
2402 sizeof(*c), c, cmd_dma_handle);
2403 return NULL;
2404 }
2405 memset(c->err_info, 0, sizeof(*c->err_info));
2406
2407	INIT_LIST_HEAD(&c->list);
2408 c->busaddr = (u32) cmd_dma_handle;
2409 temp64.val = (u64) err_dma_handle;
2410 c->ErrDesc.Addr.lower = temp64.val32.lower;
2411 c->ErrDesc.Addr.upper = temp64.val32.upper;
2412 c->ErrDesc.Len = sizeof(*c->err_info);
2413
2414 c->h = h;
2415 return c;
2416}
2417
2418static void cmd_free(struct ctlr_info *h, struct CommandList *c)
2419{
2420 int i;
2421
2422 i = c - h->cmd_pool;
2423 clear_bit(i & (BITS_PER_LONG - 1),
2424 h->cmd_pool_bits + (i / BITS_PER_LONG));
2425 h->nr_frees++;
2426}
2427
2428static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
2429{
2430 union u64bit temp64;
2431
2432 temp64.val32.lower = c->ErrDesc.Addr.lower;
2433 temp64.val32.upper = c->ErrDesc.Addr.upper;
2434 pci_free_consistent(h->pdev, sizeof(*c->err_info),
2435 c->err_info, (dma_addr_t) temp64.val);
2436 pci_free_consistent(h->pdev, sizeof(*c),
2437		c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
2438}
2439
2440#ifdef CONFIG_COMPAT
2441
2442static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
2443{
2444 IOCTL32_Command_struct __user *arg32 =
2445 (IOCTL32_Command_struct __user *) arg;
2446 IOCTL_Command_struct arg64;
2447 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
2448 int err;
2449 u32 cp;
2450
2451	memset(&arg64, 0, sizeof(arg64));
2452 err = 0;
2453 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2454 sizeof(arg64.LUN_info));
2455 err |= copy_from_user(&arg64.Request, &arg32->Request,
2456 sizeof(arg64.Request));
2457 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2458 sizeof(arg64.error_info));
2459 err |= get_user(arg64.buf_size, &arg32->buf_size);
2460 err |= get_user(cp, &arg32->buf);
2461 arg64.buf = compat_ptr(cp);
2462 err |= copy_to_user(p, &arg64, sizeof(arg64));
2463
2464 if (err)
2465 return -EFAULT;
2466
2467	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
2468 if (err)
2469 return err;
2470 err |= copy_in_user(&arg32->error_info, &p->error_info,
2471 sizeof(arg32->error_info));
2472 if (err)
2473 return -EFAULT;
2474 return err;
2475}
2476
2477static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
2478 int cmd, void *arg)
2479{
2480 BIG_IOCTL32_Command_struct __user *arg32 =
2481 (BIG_IOCTL32_Command_struct __user *) arg;
2482 BIG_IOCTL_Command_struct arg64;
2483 BIG_IOCTL_Command_struct __user *p =
2484 compat_alloc_user_space(sizeof(arg64));
2485 int err;
2486 u32 cp;
2487
2488	memset(&arg64, 0, sizeof(arg64));
2489 err = 0;
2490 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
2491 sizeof(arg64.LUN_info));
2492 err |= copy_from_user(&arg64.Request, &arg32->Request,
2493 sizeof(arg64.Request));
2494 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
2495 sizeof(arg64.error_info));
2496 err |= get_user(arg64.buf_size, &arg32->buf_size);
2497 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
2498 err |= get_user(cp, &arg32->buf);
2499 arg64.buf = compat_ptr(cp);
2500 err |= copy_to_user(p, &arg64, sizeof(arg64));
2501
2502 if (err)
2503 return -EFAULT;
2504
2505	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
2506 if (err)
2507 return err;
2508 err |= copy_in_user(&arg32->error_info, &p->error_info,
2509 sizeof(arg32->error_info));
2510 if (err)
2511 return -EFAULT;
2512 return err;
2513}
2514
2515static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
2516{
2517 switch (cmd) {
2518 case CCISS_GETPCIINFO:
2519 case CCISS_GETINTINFO:
2520 case CCISS_SETINTINFO:
2521 case CCISS_GETNODENAME:
2522 case CCISS_SETNODENAME:
2523 case CCISS_GETHEARTBEAT:
2524 case CCISS_GETBUSTYPES:
2525 case CCISS_GETFIRMVER:
2526 case CCISS_GETDRIVVER:
2527 case CCISS_REVALIDVOLS:
2528 case CCISS_DEREGDISK:
2529 case CCISS_REGNEWDISK:
2530 case CCISS_REGNEWD:
2531 case CCISS_RESCANDISK:
2532 case CCISS_GETLUNINFO:
2533 return hpsa_ioctl(dev, cmd, arg);
2534
2535 case CCISS_PASSTHRU32:
2536 return hpsa_ioctl32_passthru(dev, cmd, arg);
2537 case CCISS_BIG_PASSTHRU32:
2538 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2539
2540 default:
2541 return -ENOIOCTLCMD;
2542 }
2543}
2544#endif
2545
2546static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
2547{
2548 struct hpsa_pci_info pciinfo;
2549
2550 if (!argp)
2551 return -EINVAL;
2552 pciinfo.domain = pci_domain_nr(h->pdev->bus);
2553 pciinfo.bus = h->pdev->bus->number;
2554 pciinfo.dev_fn = h->pdev->devfn;
2555 pciinfo.board_id = h->board_id;
2556 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
2557 return -EFAULT;
2558 return 0;
2559}
2560
2561static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
2562{
2563 DriverVer_type DriverVer;
2564 unsigned char vmaj, vmin, vsubmin;
2565 int rc;
2566
2567 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
2568 &vmaj, &vmin, &vsubmin);
2569 if (rc != 3) {
2570 dev_info(&h->pdev->dev, "driver version string '%s' "
2571 "unrecognized.", HPSA_DRIVER_VERSION);
2572 vmaj = 0;
2573 vmin = 0;
2574 vsubmin = 0;
2575 }
2576 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2577 if (!argp)
2578 return -EINVAL;
2579 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
2580 return -EFAULT;
2581 return 0;
2582}
2583
2584static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2585{
2586 IOCTL_Command_struct iocommand;
2587 struct CommandList *c;
2588 char *buff = NULL;
2589 union u64bit temp64;
2590
2591 if (!argp)
2592 return -EINVAL;
2593 if (!capable(CAP_SYS_RAWIO))
2594 return -EPERM;
2595 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
2596 return -EFAULT;
2597 if ((iocommand.buf_size < 1) &&
2598 (iocommand.Request.Type.Direction != XFER_NONE)) {
2599 return -EINVAL;
2600 }
2601 if (iocommand.buf_size > 0) {
2602 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
2603 if (buff == NULL)
2604 return -EFAULT;
2605 if (iocommand.Request.Type.Direction == XFER_WRITE) {
2606 /* Copy the data into the buffer we created */
2607 if (copy_from_user(buff, iocommand.buf,
2608 iocommand.buf_size)) {
2609 kfree(buff);
2610 return -EFAULT;
2611 }
2612 } else {
2613 memset(buff, 0, iocommand.buf_size);
2614		}
2615	}
2616 c = cmd_special_alloc(h);
2617 if (c == NULL) {
2618 kfree(buff);
2619 return -ENOMEM;
2620 }
2621 /* Fill in the command type */
2622 c->cmd_type = CMD_IOCTL_PEND;
2623 /* Fill in Command Header */
2624 c->Header.ReplyQueue = 0; /* unused in simple mode */
2625 if (iocommand.buf_size > 0) { /* buffer to fill */
2626 c->Header.SGList = 1;
2627 c->Header.SGTotal = 1;
2628 } else { /* no buffers to fill */
2629 c->Header.SGList = 0;
2630 c->Header.SGTotal = 0;
2631 }
2632 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
2633 /* use the kernel address the cmd block for tag */
2634 c->Header.Tag.lower = c->busaddr;
2635
2636 /* Fill in Request block */
2637 memcpy(&c->Request, &iocommand.Request,
2638 sizeof(c->Request));
2639
2640 /* Fill in the scatter gather information */
2641 if (iocommand.buf_size > 0) {
2642 temp64.val = pci_map_single(h->pdev, buff,
2643 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
2644 c->SG[0].Addr.lower = temp64.val32.lower;
2645 c->SG[0].Addr.upper = temp64.val32.upper;
2646 c->SG[0].Len = iocommand.buf_size;
2647 c->SG[0].Ext = 0; /* we are not chaining*/
2648 }
2649	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2650 if (iocommand.buf_size > 0)
2651 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
2652 check_ioctl_unit_attention(h, c);
2653
2654 /* Copy the error information out */
2655 memcpy(&iocommand.error_info, c->err_info,
2656 sizeof(iocommand.error_info));
2657 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
2658 kfree(buff);
2659 cmd_special_free(h, c);
2660 return -EFAULT;
2661 }
2662 if (iocommand.Request.Type.Direction == XFER_READ &&
2663 iocommand.buf_size > 0) {
2664 /* Copy the data out of the buffer we created */
2665 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
2666 kfree(buff);
2667 cmd_special_free(h, c);
2668 return -EFAULT;
2669 }
2670 }
2671 kfree(buff);
2672 cmd_special_free(h, c);
2673 return 0;
2674}
2675
2676static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2677{
2678 BIG_IOCTL_Command_struct *ioc;
2679 struct CommandList *c;
2680 unsigned char **buff = NULL;
2681 int *buff_size = NULL;
2682 union u64bit temp64;
2683 BYTE sg_used = 0;
2684 int status = 0;
2685 int i;
2686 u32 left;
2687 u32 sz;
2688 BYTE __user *data_ptr;
2689
2690 if (!argp)
2691 return -EINVAL;
2692 if (!capable(CAP_SYS_RAWIO))
2693 return -EPERM;
2694 ioc = (BIG_IOCTL_Command_struct *)
2695 kmalloc(sizeof(*ioc), GFP_KERNEL);
2696 if (!ioc) {
2697 status = -ENOMEM;
2698 goto cleanup1;
2699 }
2700 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
2701 status = -EFAULT;
2702 goto cleanup1;
2703 }
2704 if ((ioc->buf_size < 1) &&
2705 (ioc->Request.Type.Direction != XFER_NONE)) {
2706 status = -EINVAL;
2707 goto cleanup1;
2708 }
2709 /* Check kmalloc limits using all SGs */
2710 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
2711 status = -EINVAL;
2712 goto cleanup1;
2713 }
2714	if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
2715 status = -EINVAL;
2716 goto cleanup1;
2717 }
2718	buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
2719 if (!buff) {
2720 status = -ENOMEM;
2721 goto cleanup1;
2722 }
2723	buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
2724 if (!buff_size) {
2725 status = -ENOMEM;
2726 goto cleanup1;
2727 }
2728 left = ioc->buf_size;
2729 data_ptr = ioc->buf;
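	/* Split the user buffer into chunks of at most malloc_size bytes,
	 * one kernel buffer (and later one SG entry) per chunk.
	 */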
2730 while (left) {
2731 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
2732 buff_size[sg_used] = sz;
2733 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
2734 if (buff[sg_used] == NULL) {
2735 status = -ENOMEM;
2736 goto cleanup1;
2737 }
2738 if (ioc->Request.Type.Direction == XFER_WRITE) {
2739 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
2740 status = -ENOMEM;
2741 goto cleanup1;
2742 }
2743 } else
2744 memset(buff[sg_used], 0, sz);
2745 left -= sz;
2746 data_ptr += sz;
2747 sg_used++;
2748 }
2749 c = cmd_special_alloc(h);
2750 if (c == NULL) {
2751 status = -ENOMEM;
2752 goto cleanup1;
2753 }
2754 c->cmd_type = CMD_IOCTL_PEND;
2755 c->Header.ReplyQueue = 0;
2756	c->Header.SGList = c->Header.SGTotal = sg_used;
2757 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
2758 c->Header.Tag.lower = c->busaddr;
2759 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
2760 if (ioc->buf_size > 0) {
2761 int i;
2762 for (i = 0; i < sg_used; i++) {
2763 temp64.val = pci_map_single(h->pdev, buff[i],
2764 buff_size[i], PCI_DMA_BIDIRECTIONAL);
2765 c->SG[i].Addr.lower = temp64.val32.lower;
2766 c->SG[i].Addr.upper = temp64.val32.upper;
2767 c->SG[i].Len = buff_size[i];
2768 /* we are not chaining */
2769 c->SG[i].Ext = 0;
2770 }
2771 }
2772	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2773 if (sg_used)
2774 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
2775 check_ioctl_unit_attention(h, c);
2776 /* Copy the error information out */
2777 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
2778 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
2779 cmd_special_free(h, c);
2780 status = -EFAULT;
2781 goto cleanup1;
2782 }
2783	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
2784 /* Copy the data out of the buffer we created */
2785 BYTE __user *ptr = ioc->buf;
2786 for (i = 0; i < sg_used; i++) {
2787 if (copy_to_user(ptr, buff[i], buff_size[i])) {
2788 cmd_special_free(h, c);
2789 status = -EFAULT;
2790 goto cleanup1;
2791 }
2792 ptr += buff_size[i];
2793 }
2794 }
2795 cmd_special_free(h, c);
2796 status = 0;
2797cleanup1:
2798 if (buff) {
2799 for (i = 0; i < sg_used; i++)
2800 kfree(buff[i]);
2801 kfree(buff);
2802 }
2803 kfree(buff_size);
2804 kfree(ioc);
2805 return status;
2806}
2807
2808static void check_ioctl_unit_attention(struct ctlr_info *h,
2809 struct CommandList *c)
2810{
2811 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
2812 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
2813 (void) check_for_unit_attention(h, c);
2814}
2815/*
2816 * ioctl
2817 */
2818static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
2819{
2820 struct ctlr_info *h;
2821 void __user *argp = (void __user *)arg;
2822
2823 h = sdev_to_hba(dev);
2824
2825 switch (cmd) {
2826 case CCISS_DEREGDISK:
2827 case CCISS_REGNEWDISK:
2828 case CCISS_REGNEWD:
2829		hpsa_scan_start(h->scsi_host);
2830 return 0;
2831 case CCISS_GETPCIINFO:
2832 return hpsa_getpciinfo_ioctl(h, argp);
2833 case CCISS_GETDRIVVER:
2834 return hpsa_getdrivver_ioctl(h, argp);
2835 case CCISS_PASSTHRU:
2836 return hpsa_passthru_ioctl(h, argp);
2837 case CCISS_BIG_PASSTHRU:
2838 return hpsa_big_passthru_ioctl(h, argp);
2839 default:
2840 return -ENOTTY;
2841 }
2842}
2843
2844static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
2845 unsigned char *scsi3addr, u8 reset_type)
2846{
2847 struct CommandList *c;
2848
2849 c = cmd_alloc(h);
2850 if (!c)
2851 return -ENOMEM;
2852 fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2853 RAID_CTLR_LUNID, TYPE_MSG);
2854 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
2855 c->waiting = NULL;
2856 enqueue_cmd_and_start_io(h, c);
2857 /* Don't wait for completion, the reset won't complete. Don't free
2858 * the command either. This is the last command we will send before
2859 * re-initializing everything, so it doesn't matter and won't leak.
2860 */
2861 return 0;
2862}
2863
2864static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
2865 void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
2866 int cmd_type)
2867{
2868 int pci_dir = XFER_NONE;
2869
2870 c->cmd_type = CMD_IOCTL_PEND;
2871 c->Header.ReplyQueue = 0;
2872 if (buff != NULL && size > 0) {
2873 c->Header.SGList = 1;
2874 c->Header.SGTotal = 1;
2875 } else {
2876 c->Header.SGList = 0;
2877 c->Header.SGTotal = 0;
2878 }
2879 c->Header.Tag.lower = c->busaddr;
2880 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
2881
2882 c->Request.Type.Type = cmd_type;
2883 if (cmd_type == TYPE_CMD) {
2884 switch (cmd) {
2885 case HPSA_INQUIRY:
2886 /* are we trying to read a vital product page */
2887 if (page_code != 0) {
2888 c->Request.CDB[1] = 0x01;
2889 c->Request.CDB[2] = page_code;
2890 }
2891 c->Request.CDBLen = 6;
2892 c->Request.Type.Attribute = ATTR_SIMPLE;
2893 c->Request.Type.Direction = XFER_READ;
2894 c->Request.Timeout = 0;
2895 c->Request.CDB[0] = HPSA_INQUIRY;
2896 c->Request.CDB[4] = size & 0xFF;
2897 break;
2898 case HPSA_REPORT_LOG:
2899 case HPSA_REPORT_PHYS:
2900			/* Talking to the controller, so it's a physical command:
2901			   mode = 00, target = 0. Nothing to write.
2902 */
2903 c->Request.CDBLen = 12;
2904 c->Request.Type.Attribute = ATTR_SIMPLE;
2905 c->Request.Type.Direction = XFER_READ;
2906 c->Request.Timeout = 0;
2907 c->Request.CDB[0] = cmd;
2908 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
2909 c->Request.CDB[7] = (size >> 16) & 0xFF;
2910 c->Request.CDB[8] = (size >> 8) & 0xFF;
2911 c->Request.CDB[9] = size & 0xFF;
2912 break;
2913 case HPSA_CACHE_FLUSH:
2914 c->Request.CDBLen = 12;
2915 c->Request.Type.Attribute = ATTR_SIMPLE;
2916 c->Request.Type.Direction = XFER_WRITE;
2917 c->Request.Timeout = 0;
2918 c->Request.CDB[0] = BMIC_WRITE;
2919 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
2920 c->Request.CDB[7] = (size >> 8) & 0xFF;
2921 c->Request.CDB[8] = size & 0xFF;
2922 break;
2923 case TEST_UNIT_READY:
2924 c->Request.CDBLen = 6;
2925 c->Request.Type.Attribute = ATTR_SIMPLE;
2926 c->Request.Type.Direction = XFER_NONE;
2927 c->Request.Timeout = 0;
2928 break;
2929 default:
2930			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2931 BUG();
2932 return;
2933 }
2934 } else if (cmd_type == TYPE_MSG) {
2935 switch (cmd) {
2936
2937 case HPSA_DEVICE_RESET_MSG:
2938 c->Request.CDBLen = 16;
2939 c->Request.Type.Type = 1; /* It is a MSG not a CMD */
2940 c->Request.Type.Attribute = ATTR_SIMPLE;
2941 c->Request.Type.Direction = XFER_NONE;
2942 c->Request.Timeout = 0; /* Don't time out */
2943 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
2944 c->Request.CDB[0] = cmd;
2945 c->Request.CDB[1] = 0x03; /* Reset target above */
2946 /* If bytes 4-7 are zero, it means reset the */
2947 /* LunID device */
2948 c->Request.CDB[4] = 0x00;
2949 c->Request.CDB[5] = 0x00;
2950 c->Request.CDB[6] = 0x00;
2951 c->Request.CDB[7] = 0x00;
2952 break;
2953
2954 default:
2955 dev_warn(&h->pdev->dev, "unknown message type %d\n",
2956 cmd);
2957 BUG();
2958 }
2959 } else {
2960 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
2961 BUG();
2962 }
2963
2964 switch (c->Request.Type.Direction) {
2965 case XFER_READ:
2966 pci_dir = PCI_DMA_FROMDEVICE;
2967 break;
2968 case XFER_WRITE:
2969 pci_dir = PCI_DMA_TODEVICE;
2970 break;
2971 case XFER_NONE:
2972 pci_dir = PCI_DMA_NONE;
2973 break;
2974 default:
2975 pci_dir = PCI_DMA_BIDIRECTIONAL;
2976 }
2977
2978 hpsa_map_one(h->pdev, c, buff, size, pci_dir);
2979
2980 return;
2981}
2982
2983/*
2984 * Map (physical) PCI mem into (virtual) kernel space
2985 */
2986static void __iomem *remap_pci_mem(ulong base, ulong size)
2987{
2988 ulong page_base = ((ulong) base) & PAGE_MASK;
2989 ulong page_offs = ((ulong) base) - page_base;
2990 void __iomem *page_remapped = ioremap(page_base, page_offs + size);
2991
2992 return page_remapped ? (page_remapped + page_offs) : NULL;
2993}
2994
2995/* Takes cmds off the submission queue and sends them to the hardware,
2996 * then puts them on the queue of cmds waiting for completion.
2997 */
2998static void start_io(struct ctlr_info *h)
2999{
3000 struct CommandList *c;
3001
3002 while (!list_empty(&h->reqQ)) {
3003 c = list_entry(h->reqQ.next, struct CommandList, list);
3004 /* can't do anything if fifo is full */
3005 if ((h->access.fifo_full(h))) {
3006 dev_warn(&h->pdev->dev, "fifo full\n");
3007 break;
3008 }
3009
3010 /* Get the first entry from the Request Q */
3011 removeQ(c);
3012 h->Qdepth--;
3013
3014 /* Tell the controller execute command */
3015 h->access.submit_command(h, c);
3016
3017 /* Put job onto the completed Q */
3018 addQ(&h->cmpQ, c);
3019 }
3020}
3021
3022static inline unsigned long get_next_completion(struct ctlr_info *h)
3023{
3024 return h->access.command_completed(h);
3025}
3026
3027static inline bool interrupt_pending(struct ctlr_info *h)
3028{
3029 return h->access.intr_pending(h);
3030}
3031
3032static inline long interrupt_not_for_us(struct ctlr_info *h)
3033{
3034 return (h->access.intr_pending(h) == 0) ||
3035 (h->interrupts_enabled == 0);
3036}
3037
3038static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
3039 u32 raw_tag)
3040{
3041 if (unlikely(tag_index >= h->nr_cmds)) {
3042 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
3043 return 1;
3044 }
3045 return 0;
3046}
3047
3048static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
3049{
3050 removeQ(c);
3051 if (likely(c->cmd_type == CMD_SCSI))
3052		complete_scsi_command(c);
3053 else if (c->cmd_type == CMD_IOCTL_PEND)
3054 complete(c->waiting);
3055}
3056
3057static inline u32 hpsa_tag_contains_index(u32 tag)
3058{
3059 return tag & DIRECT_LOOKUP_BIT;
3060}
3061
3062static inline u32 hpsa_tag_to_index(u32 tag)
3063{
3064 return tag >> DIRECT_LOOKUP_SHIFT;
3065}
3066
3067
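/* Strip the error/status bits the controller sets in the low bits of a
 * completed tag: the low two bits in simple mode, the low
 * DIRECT_LOOKUP_SHIFT bits in performant mode.
 */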
3068static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
3069{
3070#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3071#define HPSA_SIMPLE_ERROR_BITS 0x03
3072	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
3073 return tag & ~HPSA_SIMPLE_ERROR_BITS;
3074 return tag & ~HPSA_PERF_ERROR_BITS;
3075}
3076
3077/* process completion of an indexed ("direct lookup") command */
3078static inline u32 process_indexed_cmd(struct ctlr_info *h,
3079 u32 raw_tag)
3080{
3081 u32 tag_index;
3082 struct CommandList *c;
3083
3084 tag_index = hpsa_tag_to_index(raw_tag);
3085 if (bad_tag(h, tag_index, raw_tag))
3086 return next_command(h);
3087 c = h->cmd_pool + tag_index;
3088 finish_cmd(c, raw_tag);
3089 return next_command(h);
3090}
3091
3092/* process completion of a non-indexed command */
3093static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
3094 u32 raw_tag)
3095{
3096 u32 tag;
3097 struct CommandList *c = NULL;
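	/* A non-indexed completion carries the command's bus address;
	 * search the completion queue for a match, ignoring the low
	 * error/status bits of both values.
	 */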
3098
3099	tag = hpsa_tag_discard_error_bits(h, raw_tag);
3100	list_for_each_entry(c, &h->cmpQ, list) {
3101 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3102 finish_cmd(c, raw_tag);
3103 return next_command(h);
3104 }
3105 }
3106 bad_tag(h, h->nr_cmds + 1, raw_tag);
3107 return next_command(h);
3108}
3109
3110/* Some controllers, like p400, will give us one interrupt
3111 * after a soft reset, even if we turned interrupts off.
3112 * Only need to check for this in the hpsa_xxx_discard_completions
3113 * functions.
3114 */
3115static int ignore_bogus_interrupt(struct ctlr_info *h)
3116{
3117 if (likely(!reset_devices))
3118 return 0;
3119
3120 if (likely(h->interrupts_enabled))
3121 return 0;
3122
3123 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
3124 "(known firmware bug.) Ignoring.\n");
3125
3126 return 1;
3127}
3128
3129static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
3130{
3131 struct ctlr_info *h = dev_id;
3132 unsigned long flags;
3133 u32 raw_tag;
3134
3135 if (ignore_bogus_interrupt(h))
3136 return IRQ_NONE;
3137
3138 if (interrupt_not_for_us(h))
3139 return IRQ_NONE;
3140 spin_lock_irqsave(&h->lock, flags);
3141	h->last_intr_timestamp = get_jiffies_64();
3142 while (interrupt_pending(h)) {
3143 raw_tag = get_next_completion(h);
3144 while (raw_tag != FIFO_EMPTY)
3145 raw_tag = next_command(h);
3146 }
3147 spin_unlock_irqrestore(&h->lock, flags);
3148 return IRQ_HANDLED;
3149}
3150
3151static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
3152{
3153 struct ctlr_info *h = dev_id;
3154 unsigned long flags;
3155 u32 raw_tag;
3156
3157 if (ignore_bogus_interrupt(h))
3158 return IRQ_NONE;
3159
3160 spin_lock_irqsave(&h->lock, flags);
3161	h->last_intr_timestamp = get_jiffies_64();
3162 raw_tag = get_next_completion(h);
3163 while (raw_tag != FIFO_EMPTY)
3164 raw_tag = next_command(h);
3165 spin_unlock_irqrestore(&h->lock, flags);
3166 return IRQ_HANDLED;
3167}
3168
3169static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
3170{
3171 struct ctlr_info *h = dev_id;
3172	unsigned long flags;
3173	u32 raw_tag;
3174
3175 if (interrupt_not_for_us(h))
3176 return IRQ_NONE;
3177	spin_lock_irqsave(&h->lock, flags);
3178	h->last_intr_timestamp = get_jiffies_64();
3179 while (interrupt_pending(h)) {
3180 raw_tag = get_next_completion(h);
3181 while (raw_tag != FIFO_EMPTY) {
3182 if (hpsa_tag_contains_index(raw_tag))
3183 raw_tag = process_indexed_cmd(h, raw_tag);
3184 else
3185 raw_tag = process_nonindexed_cmd(h, raw_tag);
3186 }
3187 }
3188 spin_unlock_irqrestore(&h->lock, flags);
3189 return IRQ_HANDLED;
3190}
3191
3192static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
3193{
3194 struct ctlr_info *h = dev_id;
3195 unsigned long flags;
3196 u32 raw_tag;
3197
3198	spin_lock_irqsave(&h->lock, flags);
3199	h->last_intr_timestamp = get_jiffies_64();
3200 raw_tag = get_next_completion(h);
3201 while (raw_tag != FIFO_EMPTY) {
3202 if (hpsa_tag_contains_index(raw_tag))
3203 raw_tag = process_indexed_cmd(h, raw_tag);
3204 else
3205 raw_tag = process_nonindexed_cmd(h, raw_tag);
3206 }
3207 spin_unlock_irqrestore(&h->lock, flags);
3208 return IRQ_HANDLED;
3209}
3210
3211/* Send a message CDB to the firmware. Careful, this only works
3212 * in simple mode, not performant mode due to the tag lookup.
3213 * We only ever use this immediately after a controller reset.
3214 */
3215static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
3216 unsigned char type)
3217{
3218 struct Command {
3219 struct CommandListHeader CommandHeader;
3220 struct RequestBlock Request;
3221 struct ErrDescriptor ErrorDescriptor;
3222 };
3223 struct Command *cmd;
3224 static const size_t cmd_sz = sizeof(*cmd) +
3225 sizeof(cmd->ErrorDescriptor);
3226 dma_addr_t paddr64;
3227 uint32_t paddr32, tag;
3228 void __iomem *vaddr;
3229 int i, err;
3230
3231 vaddr = pci_ioremap_bar(pdev, 0);
3232 if (vaddr == NULL)
3233 return -ENOMEM;
3234
3235 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
3236 * CCISS commands, so they must be allocated from the lower 4GiB of
3237 * memory.
3238 */
3239 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
3240 if (err) {
3241 iounmap(vaddr);
3242 return -ENOMEM;
3243 }
3244
3245 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
3246 if (cmd == NULL) {
3247 iounmap(vaddr);
3248 return -ENOMEM;
3249 }
3250
3251 /* This must fit, because of the 32-bit consistent DMA mask. Also,
3252 * although there's no guarantee, we assume that the address is at
3253 * least 4-byte aligned (most likely, it's page-aligned).
3254 */
3255 paddr32 = paddr64;
3256
3257 cmd->CommandHeader.ReplyQueue = 0;
3258 cmd->CommandHeader.SGList = 0;
3259 cmd->CommandHeader.SGTotal = 0;
3260 cmd->CommandHeader.Tag.lower = paddr32;
3261 cmd->CommandHeader.Tag.upper = 0;
3262 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3263
3264 cmd->Request.CDBLen = 16;
3265 cmd->Request.Type.Type = TYPE_MSG;
3266 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
3267 cmd->Request.Type.Direction = XFER_NONE;
3268 cmd->Request.Timeout = 0; /* Don't time out */
3269 cmd->Request.CDB[0] = opcode;
3270 cmd->Request.CDB[1] = type;
3271 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
3272 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
3273 cmd->ErrorDescriptor.Addr.upper = 0;
3274 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
3275
3276 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
3277
3278 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
3279 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
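		/* A completed tag echoes the command's bus address with
		 * status in the low HPSA_SIMPLE_ERROR_BITS. */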
3280		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3281 break;
3282 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
3283 }
3284
3285 iounmap(vaddr);
3286
3287 /* we leak the DMA buffer here ... no choice since the controller could
3288 * still complete the command.
3289 */
3290 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3291 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
3292 opcode, type);
3293 return -ETIMEDOUT;
3294 }
3295
3296 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
3297
3298 if (tag & HPSA_ERROR_BIT) {
3299 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
3300 opcode, type);
3301 return -EIO;
3302 }
3303
3304 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
3305 opcode, type);
3306 return 0;
3307}
3308
3309#define hpsa_noop(p) hpsa_message(p, 3, 0)
3310
3311static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3312	void * __iomem vaddr, u32 use_doorbell)
3313{
3314 u16 pmcsr;
3315 int pos;
3316
3317 if (use_doorbell) {
3318 /* For everything after the P600, the PCI power state method
3319 * of resetting the controller doesn't work, so we have this
3320 * other way using the doorbell register.
3321 */
3322 dev_info(&pdev->dev, "using doorbell to reset controller\n");
3323		writel(use_doorbell, vaddr + SA5_DOORBELL);
3324 } else { /* Try to do it the PCI power state way */
3325
3326 /* Quoting from the Open CISS Specification: "The Power
3327 * Management Control/Status Register (CSR) controls the power
3328 * state of the device. The normal operating state is D0,
3329 * CSR=00h. The software off state is D3, CSR=03h. To reset
3330 * the controller, place the interface device in D3 then to D0,
3331 * this causes a secondary PCI reset which will reset the
3332 * controller." */
3333
3334 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
3335 if (pos == 0) {
3336 dev_err(&pdev->dev,
3337 "hpsa_reset_controller: "
3338 "PCI PM not supported\n");
3339 return -ENODEV;
3340 }
3341 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
3342 /* enter the D3hot power management state */
3343 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
3344 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3345 pmcsr |= PCI_D3hot;
3346 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3347
3348 msleep(500);
3349
3350 /* enter the D0 power management state */
3351 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3352 pmcsr |= PCI_D0;
3353 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
3354
3355 /*
3356 * The P600 requires a small delay when changing states.
3357 * Otherwise we may think the board did not reset and we bail.
3358		 * This is for kdump only and is particular to the P600.
3359 */
3360 msleep(500);
3361 }
3362 return 0;
3363}
3364
3365static __devinit void init_driver_version(char *driver_version, int len)
3366{
3367 memset(driver_version, 0, len);
3368	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
3369}
3370
3371static __devinit int write_driver_ver_to_cfgtable(
3372 struct CfgTable __iomem *cfgtable)
3373{
3374 char *driver_version;
3375 int i, size = sizeof(cfgtable->driver_version);
3376
3377 driver_version = kmalloc(size, GFP_KERNEL);
3378 if (!driver_version)
3379 return -ENOMEM;
3380
3381 init_driver_version(driver_version, size);
3382 for (i = 0; i < size; i++)
3383 writeb(driver_version[i], &cfgtable->driver_version[i]);
3384 kfree(driver_version);
3385 return 0;
3386}
3387
3388static __devinit void read_driver_ver_from_cfgtable(
3389 struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
3390{
3391 int i;
3392
3393 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
3394 driver_ver[i] = readb(&cfgtable->driver_version[i]);
3395}
3396
3397static __devinit int controller_reset_failed(
3398 struct CfgTable __iomem *cfgtable)
3399{
3400
3401 char *driver_ver, *old_driver_ver;
3402 int rc, size = sizeof(cfgtable->driver_version);
3403
3404 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
3405 if (!old_driver_ver)
3406 return -ENOMEM;
3407 driver_ver = old_driver_ver + size;
3408
3409 /* After a reset, the 32 bytes of "driver version" in the cfgtable
3410 * should have been changed, otherwise we know the reset failed.
3411 */
3412 init_driver_version(old_driver_ver, size);
3413 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3414 rc = !memcmp(driver_ver, old_driver_ver, size);
3415 kfree(old_driver_ver);
3416 return rc;
3417}
3418/* This does a hard reset of the controller using PCI power management
3419 * states or the doorbell register.
3420 */
3421static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
3422{
3423 u64 cfg_offset;
3424 u32 cfg_base_addr;
3425 u64 cfg_base_addr_index;
3426 void __iomem *vaddr;
3427 unsigned long paddr;
3428	u32 misc_fw_support;
3429	int rc;
3430	struct CfgTable __iomem *cfgtable;
3431	u32 use_doorbell;
3432	u32 board_id;
3433	u16 command_register;
3434
3435 /* For controllers as old as the P600, this is very nearly
3436 * the same thing as
3437 *
3438 * pci_save_state(pci_dev);
3439 * pci_set_power_state(pci_dev, PCI_D3hot);
3440 * pci_set_power_state(pci_dev, PCI_D0);
3441 * pci_restore_state(pci_dev);
3442 *
3443 * For controllers newer than the P600, the pci power state
3444 * method of resetting doesn't work so we have another way
3445 * using the doorbell register.
3446	 */
3447
3448	rc = hpsa_lookup_board_id(pdev, &board_id);
3449	if (rc < 0 || !ctlr_is_resettable(board_id)) {
3450 dev_warn(&pdev->dev, "Not resetting device.\n");
3451 return -ENODEV;
3452 }
46380786
SC
3453
3454 /* if controller is soft- but not hard resettable... */
3455 if (!ctlr_is_hard_resettable(board_id))
3456 return -ENOTSUPP; /* try soft reset later. */
3457
3458 /* Save the PCI command register */
3459 pci_read_config_word(pdev, 4, &command_register);
3460 /* Turn the board off. This is so that later pci_restore_state()
3461 * won't turn the board on before the rest of config space is ready.
3462 */
3463 pci_disable_device(pdev);
3464 pci_save_state(pdev);
edd16368 3465
1df8552a
SC
3466 /* find the first memory BAR, so we can find the cfg table */
3467 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3468 if (rc)
3469 return rc;
3470 vaddr = remap_pci_mem(paddr, 0x250);
3471 if (!vaddr)
3472 return -ENOMEM;
edd16368 3473
1df8552a
SC
3474 /* find cfgtable in order to check if reset via doorbell is supported */
3475 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3476 &cfg_base_addr_index, &cfg_offset);
3477 if (rc)
3478 goto unmap_vaddr;
3479 cfgtable = remap_pci_mem(pci_resource_start(pdev,
3480 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
3481 if (!cfgtable) {
3482 rc = -ENOMEM;
3483 goto unmap_vaddr;
3484 }
580ada3c
SC
3485 rc = write_driver_ver_to_cfgtable(cfgtable);
3486 if (rc)
3487 goto unmap_vaddr;
edd16368 3488
cf0b08d0
SC
3489 /* If reset via doorbell register is supported, use that.
3490 * There are two such methods. Favor the newest method.
3491 */
1df8552a 3492 misc_fw_support = readl(&cfgtable->misc_fw_support);
cf0b08d0
SC
3493 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
3494 if (use_doorbell) {
3495 use_doorbell = DOORBELL_CTLR_RESET2;
3496 } else {
3497 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
3498 if (use_doorbell) {
fba63097
MM
3499 dev_warn(&pdev->dev, "Soft reset not supported. "
3500 "Firmware update is required.\n");
64670ac8 3501 rc = -ENOTSUPP; /* try soft reset */
cf0b08d0
SC
3502 goto unmap_cfgtable;
3503 }
3504 }
edd16368 3505
1df8552a
SC
3506 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3507 if (rc)
3508 goto unmap_cfgtable;
edd16368 3509
270d05de
SC
3510 pci_restore_state(pdev);
3511 rc = pci_enable_device(pdev);
3512 if (rc) {
3513 dev_warn(&pdev->dev, "failed to enable device.\n");
3514 goto unmap_cfgtable;
edd16368 3515 }
270d05de 3516 pci_write_config_word(pdev, PCI_COMMAND, command_register);
edd16368 3517
1df8552a
SC
3518 /* Some devices (notably the HP Smart Array 5i Controller)
3519 * need a little pause here */
3520 msleep(HPSA_POST_RESET_PAUSE_MSECS);
3521
fe5389c8 3522 /* Wait for board to become not ready, then ready. */
2b870cb3 3523 dev_info(&pdev->dev, "Waiting for board to reset.\n");
fe5389c8 3524 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
64670ac8 3525 if (rc) {
fe5389c8 3526 dev_warn(&pdev->dev,
64670ac8
SC
3527 "failed waiting for board to reset."
3528 " Will try soft reset.\n");
3529 rc = -ENOTSUPP; /* Not expected, but try soft reset later */
3530 goto unmap_cfgtable;
3531 }
fe5389c8
SC
3532 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
3533 if (rc) {
3534 dev_warn(&pdev->dev,
64670ac8
SC
3535 "failed waiting for board to become ready "
3536 "after hard reset\n");
fe5389c8
SC
3537 goto unmap_cfgtable;
3538 }
fe5389c8 3539
580ada3c
SC
3540 rc = controller_reset_failed(cfgtable);
3541 if (rc < 0)
3542 goto unmap_cfgtable;
3543 if (rc) {
64670ac8
SC
3544 dev_warn(&pdev->dev, "Unable to successfully reset "
3545 "controller. Will try soft reset.\n");
3546 rc = -ENOTSUPP;
580ada3c 3547 } else {
64670ac8 3548 dev_info(&pdev->dev, "board ready after hard reset.\n");
1df8552a
SC
3549 }
3550
3551unmap_cfgtable:
3552 iounmap(cfgtable);
3553
3554unmap_vaddr:
3555 iounmap(vaddr);
3556 return rc;
edd16368
SC
3557}
3558
3559/*
3560 * We cannot read the structure directly; for portability we must use
3561 * the I/O functions.
3562 * This is for debug only.
3563 */
edd16368
SC
3564static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
3565{
58f8665c 3566#ifdef HPSA_DEBUG
edd16368
SC
3567 int i;
3568 char temp_name[17];
3569
3570 dev_info(dev, "Controller Configuration information\n");
3571 dev_info(dev, "------------------------------------\n");
3572 for (i = 0; i < 4; i++)
3573 temp_name[i] = readb(&(tb->Signature[i]));
3574 temp_name[4] = '\0';
3575 dev_info(dev, " Signature = %s\n", temp_name);
3576 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
3577 dev_info(dev, " Transport methods supported = 0x%x\n",
3578 readl(&(tb->TransportSupport)));
3579 dev_info(dev, " Transport methods active = 0x%x\n",
3580 readl(&(tb->TransportActive)));
3581 dev_info(dev, " Requested transport Method = 0x%x\n",
3582 readl(&(tb->HostWrite.TransportRequest)));
3583 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
3584 readl(&(tb->HostWrite.CoalIntDelay)));
3585 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
3586 readl(&(tb->HostWrite.CoalIntCount)));
3587 dev_info(dev, " Max outstanding commands = %d\n",
3588 readl(&(tb->CmdsOutMax)));
3589 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
3590 for (i = 0; i < 16; i++)
3591 temp_name[i] = readb(&(tb->ServerName[i]));
3592 temp_name[16] = '\0';
3593 dev_info(dev, " Server Name = %s\n", temp_name);
3594 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
3595 readl(&(tb->HeartBeat)));
edd16368 3596#endif /* HPSA_DEBUG */
58f8665c 3597}
edd16368
SC
3598
3599static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
3600{
3601 int i, offset, mem_type, bar_type;
3602
3603 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
3604 return 0;
3605 offset = 0;
3606 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
3607 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
3608 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
3609 offset += 4;
3610 else {
3611 mem_type = pci_resource_flags(pdev, i) &
3612 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
3613 switch (mem_type) {
3614 case PCI_BASE_ADDRESS_MEM_TYPE_32:
3615 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
3616 offset += 4; /* 32 bit */
3617 break;
3618 case PCI_BASE_ADDRESS_MEM_TYPE_64:
3619 offset += 8;
3620 break;
3621 default: /* reserved in PCI 2.2 */
3622 dev_warn(&pdev->dev,
3623 "base address is invalid\n");
3624 return -1;
3626 }
3627 }
3628 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
3629 return i + 1;
3630 }
3631 return -1;
3632}
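As a concrete walk through the arithmetic above (a hypothetical BAR layout, not a real controller): if resource 0 is a 64-bit memory BAR, the running offset reaches 8 after processing it, so a config-space address of PCI_BASE_ADDRESS_0 + 8 resolves to resource index 1:

#include <stdio.h>

int main(void)
{
	int offset = 0;
	int target = 8;		/* pci_bar_addr - PCI_BASE_ADDRESS_0 */
	int i = 0;

	offset += 8;		/* resource 0: a 64-bit memory BAR */
	if (offset == target)
		printf("cfg BAR is resource %d\n", i + 1);	/* prints 1 */
	return 0;
}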
3633
3634/* If MSI/MSI-X is supported by the kernel we will try to enable it on
3635 * controllers that are capable. If not, we use IO-APIC mode.
3636 */
3637
6b3f4c52 3638static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
edd16368
SC
3639{
3640#ifdef CONFIG_PCI_MSI
3641 int err;
3642 struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1},
3643 {0, 2}, {0, 3}
3644 };
3645
3646 /* Some boards advertise MSI but don't really support it */
6b3f4c52
SC
3647 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
3648 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
edd16368 3649 goto default_int_mode;
55c06c71
SC
3650 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
3651 dev_info(&h->pdev->dev, "MSIX\n");
3652 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4);
edd16368
SC
3653 if (!err) {
3654 h->intr[0] = hpsa_msix_entries[0].vector;
3655 h->intr[1] = hpsa_msix_entries[1].vector;
3656 h->intr[2] = hpsa_msix_entries[2].vector;
3657 h->intr[3] = hpsa_msix_entries[3].vector;
3658 h->msix_vector = 1;
3659 return;
3660 }
3661 if (err > 0) {
55c06c71 3662 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
edd16368
SC
3663 "available\n", err);
3664 goto default_int_mode;
3665 } else {
55c06c71 3666 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
edd16368
SC
3667 err);
3668 goto default_int_mode;
3669 }
3670 }
55c06c71
SC
3671 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
3672 dev_info(&h->pdev->dev, "MSI\n");
3673 if (!pci_enable_msi(h->pdev))
edd16368
SC
3674 h->msi_vector = 1;
3675 else
55c06c71 3676 dev_warn(&h->pdev->dev, "MSI init failed\n");
edd16368
SC
3677 }
3678default_int_mode:
3679#endif /* CONFIG_PCI_MSI */
3680 /* if we get here we're going to use the default interrupt mode */
a9a3a273 3681 h->intr[h->intr_mode] = h->pdev->irq;
edd16368
SC
3682}
3683
e5c880d1
SC
3684static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
3685{
3686 int i;
3687 u32 subsystem_vendor_id, subsystem_device_id;
3688
3689 subsystem_vendor_id = pdev->subsystem_vendor;
3690 subsystem_device_id = pdev->subsystem_device;
3691 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
3692 subsystem_vendor_id;
3693
3694 for (i = 0; i < ARRAY_SIZE(products); i++)
3695 if (*board_id == products[i].board_id)
3696 return i;
3697
6798cc0a
SC
3698 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
3699 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
3700 !hpsa_allow_any) {
e5c880d1
SC
3701 dev_warn(&pdev->dev, "unrecognized board ID: "
3702 "0x%08x, ignoring.\n", *board_id);
3703 return -ENODEV;
3704 }
3705 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
3706}
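For example, packing the subsystem IDs from the first entry of the PCI table (vendor 0x103C, device 0x3241) the way hpsa_lookup_board_id() does yields board_id 0x3241103C; a short stand-alone check:

#include <stdio.h>

int main(void)
{
	unsigned int subsystem_vendor_id = 0x103C;	/* HP */
	unsigned int subsystem_device_id = 0x3241;
	unsigned int board_id = ((subsystem_device_id << 16) & 0xffff0000) |
				subsystem_vendor_id;

	printf("board_id = 0x%08x\n", board_id);	/* 0x3241103C */
	return 0;
}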
3707
12d2cd47 3708static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
3a7774ce
SC
3709 unsigned long *memory_bar)
3710{
3711 int i;
3712
3713 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
12d2cd47 3714 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3a7774ce 3715 /* addressing mode bits already removed */
12d2cd47
SC
3716 *memory_bar = pci_resource_start(pdev, i);
3717 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
3a7774ce
SC
3718 *memory_bar);
3719 return 0;
3720 }
12d2cd47 3721 dev_warn(&pdev->dev, "no memory BAR found\n");
3a7774ce
SC
3722 return -ENODEV;
3723}
3724
fe5389c8
SC
3725static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
3726 void __iomem *vaddr, int wait_for_ready)
2c4c8c8b 3727{
fe5389c8 3728 int i, iterations;
2c4c8c8b 3729 u32 scratchpad;
fe5389c8
SC
3730 if (wait_for_ready)
3731 iterations = HPSA_BOARD_READY_ITERATIONS;
3732 else
3733 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
2c4c8c8b 3734
fe5389c8
SC
3735 for (i = 0; i < iterations; i++) {
3736 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
3737 if (wait_for_ready) {
3738 if (scratchpad == HPSA_FIRMWARE_READY)
3739 return 0;
3740 } else {
3741 if (scratchpad != HPSA_FIRMWARE_READY)
3742 return 0;
3743 }
2c4c8c8b
SC
3744 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
3745 }
fe5389c8 3746 dev_warn(&pdev->dev, "board not ready, timed out.\n");
2c4c8c8b
SC
3747 return -ENODEV;
3748}
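The poll loop above reduces to "read the scratchpad until it matches (or stops matching) the ready signature, or give up". A compressed sketch, with a counter-driven fake_scratchpad() standing in for readl() of the scratchpad register and an illustrative ready value:

#include <stdio.h>

#define FIRMWARE_READY 0xffff0000	/* illustrative ready signature */

/* Fake scratchpad: reports ready starting with the 4th poll. */
static unsigned int fake_scratchpad(int poll)
{
	return poll < 3 ? 0 : FIRMWARE_READY;
}

int main(void)
{
	int i, iterations = 10;	/* stands in for HPSA_BOARD_READY_ITERATIONS */

	for (i = 0; i < iterations; i++) {
		if (fake_scratchpad(i) == FIRMWARE_READY) {
			printf("board ready after %d polls\n", i + 1);
			return 0;
		}
		/* the driver sleeps HPSA_BOARD_READY_POLL_INTERVAL_MSECS here */
	}
	printf("board not ready, timed out\n");
	return 1;
}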
3749
a51fd47f
SC
3750static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
3751 void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
3752 u64 *cfg_offset)
3753{
3754 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
3755 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
3756 *cfg_base_addr &= (u32) 0x0000ffff;
3757 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
3758 if (*cfg_base_addr_index == -1) {
3759 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
3760 return -ENODEV;
3761 }
3762 return 0;
3763}
3764
77c4495c 3765static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
edd16368 3766{
01a02ffc
SC
3767 u64 cfg_offset;
3768 u32 cfg_base_addr;
3769 u64 cfg_base_addr_index;
303932fd 3770 u32 trans_offset;
a51fd47f 3771 int rc;
77c4495c 3772
a51fd47f
SC
3773 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
3774 &cfg_base_addr_index, &cfg_offset);
3775 if (rc)
3776 return rc;
77c4495c 3777 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
a51fd47f 3778 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
77c4495c
SC
3779 if (!h->cfgtable)
3780 return -ENOMEM;
580ada3c
SC
3781 rc = write_driver_ver_to_cfgtable(h->cfgtable);
3782 if (rc)
3783 return rc;
77c4495c 3784 /* Find performant mode table. */
a51fd47f 3785 trans_offset = readl(&h->cfgtable->TransMethodOffset);
77c4495c
SC
3786 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
3787 cfg_base_addr_index)+cfg_offset+trans_offset,
3788 sizeof(*h->transtable));
3789 if (!h->transtable)
3790 return -ENOMEM;
3791 return 0;
3792}
3793
cba3d38b
SC
3794static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
3795{
3796 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
72ceeaec
SC
3797
3798 /* Limit commands in memory limited kdump scenario. */
3799 if (reset_devices && h->max_commands > 32)
3800 h->max_commands = 32;
3801
cba3d38b
SC
3802 if (h->max_commands < 16) {
3803 dev_warn(&h->pdev->dev, "Controller reports "
3804 "max supported commands of %d, an obvious lie. "
3805 "Using 16. Ensure that firmware is up to date.\n",
3806 h->max_commands);
3807 h->max_commands = 16;
3808 }
3809}
3810
b93d7536
SC
3811/* Interrogate the hardware for some limits:
3812 * max commands, max SG elements without chaining, and with chaining,
3813 * SG chain block size, etc.
3814 */
3815static void __devinit hpsa_find_board_params(struct ctlr_info *h)
3816{
cba3d38b 3817 hpsa_get_max_perf_mode_cmds(h);
b93d7536
SC
3818 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
3819 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
3820 /*
3821 * Limit in-command s/g elements to 32 to save DMA'able memory.
3822 * However, the spec says if 0, use 31.
3823 */
3824 h->max_cmd_sg_entries = 31;
3825 if (h->maxsgentries > 512) {
3826 h->max_cmd_sg_entries = 32;
3827 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
3828 h->maxsgentries--; /* save one for chain pointer */
3829 } else {
3830 h->maxsgentries = 31; /* default to traditional values */
3831 h->chainsize = 0;
3832 }
3833}
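A worked instance of the scatter-gather limits above, assuming (purely for illustration) a controller that reports 1024 SG elements:

#include <stdio.h>

int main(void)
{
	int maxsgentries = 1024;	/* pretend cfgtable->MaxScatterGatherElements */
	int max_cmd_sg_entries = 31;
	int chainsize = 0;

	if (maxsgentries > 512) {
		max_cmd_sg_entries = 32;
		chainsize = maxsgentries - max_cmd_sg_entries + 1;	/* 993 */
		maxsgentries--;	/* reserve one slot for the chain pointer */
	}
	printf("in-command SGs=%d, chain block=%d, total usable=%d\n",
		max_cmd_sg_entries, chainsize, maxsgentries);	/* 32, 993, 1023 */
	return 0;
}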
3834
76c46e49
SC
3835static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
3836{
0fc9fd40 3837 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
76c46e49
SC
3838 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
3839 return false;
3840 }
3841 return true;
3842}
3843
f7c39101
SC
3844/* Need to enable prefetch in the SCSI core for 6400 in x86 */
3845static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h)
3846{
3847#ifdef CONFIG_X86
3848 u32 prefetch;
3849
3850 prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
3851 prefetch |= 0x100;
3852 writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
3853#endif
3854}
3855
3d0eab67
SC
3856/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
3857 * in a prefetch beyond physical memory.
3858 */
3859static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
3860{
3861 u32 dma_prefetch;
3862
3863 if (h->board_id != 0x3225103C)
3864 return;
3865 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
3866 dma_prefetch |= 0x8000;
3867 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
3868}
3869
3f4336f3 3870static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
eb6b2ae9
SC
3871{
3872 int i;
6eaf46fd
SC
3873 u32 doorbell_value;
3874 unsigned long flags;
eb6b2ae9
SC
3875
3876 /* under certain very rare conditions, this can take a while.
3877 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
3878 * as we enter this code.)
3879 */
3880 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6eaf46fd
SC
3881 spin_lock_irqsave(&h->lock, flags);
3882 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
3883 spin_unlock_irqrestore(&h->lock, flags);
382be668 3884 if (!(doorbell_value & CFGTBL_ChangeReq))
eb6b2ae9
SC
3885 break;
3886 /* delay and try again */
60d3f5b0 3887 usleep_range(10000, 20000);
eb6b2ae9 3888 }
3f4336f3
SC
3889}
3890
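The doorbell handshake that hpsa_wait_for_mode_change_ack() completes looks like this in miniature (a sketch only; fake_doorbell_read() stands in for readl() of SA5_DOORBELL, the CHANGE_REQ bit value is illustrative, and the fake firmware acks on the third poll):

#include <stdio.h>

#define CHANGE_REQ	0x00000001	/* illustrative CFGTBL_ChangeReq bit */
#define MAX_WAIT	30000

static unsigned int fake_doorbell_read(int poll)
{
	return poll < 2 ? CHANGE_REQ : 0;	/* firmware acks on 3rd poll */
}

int main(void)
{
	int i;

	/* The driver first writes HostWrite.TransportRequest, then rings
	 * CHANGE_REQ into the doorbell; here we just poll for the ack. */
	for (i = 0; i < MAX_WAIT; i++)
		if (!(fake_doorbell_read(i) & CHANGE_REQ))
			break;

	printf("mode change %s after %d polls\n",
		i < MAX_WAIT ? "acked" : "timed out", i + 1);
	return 0;
}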
3891static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
3892{
3893 u32 trans_support;
3894
3895 trans_support = readl(&(h->cfgtable->TransportSupport));
3896 if (!(trans_support & SIMPLE_MODE))
3897 return -ENOTSUPP;
3898
3899 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
3900 /* Update the field, and then ring the doorbell */
3901 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
3902 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3903 hpsa_wait_for_mode_change_ack(h);
eb6b2ae9 3904 print_cfg_table(&h->pdev->dev, h->cfgtable);
eb6b2ae9
SC
3905 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
3906 dev_warn(&h->pdev->dev,
3907 "unable to get board into simple mode\n");
3908 return -ENODEV;
3909 }
960a30e7 3910 h->transMethod = CFGTBL_Trans_Simple;
eb6b2ae9
SC
3911 return 0;
3912}
3913
77c4495c
SC
3914static int __devinit hpsa_pci_init(struct ctlr_info *h)
3915{
eb6b2ae9 3916 int prod_index, err;
edd16368 3917
e5c880d1
SC
3918 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
3919 if (prod_index < 0)
3920 return -ENODEV;
3921 h->product_name = products[prod_index].product_name;
3922 h->access = *(products[prod_index].access);
edd16368 3923
e5a44df8
MG
3924 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
3925 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
3926
55c06c71 3927 err = pci_enable_device(h->pdev);
edd16368 3928 if (err) {
55c06c71 3929 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
edd16368
SC
3930 return err;
3931 }
3932
5cb460a6
SC
3933 /* Enable bus mastering (pci_disable_device may disable this) */
3934 pci_set_master(h->pdev);
3935
f79cfec6 3936 err = pci_request_regions(h->pdev, HPSA);
edd16368 3937 if (err) {
55c06c71
SC
3938 dev_err(&h->pdev->dev,
3939 "cannot obtain PCI resources, aborting\n");
edd16368
SC
3940 return err;
3941 }
6b3f4c52 3942 hpsa_interrupt_mode(h);
12d2cd47 3943 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
3a7774ce 3944 if (err)
edd16368 3945 goto err_out_free_res;
edd16368 3946 h->vaddr = remap_pci_mem(h->paddr, 0x250);
204892e9
SC
3947 if (!h->vaddr) {
3948 err = -ENOMEM;
3949 goto err_out_free_res;
3950 }
fe5389c8 3951 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
2c4c8c8b 3952 if (err)
edd16368 3953 goto err_out_free_res;
77c4495c
SC
3954 err = hpsa_find_cfgtables(h);
3955 if (err)
edd16368 3956 goto err_out_free_res;
b93d7536 3957 hpsa_find_board_params(h);
edd16368 3958
76c46e49 3959 if (!hpsa_CISS_signature_present(h)) {
edd16368
SC
3960 err = -ENODEV;
3961 goto err_out_free_res;
3962 }
f7c39101 3963 hpsa_enable_scsi_prefetch(h);
3d0eab67 3964 hpsa_p600_dma_prefetch_quirk(h);
eb6b2ae9
SC
3965 err = hpsa_enter_simple_mode(h);
3966 if (err)
edd16368 3967 goto err_out_free_res;
edd16368
SC
3968 return 0;
3969
3970err_out_free_res:
204892e9
SC
3971 if (h->transtable)
3972 iounmap(h->transtable);
3973 if (h->cfgtable)
3974 iounmap(h->cfgtable);
3975 if (h->vaddr)
3976 iounmap(h->vaddr);
f0bd0b68 3977 pci_disable_device(h->pdev);
55c06c71 3978 pci_release_regions(h->pdev);
edd16368
SC
3979 return err;
3980}
3981
339b2b14
SC
3982static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
3983{
3984 int rc;
3985
3986#define HBA_INQUIRY_BYTE_COUNT 64
3987 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
3988 if (!h->hba_inquiry_data)
3989 return;
3990 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
3991 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
3992 if (rc != 0) {
3993 kfree(h->hba_inquiry_data);
3994 h->hba_inquiry_data = NULL;
3995 }
3996}
3997
4c2a8c40
SC
3998static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
3999{
1df8552a 4000 int rc, i;
4c2a8c40
SC
4001
4002 if (!reset_devices)
4003 return 0;
4004
1df8552a
SC
4005 /* Reset the controller with a PCI power-cycle or via doorbell */
4006 rc = hpsa_kdump_hard_reset_controller(pdev);
4c2a8c40 4007
1df8552a
SC
4008 /* -ENOTSUPP here means we cannot reset the controller
4009 * but it's already (and still) up and running in
18867659
SC
4010 * "performant mode". Or, it might be 640x, which can't reset
4011 * due to concerns about shared bbwc between the 6402/6404 pair.
1df8552a
SC
4012 */
4013 if (rc == -ENOTSUPP)
64670ac8 4014 return rc; /* just try to do the kdump anyhow. */
1df8552a
SC
4015 if (rc)
4016 return -ENODEV;
4c2a8c40
SC
4017
4018 /* Now try to get the controller to respond to a no-op */
2b870cb3 4019 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
4c2a8c40
SC
4020 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
4021 if (hpsa_noop(pdev) == 0)
4022 break;
4023 else
4024 dev_warn(&pdev->dev, "no-op failed%s\n",
4025 (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : ""));
4026 }
4027 return 0;
4028}
4029
2e9d1b36
SC
4030static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
4031{
4032 h->cmd_pool_bits = kzalloc(
4033 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
4034 sizeof(unsigned long), GFP_KERNEL);
4035 h->cmd_pool = pci_alloc_consistent(h->pdev,
4036 h->nr_cmds * sizeof(*h->cmd_pool),
4037 &(h->cmd_pool_dhandle));
4038 h->errinfo_pool = pci_alloc_consistent(h->pdev,
4039 h->nr_cmds * sizeof(*h->errinfo_pool),
4040 &(h->errinfo_pool_dhandle));
4041 if ((h->cmd_pool_bits == NULL)
4042 || (h->cmd_pool == NULL)
4043 || (h->errinfo_pool == NULL)) {
4044 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
4045 return -ENOMEM;
4046 }
4047 return 0;
4048}
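The cmd_pool_bits sizing above is just a bitmap allocation: one bit per command slot, rounded up to whole longs. For a hypothetical nr_cmds of 100 on a 64-bit machine that is ceil(100/64) = 2 longs, 16 bytes:

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int nr_cmds = 100;	/* hypothetical command-slot count */
	size_t bytes = DIV_ROUND_UP(nr_cmds, BITS_PER_LONG) *
			sizeof(unsigned long);

	printf("cmd_pool_bits = %zu bytes\n", bytes);	/* 16 on LP64 */
	return 0;
}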
4049
4050static void hpsa_free_cmd_pool(struct ctlr_info *h)
4051{
4052 kfree(h->cmd_pool_bits);
4053 if (h->cmd_pool)
4054 pci_free_consistent(h->pdev,
4055 h->nr_cmds * sizeof(struct CommandList),
4056 h->cmd_pool, h->cmd_pool_dhandle);
4057 if (h->errinfo_pool)
4058 pci_free_consistent(h->pdev,
4059 h->nr_cmds * sizeof(struct ErrorInfo),
4060 h->errinfo_pool,
4061 h->errinfo_pool_dhandle);
4062}
4063
0ae01a32
SC
4064static int hpsa_request_irq(struct ctlr_info *h,
4065 irqreturn_t (*msixhandler)(int, void *),
4066 irqreturn_t (*intxhandler)(int, void *))
4067{
4068 int rc;
4069
4070 if (h->msix_vector || h->msi_vector)
4071 rc = request_irq(h->intr[h->intr_mode], msixhandler,
45bcf018 4072 0, h->devname, h);
0ae01a32
SC
4073 else
4074 rc = request_irq(h->intr[h->intr_mode], intxhandler,
45bcf018 4075 IRQF_SHARED, h->devname, h);
0ae01a32
SC
4076 if (rc) {
4077 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
4078 h->intr[h->intr_mode], h->devname);
4079 return -ENODEV;
4080 }
4081 return 0;
4082}
4083
64670ac8
SC
4084static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
4085{
4086 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
4087 HPSA_RESET_TYPE_CONTROLLER)) {
4088 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
4089 return -EIO;
4090 }
4091
4092 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
4093 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
4094 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
4095 return -1;
4096 }
4097
4098 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
4099 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
4100 dev_warn(&h->pdev->dev, "Board failed to become ready "
4101 "after soft reset.\n");
4102 return -1;
4103 }
4104
4105 return 0;
4106}
4107
4108static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
4109{
4110 free_irq(h->intr[h->intr_mode], h);
4111#ifdef CONFIG_PCI_MSI
4112 if (h->msix_vector)
4113 pci_disable_msix(h->pdev);
4114 else if (h->msi_vector)
4115 pci_disable_msi(h->pdev);
4116#endif /* CONFIG_PCI_MSI */
4117 hpsa_free_sg_chain_blocks(h);
4118 hpsa_free_cmd_pool(h);
4119 kfree(h->blockFetchTable);
4120 pci_free_consistent(h->pdev, h->reply_pool_size,
4121 h->reply_pool, h->reply_pool_dhandle);
4122 if (h->vaddr)
4123 iounmap(h->vaddr);
4124 if (h->transtable)
4125 iounmap(h->transtable);
4126 if (h->cfgtable)
4127 iounmap(h->cfgtable);
4128 pci_release_regions(h->pdev);
4129 kfree(h);
4130}
4131
a0c12413
SC
4132static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h)
4133{
4134 assert_spin_locked(&lockup_detector_lock);
4135 if (!hpsa_lockup_detector)
4136 return;
4137 if (h->lockup_detected)
4138 return; /* already stopped the lockup detector */
4139 list_del(&h->lockup_list);
4140}
4141
4142/* Called when controller lockup detected. */
4143static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
4144{
4145 struct CommandList *c = NULL;
4146
4147 assert_spin_locked(&h->lock);
4148 /* Mark all outstanding commands as failed and complete them. */
4149 while (!list_empty(list)) {
4150 c = list_entry(list->next, struct CommandList, list);
4151 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
4152 finish_cmd(c, c->Header.Tag.lower);
4153 }
4154}
4155
4156static void controller_lockup_detected(struct ctlr_info *h)
4157{
4158 unsigned long flags;
4159
4160 assert_spin_locked(&lockup_detector_lock);
4161 remove_ctlr_from_lockup_detector_list(h);
4162 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4163 spin_lock_irqsave(&h->lock, flags);
4164 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
4165 spin_unlock_irqrestore(&h->lock, flags);
4166 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
4167 h->lockup_detected);
4168 pci_disable_device(h->pdev);
4169 spin_lock_irqsave(&h->lock, flags);
4170 fail_all_cmds_on_list(h, &h->cmpQ);
4171 fail_all_cmds_on_list(h, &h->reqQ);
4172 spin_unlock_irqrestore(&h->lock, flags);
4173}
4174
4175#define HEARTBEAT_SAMPLE_INTERVAL (10 * HZ)
4176#define HEARTBEAT_CHECK_MINIMUM_INTERVAL (HEARTBEAT_SAMPLE_INTERVAL / 2)
4177
4178static void detect_controller_lockup(struct ctlr_info *h)
4179{
4180 u64 now;
4181 u32 heartbeat;
4182 unsigned long flags;
4183
4184 assert_spin_locked(&lockup_detector_lock);
4185 now = get_jiffies_64();
4186 /* If we've received an interrupt recently, we're ok. */
4187 if (time_after64(h->last_intr_timestamp +
4188 (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
4189 return;
4190
4191 /*
4192 * If we've already checked the heartbeat recently, we're ok.
4193 * This could happen if someone sends us a signal. We
4194 * otherwise don't care about signals in this thread.
4195 */
4196 if (time_after64(h->last_heartbeat_timestamp +
4197 (HEARTBEAT_CHECK_MINIMUM_INTERVAL), now))
4198 return;
4199
4200 /* If heartbeat has not changed since we last looked, we're not ok. */
4201 spin_lock_irqsave(&h->lock, flags);
4202 heartbeat = readl(&h->cfgtable->HeartBeat);
4203 spin_unlock_irqrestore(&h->lock, flags);
4204 if (h->last_heartbeat == heartbeat) {
4205 controller_lockup_detected(h);
4206 return;
4207 }
4208
4209 /* We're ok. */
4210 h->last_heartbeat = heartbeat;
4211 h->last_heartbeat_timestamp = now;
4212}
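Stripped of the locking and the timestamp throttling, the lockup test above is just "sample the heartbeat twice; no movement means no firmware progress". A minimal sketch (fake_heartbeat stands in for readl(&h->cfgtable->HeartBeat)):

#include <stdio.h>

static unsigned int fake_heartbeat = 0x1234;	/* stand-in for the MMIO counter */

int main(void)
{
	unsigned int last_heartbeat = fake_heartbeat;	/* earlier sample */

	/* ... a healthy firmware would have bumped the counter by now ... */
	if (fake_heartbeat == last_heartbeat)
		printf("controller lockup detected\n");
	else
		printf("controller alive, heartbeat = 0x%x\n", fake_heartbeat);
	return 0;
}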
4213
4214static int detect_controller_lockup_thread(void *notused)
4215{
4216 struct ctlr_info *h;
4217 unsigned long flags;
4218
4219 while (1) {
4220 struct list_head *this, *tmp;
4221
4222 schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL);
4223 if (kthread_should_stop())
4224 break;
4225 spin_lock_irqsave(&lockup_detector_lock, flags);
4226 list_for_each_safe(this, tmp, &hpsa_ctlr_list) {
4227 h = list_entry(this, struct ctlr_info, lockup_list);
4228 detect_controller_lockup(h);
4229 }
4230 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4231 }
4232 return 0;
4233}
4234
4235static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h)
4236{
4237 unsigned long flags;
4238
4239 spin_lock_irqsave(&lockup_detector_lock, flags);
4240 list_add_tail(&h->lockup_list, &hpsa_ctlr_list);
4241 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4242}
4243
4244static void start_controller_lockup_detector(struct ctlr_info *h)
4245{
4246 /* Start the lockup detector thread if not already started */
4247 if (!hpsa_lockup_detector) {
4248 spin_lock_init(&lockup_detector_lock);
4249 hpsa_lockup_detector =
4250 kthread_run(detect_controller_lockup_thread,
f79cfec6 4251 NULL, HPSA);
a0c12413
SC
4252 }
4253 if (!hpsa_lockup_detector) {
4254 dev_warn(&h->pdev->dev,
4255 "Could not start lockup detector thread\n");
4256 return;
4257 }
4258 add_ctlr_to_lockup_detector_list(h);
4259}
4260
4261static void stop_controller_lockup_detector(struct ctlr_info *h)
4262{
4263 unsigned long flags;
4264
4265 spin_lock_irqsave(&lockup_detector_lock, flags);
4266 remove_ctlr_from_lockup_detector_list(h);
4267 /* If the list of ctlr's to monitor is empty, stop the thread */
4268 if (list_empty(&hpsa_ctlr_list)) {
775bf277 4269 spin_unlock_irqrestore(&lockup_detector_lock, flags);
a0c12413 4270 kthread_stop(hpsa_lockup_detector);
775bf277 4271 spin_lock_irqsave(&lockup_detector_lock, flags);
a0c12413
SC
4272 hpsa_lockup_detector = NULL;
4273 }
4274 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4275}
4276
edd16368
SC
4277static int __devinit hpsa_init_one(struct pci_dev *pdev,
4278 const struct pci_device_id *ent)
4279{
4c2a8c40 4280 int dac, rc;
edd16368 4281 struct ctlr_info *h;
64670ac8
SC
4282 int try_soft_reset = 0;
4283 unsigned long flags;
edd16368
SC
4284
4285 if (number_of_controllers == 0)
4286 printk(KERN_INFO DRIVER_NAME "\n");
edd16368 4287
4c2a8c40 4288 rc = hpsa_init_reset_devices(pdev);
64670ac8
SC
4289 if (rc) {
4290 if (rc != -ENOTSUPP)
4291 return rc;
4292 /* If the reset fails in a particular way (it has no way to do
4293 * a proper hard reset, so returns -ENOTSUPP) we can try to do
4294 * a soft reset once we get the controller configured up to the
4295 * point that it can accept a command.
4296 */
4297 try_soft_reset = 1;
4298 rc = 0;
4299 }
4300
4301reinit_after_soft_reset:
edd16368 4302
303932fd
DB
4303 /* Command structures must be aligned on a 32-byte boundary because
4304 * the 5 lower bits of the address are used by the hardware and by
4305 * the driver. See comments in hpsa.h for more info.
4306 */
4307#define COMMANDLIST_ALIGNMENT 32
4308 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
edd16368
SC
4309 h = kzalloc(sizeof(*h), GFP_KERNEL);
4310 if (!h)
ecd9aad4 4311 return -ENOMEM;
edd16368 4312
55c06c71 4313 h->pdev = pdev;
a9a3a273 4314 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
9e0fc764
SC
4315 INIT_LIST_HEAD(&h->cmpQ);
4316 INIT_LIST_HEAD(&h->reqQ);
6eaf46fd
SC
4317 spin_lock_init(&h->lock);
4318 spin_lock_init(&h->scan_lock);
55c06c71 4319 rc = hpsa_pci_init(h);
ecd9aad4 4320 if (rc != 0)
edd16368
SC
4321 goto clean1;
4322
f79cfec6 4323 sprintf(h->devname, HPSA "%d", number_of_controllers);
edd16368
SC
4324 h->ctlr = number_of_controllers;
4325 number_of_controllers++;
edd16368
SC
4326
4327 /* configure PCI DMA stuff */
ecd9aad4
SC
4328 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4329 if (rc == 0) {
edd16368 4330 dac = 1;
ecd9aad4
SC
4331 } else {
4332 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4333 if (rc == 0) {
4334 dac = 0;
4335 } else {
4336 dev_err(&pdev->dev, "no suitable DMA available\n");
4337 goto clean1;
4338 }
edd16368
SC
4339 }
4340
4341 /* make sure the board interrupts are off */
4342 h->access.set_intr_mask(h, HPSA_INTR_OFF);
10f66018 4343
0ae01a32 4344 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
edd16368 4345 goto clean2;
303932fd
DB
4346 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
4347 h->devname, pdev->device,
a9a3a273 4348 h->intr[h->intr_mode], dac ? "" : " not");
2e9d1b36 4349 if (hpsa_allocate_cmd_pool(h))
edd16368 4350 goto clean4;
33a2ffce
SC
4351 if (hpsa_allocate_sg_chain_blocks(h))
4352 goto clean4;
a08a8471
SC
4353 init_waitqueue_head(&h->scan_wait_queue);
4354 h->scan_finished = 1; /* no scan currently in progress */
edd16368
SC
4355
4356 pci_set_drvdata(pdev, h);
9a41338e
SC
4357 h->ndevices = 0;
4358 h->scsi_host = NULL;
4359 spin_lock_init(&h->devlock);
64670ac8
SC
4360 hpsa_put_ctlr_into_performant_mode(h);
4361
4362 /* At this point, the controller is ready to take commands.
4363 * Now, if reset_devices and the hard reset didn't work, try
4364 * the soft reset and see if that works.
4365 */
4366 if (try_soft_reset) {
4367
4368 /* This is kind of gross. We may or may not get a completion
4369 * from the soft reset command, and if we do, then the value
4370 * from the fifo may or may not be valid. So, we wait 10 secs
4371 * after the reset throwing away any completions we get during
4372 * that time. Unregister the interrupt handler and register
4373 * fake ones to scoop up any residual completions.
4374 */
4375 spin_lock_irqsave(&h->lock, flags);
4376 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4377 spin_unlock_irqrestore(&h->lock, flags);
4378 free_irq(h->intr[h->intr_mode], h);
4379 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4380 hpsa_intx_discard_completions);
4381 if (rc) {
4382 dev_warn(&h->pdev->dev, "Failed to request_irq after "
4383 "soft reset.\n");
4384 goto clean4;
4385 }
4386
4387 rc = hpsa_kdump_soft_reset(h);
4388 if (rc)
4389 /* Neither hard nor soft reset worked, we're hosed. */
4390 goto clean4;
4391
4392 dev_info(&h->pdev->dev, "Board READY.\n");
4393 dev_info(&h->pdev->dev,
4394 "Waiting for stale completions to drain.\n");
4395 h->access.set_intr_mask(h, HPSA_INTR_ON);
4396 msleep(10000);
4397 h->access.set_intr_mask(h, HPSA_INTR_OFF);
4398
4399 rc = controller_reset_failed(h->cfgtable);
4400 if (rc)
4401 dev_info(&h->pdev->dev,
4402 "Soft reset appears to have failed.\n");
4403
4404 /* since the controller's reset, we have to go back and re-init
4405 * everything. Easiest to just forget what we've done and do it
4406 * all over again.
4407 */
4408 hpsa_undo_allocations_after_kdump_soft_reset(h);
4409 try_soft_reset = 0;
4410 if (rc)
4411 /* don't go to clean4, we already unallocated */
4412 return -ENODEV;
4413
4414 goto reinit_after_soft_reset;
4415 }
edd16368
SC
4416
4417 /* Turn the interrupts on so we can service requests */
4418 h->access.set_intr_mask(h, HPSA_INTR_ON);
4419
339b2b14 4420 hpsa_hba_inquiry(h);
edd16368 4421 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
a0c12413 4422 start_controller_lockup_detector(h);
edd16368
SC
4423 return 1;
4424
4425clean4:
33a2ffce 4426 hpsa_free_sg_chain_blocks(h);
2e9d1b36 4427 hpsa_free_cmd_pool(h);
a9a3a273 4428 free_irq(h->intr[h->intr_mode], h);
edd16368
SC
4429clean2:
4430clean1:
edd16368 4431 kfree(h);
ecd9aad4 4432 return rc;
edd16368
SC
4433}
4434
4435static void hpsa_flush_cache(struct ctlr_info *h)
4436{
4437 char *flush_buf;
4438 struct CommandList *c;
4439
4440 flush_buf = kzalloc(4, GFP_KERNEL);
4441 if (!flush_buf)
4442 return;
4443
4444 c = cmd_special_alloc(h);
4445 if (!c) {
4446 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4447 goto out_of_memory;
4448 }
4449 fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
4450 RAID_CTLR_LUNID, TYPE_CMD);
4451 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
4452 if (c->err_info->CommandStatus != 0)
4453 dev_warn(&h->pdev->dev,
4454 "error flushing cache on controller\n");
4455 cmd_special_free(h, c);
4456out_of_memory:
4457 kfree(flush_buf);
4458}
4459
4460static void hpsa_shutdown(struct pci_dev *pdev)
4461{
4462 struct ctlr_info *h;
4463
4464 h = pci_get_drvdata(pdev);
4465 /* Turn board interrupts off and send the flush cache command;
4466 * sendcmd will turn off interrupts and send the flush,
4467 * writing all data in the battery-backed cache to disks.
4468 */
4469 hpsa_flush_cache(h);
4470 h->access.set_intr_mask(h, HPSA_INTR_OFF);
a9a3a273 4471 free_irq(h->intr[h->intr_mode], h);
edd16368
SC
4472#ifdef CONFIG_PCI_MSI
4473 if (h->msix_vector)
4474 pci_disable_msix(h->pdev);
4475 else if (h->msi_vector)
4476 pci_disable_msi(h->pdev);
4477#endif /* CONFIG_PCI_MSI */
4478}
4479
55e14e76
SC
4480static void __devexit hpsa_free_device_info(struct ctlr_info *h)
4481{
4482 int i;
4483
4484 for (i = 0; i < h->ndevices; i++)
4485 kfree(h->dev[i]);
4486}
4487
edd16368
SC
4488static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4489{
4490 struct ctlr_info *h;
4491
4492 if (pci_get_drvdata(pdev) == NULL) {
a0c12413 4493 dev_err(&pdev->dev, "unable to remove device\n");
edd16368
SC
4494 return;
4495 }
4496 h = pci_get_drvdata(pdev);
a0c12413 4497 stop_controller_lockup_detector(h);
edd16368
SC
4498 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
4499 hpsa_shutdown(pdev);
4500 iounmap(h->vaddr);
204892e9
SC
4501 iounmap(h->transtable);
4502 iounmap(h->cfgtable);
55e14e76 4503 hpsa_free_device_info(h);
33a2ffce 4504 hpsa_free_sg_chain_blocks(h);
edd16368
SC
4505 pci_free_consistent(h->pdev,
4506 h->nr_cmds * sizeof(struct CommandList),
4507 h->cmd_pool, h->cmd_pool_dhandle);
4508 pci_free_consistent(h->pdev,
4509 h->nr_cmds * sizeof(struct ErrorInfo),
4510 h->errinfo_pool, h->errinfo_pool_dhandle);
303932fd
DB
4511 pci_free_consistent(h->pdev, h->reply_pool_size,
4512 h->reply_pool, h->reply_pool_dhandle);
edd16368 4513 kfree(h->cmd_pool_bits);
303932fd 4514 kfree(h->blockFetchTable);
339b2b14 4515 kfree(h->hba_inquiry_data);
f0bd0b68 4516 pci_disable_device(pdev);
edd16368
SC
4517 pci_release_regions(pdev);
4518 pci_set_drvdata(pdev, NULL);
edd16368
SC
4519 kfree(h);
4520}
4521
4522static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
4523 __attribute__((unused)) pm_message_t state)
4524{
4525 return -ENOSYS;
4526}
4527
4528static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
4529{
4530 return -ENOSYS;
4531}
4532
4533static struct pci_driver hpsa_pci_driver = {
f79cfec6 4534 .name = HPSA,
edd16368
SC
4535 .probe = hpsa_init_one,
4536 .remove = __devexit_p(hpsa_remove_one),
4537 .id_table = hpsa_pci_device_id, /* id_table */
4538 .shutdown = hpsa_shutdown,
4539 .suspend = hpsa_suspend,
4540 .resume = hpsa_resume,
4541};
4542
303932fd
DB
4543/* Fill in bucket_map[], given nsgs (the max number of
4544 * scatter gather elements supported) and bucket[],
4545 * which is an array of 8 integers. The bucket[] array
4546 * contains 8 different DMA transfer sizes (in 16
4547 * byte increments) which the controller uses to fetch
4548 * commands. This function fills in bucket_map[], which
4549 * maps a given number of scatter gather elements to one of
4550 * the 8 DMA transfer sizes. The point of it is to allow the
4551 * controller to only do as much DMA as needed to fetch the
4552 * command, with the DMA transfer size encoded in the lower
4553 * bits of the command address.
4554 */
4555static void calc_bucket_map(int bucket[], int num_buckets,
4556 int nsgs, int *bucket_map)
4557{
4558 int i, j, b, size;
4559
4560 /* even a command with 0 SGs requires 4 blocks */
4561#define MINIMUM_TRANSFER_BLOCKS 4
4562#define NUM_BUCKETS 8
4563 /* Note, bucket_map must have nsgs+1 entries. */
4564 for (i = 0; i <= nsgs; i++) {
4565 /* Compute size of a command with i SG entries */
4566 size = i + MINIMUM_TRANSFER_BLOCKS;
4567 b = num_buckets; /* Assume the biggest bucket */
4568 /* Find the bucket that is just big enough */
4569 for (j = 0; j < num_buckets; j++) {
4570 if (bucket[j] >= size) {
4571 b = j;
4572 break;
4573 }
4574 }
4575 /* for a command with i SG entries, use bucket b. */
4576 bucket_map[i] = b;
4577 }
4578}
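To see the mapping concretely: with the bft[] table used below and MINIMUM_TRANSFER_BLOCKS of 4, a command carrying 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks, and the smallest bucket that fits is bft[2] = 8. A stand-alone replay of that lookup (taking SG_ENTRIES_IN_CMD as 32 purely for illustration):

#include <stdio.h>

int main(void)
{
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 32 + 4};	/* assumed 32 SGs max */
	int nsgs = 3;			/* a command with 3 SG entries */
	int size = nsgs + 4;		/* + MINIMUM_TRANSFER_BLOCKS */
	int b = 7, j;			/* assume the biggest bucket */

	for (j = 0; j < 8; j++)
		if (bft[j] >= size) {
			b = j;		/* first bucket big enough */
			break;
		}
	printf("%d SG entries -> bucket %d (%d blocks)\n", nsgs, b, bft[b]);
	return 0;
}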
4579
960a30e7
SC
4580static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4581 u32 use_short_tags)
303932fd 4582{
6c311b57
SC
4583 int i;
4584 unsigned long register_value;
def342bd
SC
4585
4586 /* This is a bit complicated. There are 8 registers on
4587 * the controller which we write to tell it the 8 different
4588 * sizes of commands there may be. It's a way of
4589 * reducing the DMA done to fetch each command. Encoded into
4590 * each command's tag are 3 bits which communicate to the controller
4591 * which of the eight sizes that command fits within. The size of
4592 * each command depends on how many scatter gather entries there are.
4593 * Each SG entry requires 16 bytes. The eight registers are programmed
4594 * with the number of 16-byte blocks a command of that size requires.
4595 * The smallest command possible requires 5 such 16 byte blocks.
d66ae08b 4596 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd
SC
4597 * blocks. Note, this only extends to the SG entries contained
4598 * within the command block, and does not extend to chained blocks
4599 * of SG elements. bft[] contains the eight values we write to
4600 * the registers. They are not evenly distributed, but have more
4601 * sizes for small commands, and fewer sizes for larger commands.
4602 */
d66ae08b
SC
4603 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
4604 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
303932fd
DB
4605 /* 5 = 1 s/g entry or 4k
4606 * 6 = 2 s/g entry or 8k
4607 * 8 = 4 s/g entry or 16k
4608 * 10 = 6 s/g entry or 24k
4609 */
303932fd
DB
4610
4611 h->reply_pool_wraparound = 1; /* spec: init to 1 */
4612
4613 /* Controller spec: zero out this buffer. */
4614 memset(h->reply_pool, 0, h->reply_pool_size);
4615 h->reply_pool_head = h->reply_pool;
4616
d66ae08b
SC
4617 bft[7] = SG_ENTRIES_IN_CMD + 4;
4618 calc_bucket_map(bft, ARRAY_SIZE(bft),
4619 SG_ENTRIES_IN_CMD, h->blockFetchTable);
303932fd
DB
4620 for (i = 0; i < 8; i++)
4621 writel(bft[i], &h->transtable->BlockFetch[i]);
4622
4623 /* size of controller ring buffer */
4624 writel(h->max_commands, &h->transtable->RepQSize);
4625 writel(1, &h->transtable->RepQCount);
4626 writel(0, &h->transtable->RepQCtrAddrLow32);
4627 writel(0, &h->transtable->RepQCtrAddrHigh32);
4628 writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
4629 writel(0, &h->transtable->RepQAddr0High32);
960a30e7 4630 writel(CFGTBL_Trans_Performant | use_short_tags,
303932fd
DB
4631 &(h->cfgtable->HostWrite.TransportRequest));
4632 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
3f4336f3 4633 hpsa_wait_for_mode_change_ack(h);
303932fd
DB
4634 register_value = readl(&(h->cfgtable->TransportActive));
4635 if (!(register_value & CFGTBL_Trans_Performant)) {
4636 dev_warn(&h->pdev->dev, "unable to get board into"
4637 " performant mode\n");
4638 return;
4639 }
960a30e7
SC
4640 /* Change the access methods to the performant access methods */
4641 h->access = SA5_performant_access;
4642 h->transMethod = CFGTBL_Trans_Performant;
6c311b57
SC
4643}
4644
4645static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4646{
4647 u32 trans_support;
4648
02ec19c8
SC
4649 if (hpsa_simple_mode)
4650 return;
4651
6c311b57
SC
4652 trans_support = readl(&(h->cfgtable->TransportSupport));
4653 if (!(trans_support & PERFORMANT_MODE))
4654 return;
4655
cba3d38b 4656 hpsa_get_max_perf_mode_cmds(h);
6c311b57
SC
4657 /* Performant mode ring buffer and supporting data structures */
4658 h->reply_pool_size = h->max_commands * sizeof(u64);
4659 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4660 &(h->reply_pool_dhandle));
4661
4662 /* Need a block fetch table for performant mode */
d66ae08b 4663 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
6c311b57
SC
4664 sizeof(u32)), GFP_KERNEL);
4665
4666 if ((h->reply_pool == NULL)
4667 || (h->blockFetchTable == NULL))
4668 goto clean_up;
4669
960a30e7
SC
4670 hpsa_enter_performant_mode(h,
4671 trans_support & CFGTBL_Trans_use_short_tags);
303932fd
DB
4672
4673 return;
4674
4675clean_up:
4676 if (h->reply_pool)
4677 pci_free_consistent(h->pdev, h->reply_pool_size,
4678 h->reply_pool, h->reply_pool_dhandle);
4679 kfree(h->blockFetchTable);
4680}
4681
edd16368
SC
4682/*
4683 * This is it. Register the PCI driver information for the cards we control;
4684 * the OS will call our registered routines when it finds one of our cards.
4685 */
4686static int __init hpsa_init(void)
4687{
31468401 4688 return pci_register_driver(&hpsa_pci_driver);
edd16368
SC
4689}
4690
4691static void __exit hpsa_cleanup(void)
4692{
4693 pci_unregister_driver(&hpsa_pci_driver);
edd16368
SC
4694}
4695
4696module_init(hpsa_init);
4697module_exit(hpsa_cleanup);