/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.'
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.14-0"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
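/*
 * For illustration: the first PCI table entry above has subsystem vendor
 * 0x103C and subsystem device 0x3241, giving board_id 0x3241103C
 * (device in the high 16 bits, vendor in the low 16), which selects the
 * "Smart Array P212" entry below.
 */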
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

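/*
 * hpsa_cmd_busy and hpsa_cmd_idle below are never dereferenced; their
 * addresses serve as sentinel values for CommandList->scsi_cmd, marking a
 * command slot as in use by the driver itself or as not owned by any real
 * scsi_cmnd (compare hpsa_is_cmd_idle() further down).
 */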
#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static int hpsa_luns_changed(struct ctlr_info *h);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

static inline bool hpsa_is_pending_event(struct CommandList *c)
{
	return c->abort_pending || c->reset_pending;
}

/* extract sense key, asc, and ascq from sense data.  -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

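/*
 * Note on the 0xff check below: decode_sense_data() stores -1 into u8
 * outputs when the sense data cannot be normalized, which wraps to 0xff,
 * so an asc of 0xff means "no valid additional sense code was decoded".
 */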
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static u32 needs_abort_tags_swizzled[] = {
	0x323D103C, /* Smart Array P700m */
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static int ctlr_needs_abort_tags_swizzled(u32 board_id)
{
	return board_id_in_array(needs_abort_tags_swizzled,
			ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

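/*
 * In an 8-byte CISS LUN address, the top two bits of byte 3 encode the
 * addressing mode; the 0x40 pattern tested below (mode 01b) is how this
 * driver recognizes a logical (RAID volume) address as opposed to a
 * physical device address.
 */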
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

#define MAX_PATHS 8
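/*
 * hdev->path_map is a bitmask with one bit per possible path (up to
 * MAX_PATHS); a set bit means the path exists and active_path_index names
 * the one in use, so the loop below reports each path as "Active" or
 * "Inactive" by comparing i with active_path_index and testing bit (1 << i).
 */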
static ssize_t path_info_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	unsigned char phys_connector[2];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->devlock, flags);
		return -ENODEV;
	}

	bay = hdev->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1<<i;
		if (i == hdev->active_path_index)
			active = "Active";
		else if (hdev->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"[%d:%d:%d:%d] %20.20s ",
				h->scsi_host->host_no,
				hdev->bus, hdev->target, hdev->lun,
				scsi_device_type(hdev->devtype));

		if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"%s\n", active);
			continue;
		}

		box = hdev->box[i];
		memcpy(&phys_connector, &hdev->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';
		output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len,
				"PORT: %.2s ",
				phys_connector);
		if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
			hdev->expose_device) {
			if (box == 0 || box == 0xFF) {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BAY: %hhu %s\n",
					bay, active);
			} else {
				output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"BOX: %hhu BAY: %hhu %s\n",
					box, bay, active);
			}
		} else if (box != 0 && box != 0xFF) {
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "BOX: %hhu %s\n",
				box, active);
		} else
			output_len += scnprintf(buf + output_len,
				PAGE_SIZE - output_len, "%s\n", active);
	}

	spin_unlock_irqrestore(&h->devlock, flags);
	return output_len;
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
		host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);
static DEVICE_ATTR(lockup_detected, S_IRUGO,
	host_show_lockup_detected, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	&dev_attr_path_info,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	&dev_attr_lockup_detected,
	NULL,
};

#define HPSA_NRESERVED_CMDS	(HPSA_CMDS_RESERVED_FOR_ABORTS + \
		HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_configure	= hpsa_slave_configure,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};

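/*
 * The performant-mode reply queue is a ring that the controller writes and
 * the driver reads: the low bit of each entry toggles on every pass through
 * the ring, so an entry is "new" exactly when its low bit matches the
 * queue's current wraparound parity (the test in next_command() below).
 */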
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

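/*
 * Worked example (illustrative values only): in normal performant mode, a
 * command whose block fetch table entry is 3 ends up with
 * c->busaddr |= 1 | (3 << 1), i.e. low bits 0111b (performant-mode bit
 * plus pull count), as computed in set_performant_mode() below.
 */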
/*
 * set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
#define DEFAULT_REPLY_QUEUE (-1)
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
					int reply_queue)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (unlikely(!h->msix_vector))
			return;
		if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
		else
			c->Header.ReplyQueue = reply_queue % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	else
		cp->ReplyQueue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
		&h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= h->ioaccel2_blockFetchTable[0];
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c,
						int reply_queue)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/*
	 * Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
		cp->reply_queue = smp_processor_id() % h->nreply_queues;
	else
		cp->reply_queue = reply_queue % h->nreply_queues;
	/*
	 * Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
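/*
 * Note: firmware_flash_in_progress is an atomic count rather than a flag,
 * so overlapping flash commands nest; the normal sample interval is only
 * restored when the last outstanding flash completes (the
 * atomic_dec_and_test() below).
 */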
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c, int reply_queue)
{
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	case IOACCEL2_TMF:
		set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
		break;
	default:
		set_performant_mode(h, c, reply_queue);
		h->access.submit_command(h, c);
	}
}

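/*
 * enqueue_cmd_and_start_io() is the normal submission path; a command that
 * already has an abort or reset pending against it (hpsa_is_pending_event)
 * is completed via finish_cmd() instead of being (re)issued to the
 * controller.
 */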
static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
{
	if (unlikely(hpsa_is_pending_event(c)))
		return finish_cmd(c);

	__enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

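/*
 * hpsa_find_target_lun() below allocates a target number by marking every
 * target already in use on the given bus in a bitmap and then taking the
 * first clear bit; there is no free list, so allocation is O(ndevices).
 */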
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
	struct hpsa_scsi_dev_t *dev, char *description)
{
#define LABEL_SIZE 25
	char label[LABEL_SIZE];

	if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
		return;

	switch (dev->devtype) {
	case TYPE_RAID:
		snprintf(label, LABEL_SIZE, "controller");
		break;
	case TYPE_ENCLOSURE:
		snprintf(label, LABEL_SIZE, "enclosure");
		break;
	case TYPE_DISK:
	case TYPE_ZBC:
		if (dev->external)
			snprintf(label, LABEL_SIZE, "external");
		else if (!is_logical_dev_addr_mode(dev->scsi3addr))
			snprintf(label, LABEL_SIZE, "%s",
				raid_label[PHYSICAL_DRIVE]);
		else
			snprintf(label, LABEL_SIZE, "RAID-%s",
				dev->raid_level > RAID_UNKNOWN ? "?" :
				raid_label[dev->raid_level]);
		break;
	case TYPE_ROM:
		snprintf(label, LABEL_SIZE, "rom");
		break;
	case TYPE_TAPE:
		snprintf(label, LABEL_SIZE, "tape");
		break;
	case TYPE_MEDIUM_CHANGER:
		snprintf(label, LABEL_SIZE, "changer");
		break;
	default:
		snprintf(label, LABEL_SIZE, "UNKNOWN");
		break;
	}

	dev_printk(level, &h->pdev->dev,
			"scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
			h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
			description,
			scsi_device_type(dev->devtype),
			dev->vendor,
			dev->model,
			label,
			dev->offload_config ? '+' : '-',
			dev->offload_enabled ? '+' : '-',
			dev->expose_device);
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4 and 5.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	addr1[5] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		addr2[5] = 0;
		/* differ only in byte 4 and 5? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;
	hpsa_show_dev_msg(KERN_INFO, h, device,
		device->expose_device ? "added" : "masked");
	device->offload_to_be_enabled = device->offload_enabled;
	device->offload_enabled = 0;
	return 0;
}

bd9244f7 | 1284 | /* Update an entry in h->dev[] array. */ |
8aa60681 | 1285 | static void hpsa_scsi_update_entry(struct ctlr_info *h, |
bd9244f7 ST |
1286 | int entry, struct hpsa_scsi_dev_t *new_entry) |
1287 | { | |
a473d86c | 1288 | int offload_enabled; |
bd9244f7 ST |
1289 | /* assumes h->devlock is held */ |
1290 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); | |
1291 | ||
1292 | /* Raid level changed. */ | |
1293 | h->dev[entry]->raid_level = new_entry->raid_level; | |
250fb125 | 1294 | |
03383736 DB |
1295 | /* Raid offload parameters changed. Careful about the ordering. */ |
1296 | if (new_entry->offload_config && new_entry->offload_enabled) { | |
1297 | /* | |
1298 | * if drive is newly offload_enabled, we want to copy the | |
1299 | * raid map data first. If previously offload_enabled and | |
1300 | * offload_config were set, raid map data had better be | |
1301 | * the same as it was before. if raid map data is changed | |
1302 | * then it had better be the case that | |
1303 | * h->dev[entry]->offload_enabled is currently 0. | |
1304 | */ | |
1305 | h->dev[entry]->raid_map = new_entry->raid_map; | |
1306 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | |
03383736 | 1307 | } |
a3144e0b JH |
1308 | if (new_entry->hba_ioaccel_enabled) { |
1309 | h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; | |
1310 | wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */ | |
1311 | } | |
1312 | h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled; | |
250fb125 | 1313 | h->dev[entry]->offload_config = new_entry->offload_config; |
9fb0de2d | 1314 | h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; |
03383736 | 1315 | h->dev[entry]->queue_depth = new_entry->queue_depth; |
250fb125 | 1316 | |
41ce4c35 SC |
1317 | /* |
1318 | * We can turn off ioaccel offload now, but need to delay turning | |
1319 | * it on until we can update h->dev[entry]->phys_disk[], but we | |
1320 | * can't do that until all the devices are updated. | |
1321 | */ | |
1322 | h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled; | |
1323 | if (!new_entry->offload_enabled) | |
1324 | h->dev[entry]->offload_enabled = 0; | |
1325 | ||
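/*
 * Report the intended (to-be-enabled) offload state in the "updated"
 * message, then restore the current state; the actual enable is
 * deferred until phys_disk[] is consistent again (see
 * adjust_hpsa_scsi_table below).
 */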
a473d86c RE |
1326 | offload_enabled = h->dev[entry]->offload_enabled; |
1327 | h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled; | |
0d96ef5f | 1328 | hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated"); |
a473d86c | 1329 | h->dev[entry]->offload_enabled = offload_enabled; |
bd9244f7 ST |
1330 | } |
1331 | ||
2a8ccf31 | 1332 | /* Replace an entry from h->dev[] array. */ |
8aa60681 | 1333 | static void hpsa_scsi_replace_entry(struct ctlr_info *h, |
2a8ccf31 SC |
1334 | int entry, struct hpsa_scsi_dev_t *new_entry, |
1335 | struct hpsa_scsi_dev_t *added[], int *nadded, | |
1336 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | |
1337 | { | |
1338 | /* assumes h->devlock is held */ | |
cfe5badc | 1339 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
2a8ccf31 SC |
1340 | removed[*nremoved] = h->dev[entry]; |
1341 | (*nremoved)++; | |
01350d05 SC |
1342 | |
1343 | /* | |
1344 | * New physical devices won't have target/lun assigned yet | |
1345 | * so we need to preserve the values in the slot we are replacing. | |
1346 | */ | |
1347 | if (new_entry->target == -1) { | |
1348 | new_entry->target = h->dev[entry]->target; | |
1349 | new_entry->lun = h->dev[entry]->lun; | |
1350 | } | |
1351 | ||
2a8ccf31 SC |
1352 | h->dev[entry] = new_entry; |
1353 | added[*nadded] = new_entry; | |
1354 | (*nadded)++; | |
0d96ef5f | 1355 | hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced"); |
a473d86c RE |
1356 | new_entry->offload_to_be_enabled = new_entry->offload_enabled; |
1357 | new_entry->offload_enabled = 0; | |
2a8ccf31 SC |
1358 | } |
1359 | ||
edd16368 | 1360 | /* Remove an entry from h->dev[] array. */ |
8aa60681 | 1361 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry, |
edd16368 SC |
1362 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
1363 | { | |
1364 | /* assumes h->devlock is held */ | |
1365 | int i; | |
1366 | struct hpsa_scsi_dev_t *sd; | |
1367 | ||
cfe5badc | 1368 | BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES); |
edd16368 SC |
1369 | |
1370 | sd = h->dev[entry]; | |
1371 | removed[*nremoved] = h->dev[entry]; | |
1372 | (*nremoved)++; | |
1373 | ||
1374 | for (i = entry; i < h->ndevices-1; i++) | |
1375 | h->dev[i] = h->dev[i+1]; | |
1376 | h->ndevices--; | |
0d96ef5f | 1377 | hpsa_show_dev_msg(KERN_INFO, h, sd, "removed"); |
edd16368 SC |
1378 | } |
1379 | ||
1380 | #define SCSI3ADDR_EQ(a, b) ( \ | |
1381 | (a)[7] == (b)[7] && \ | |
1382 | (a)[6] == (b)[6] && \ | |
1383 | (a)[5] == (b)[5] && \ | |
1384 | (a)[4] == (b)[4] && \ | |
1385 | (a)[3] == (b)[3] && \ | |
1386 | (a)[2] == (b)[2] && \ | |
1387 | (a)[1] == (b)[1] && \ | |
1388 | (a)[0] == (b)[0]) | |
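/* Unrolled equivalent of memcmp((a), (b), 8) == 0 for 8-byte LUN addresses. */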
1389 | ||
1390 | static void fixup_botched_add(struct ctlr_info *h, | |
1391 | struct hpsa_scsi_dev_t *added) | |
1392 | { | |
1393 | /* called when scsi_add_device fails in order to re-adjust | |
1394 | * h->dev[] to match the mid layer's view. | |
1395 | */ | |
1396 | unsigned long flags; | |
1397 | int i, j; | |
1398 | ||
1399 | spin_lock_irqsave(&h->lock, flags); | |
1400 | for (i = 0; i < h->ndevices; i++) { | |
1401 | if (h->dev[i] == added) { | |
1402 | for (j = i; j < h->ndevices-1; j++) | |
1403 | h->dev[j] = h->dev[j+1]; | |
1404 | h->ndevices--; | |
1405 | break; | |
1406 | } | |
1407 | } | |
1408 | spin_unlock_irqrestore(&h->lock, flags); | |
1409 | kfree(added); | |
1410 | } | |
1411 | ||
1412 | static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, | |
1413 | struct hpsa_scsi_dev_t *dev2) | |
1414 | { | |
edd16368 SC |
1415 | /* we compare everything except lun and target as these |
1416 | * are not yet assigned. Compare parts likely | |
1417 | * to differ first | |
1418 | */ | |
1419 | if (memcmp(dev1->scsi3addr, dev2->scsi3addr, | |
1420 | sizeof(dev1->scsi3addr)) != 0) | |
1421 | return 0; | |
1422 | if (memcmp(dev1->device_id, dev2->device_id, | |
1423 | sizeof(dev1->device_id)) != 0) | |
1424 | return 0; | |
1425 | if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) | |
1426 | return 0; | |
1427 | if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) | |
1428 | return 0; | |
edd16368 SC |
1429 | if (dev1->devtype != dev2->devtype) |
1430 | return 0; | |
edd16368 SC |
1431 | if (dev1->bus != dev2->bus) |
1432 | return 0; | |
1433 | return 1; | |
1434 | } | |
1435 | ||
bd9244f7 ST |
1436 | static inline int device_updated(struct hpsa_scsi_dev_t *dev1, |
1437 | struct hpsa_scsi_dev_t *dev2) | |
1438 | { | |
1439 | /* Device attributes that can change, but don't mean | |
1440 | * that the device is a different device, nor that the OS | |
1441 | * needs to be told anything about the change. | |
1442 | */ | |
1443 | if (dev1->raid_level != dev2->raid_level) | |
1444 | return 1; | |
250fb125 SC |
1445 | if (dev1->offload_config != dev2->offload_config) |
1446 | return 1; | |
1447 | if (dev1->offload_enabled != dev2->offload_enabled) | |
1448 | return 1; | |
93849508 DB |
1449 | if (!is_logical_dev_addr_mode(dev1->scsi3addr)) |
1450 | if (dev1->queue_depth != dev2->queue_depth) | |
1451 | return 1; | |
bd9244f7 ST |
1452 | return 0; |
1453 | } | |
1454 | ||
edd16368 SC |
1455 | /* Find needle in haystack. If exact match found, return DEVICE_SAME, |
1456 | * and return needle location in *index. If scsi3addr matches, but not | |
1457 | * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle | |
bd9244f7 ST |
1458 | * location in *index. |
1459 | * In the case of a minor device attribute change, such as RAID level, just | |
1460 | * return DEVICE_UPDATED, along with the updated device's location in index. | |
1461 | * If needle not found, return DEVICE_NOT_FOUND. | |
edd16368 SC |
1462 | */ |
1463 | static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, | |
1464 | struct hpsa_scsi_dev_t *haystack[], int haystack_size, | |
1465 | int *index) | |
1466 | { | |
1467 | int i; | |
1468 | #define DEVICE_NOT_FOUND 0 | |
1469 | #define DEVICE_CHANGED 1 | |
1470 | #define DEVICE_SAME 2 | |
bd9244f7 | 1471 | #define DEVICE_UPDATED 3 |
1d33d85d DB |
1472 | if (needle == NULL) |
1473 | return DEVICE_NOT_FOUND; | |
1474 | ||
edd16368 | 1475 | for (i = 0; i < haystack_size; i++) { |
23231048 SC |
1476 | if (haystack[i] == NULL) /* previously removed. */ |
1477 | continue; | |
edd16368 SC |
1478 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
1479 | *index = i; | |
bd9244f7 ST |
1480 | if (device_is_the_same(needle, haystack[i])) { |
1481 | if (device_updated(needle, haystack[i])) | |
1482 | return DEVICE_UPDATED; | |
edd16368 | 1483 | return DEVICE_SAME; |
bd9244f7 | 1484 | } else { |
9846590e SC |
1485 | /* Keep offline devices offline */ |
1486 | if (needle->volume_offline) | |
1487 | return DEVICE_NOT_FOUND; | |
edd16368 | 1488 | return DEVICE_CHANGED; |
bd9244f7 | 1489 | } |
edd16368 SC |
1490 | } |
1491 | } | |
1492 | *index = -1; | |
1493 | return DEVICE_NOT_FOUND; | |
1494 | } | |
1495 | ||
9846590e SC |
1496 | static void hpsa_monitor_offline_device(struct ctlr_info *h, |
1497 | unsigned char scsi3addr[]) | |
1498 | { | |
1499 | struct offline_device_entry *device; | |
1500 | unsigned long flags; | |
1501 | ||
1502 | /* Check to see if device is already on the list */ | |
1503 | spin_lock_irqsave(&h->offline_device_lock, flags); | |
1504 | list_for_each_entry(device, &h->offline_device_list, offline_list) { | |
1505 | if (memcmp(device->scsi3addr, scsi3addr, | |
1506 | sizeof(device->scsi3addr)) == 0) { | |
1507 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
1508 | return; | |
1509 | } | |
1510 | } | |
1511 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
1512 | ||
1513 | /* Device is not on the list, add it. */ | |
1514 | device = kmalloc(sizeof(*device), GFP_KERNEL); | |
1515 | if (!device) { | |
1516 | dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); | |
1517 | return; | |
1518 | } | |
1519 | memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); | |
1520 | spin_lock_irqsave(&h->offline_device_lock, flags); | |
1521 | list_add_tail(&device->offline_list, &h->offline_device_list); | |
1522 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
1523 | } | |
1524 | ||
1525 | /* Print a message explaining various offline volume states */ | |
1526 | static void hpsa_show_volume_status(struct ctlr_info *h, | |
1527 | struct hpsa_scsi_dev_t *sd) | |
1528 | { | |
1529 | if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) | |
1530 | dev_info(&h->pdev->dev, | |
1531 | "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", | |
1532 | h->scsi_host->host_no, | |
1533 | sd->bus, sd->target, sd->lun); | |
1534 | switch (sd->volume_offline) { | |
1535 | case HPSA_LV_OK: | |
1536 | break; | |
1537 | case HPSA_LV_UNDERGOING_ERASE: | |
1538 | dev_info(&h->pdev->dev, | |
1539 | "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", | |
1540 | h->scsi_host->host_no, | |
1541 | sd->bus, sd->target, sd->lun); | |
1542 | break; | |
5ca01204 SB |
1543 | case HPSA_LV_NOT_AVAILABLE: |
1544 | dev_info(&h->pdev->dev, | |
1545 | "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n", | |
1546 | h->scsi_host->host_no, | |
1547 | sd->bus, sd->target, sd->lun); | |
1548 | break; | |
9846590e SC |
1549 | case HPSA_LV_UNDERGOING_RPI: |
1550 | dev_info(&h->pdev->dev, | |
5ca01204 | 1551 | "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n", |
9846590e SC |
1552 | h->scsi_host->host_no, |
1553 | sd->bus, sd->target, sd->lun); | |
1554 | break; | |
1555 | case HPSA_LV_PENDING_RPI: | |
1556 | dev_info(&h->pdev->dev, | |
5ca01204 SB |
1557 | "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", |
1558 | h->scsi_host->host_no, | |
1559 | sd->bus, sd->target, sd->lun); | |
9846590e SC |
1560 | break; |
1561 | case HPSA_LV_ENCRYPTED_NO_KEY: | |
1562 | dev_info(&h->pdev->dev, | |
1563 | "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", | |
1564 | h->scsi_host->host_no, | |
1565 | sd->bus, sd->target, sd->lun); | |
1566 | break; | |
1567 | case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: | |
1568 | dev_info(&h->pdev->dev, | |
1569 | "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", | |
1570 | h->scsi_host->host_no, | |
1571 | sd->bus, sd->target, sd->lun); | |
1572 | break; | |
1573 | case HPSA_LV_UNDERGOING_ENCRYPTION: | |
1574 | dev_info(&h->pdev->dev, | |
1575 | "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", | |
1576 | h->scsi_host->host_no, | |
1577 | sd->bus, sd->target, sd->lun); | |
1578 | break; | |
1579 | case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: | |
1580 | dev_info(&h->pdev->dev, | |
1581 | "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", | |
1582 | h->scsi_host->host_no, | |
1583 | sd->bus, sd->target, sd->lun); | |
1584 | break; | |
1585 | case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: | |
1586 | dev_info(&h->pdev->dev, | |
1587 | "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", | |
1588 | h->scsi_host->host_no, | |
1589 | sd->bus, sd->target, sd->lun); | |
1590 | break; | |
1591 | case HPSA_LV_PENDING_ENCRYPTION: | |
1592 | dev_info(&h->pdev->dev, | |
1593 | "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", | |
1594 | h->scsi_host->host_no, | |
1595 | sd->bus, sd->target, sd->lun); | |
1596 | break; | |
1597 | case HPSA_LV_PENDING_ENCRYPTION_REKEYING: | |
1598 | dev_info(&h->pdev->dev, | |
1599 | "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", | |
1600 | h->scsi_host->host_no, | |
1601 | sd->bus, sd->target, sd->lun); | |
1602 | break; | |
1603 | } | |
1604 | } | |
1605 | ||
03383736 DB |
1606 | /* |
1607 | * Figure the list of physical drive pointers for a logical drive with | |
1608 | * raid offload configured. | |
1609 | */ | |
1610 | static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h, | |
1611 | struct hpsa_scsi_dev_t *dev[], int ndevices, | |
1612 | struct hpsa_scsi_dev_t *logical_drive) | |
1613 | { | |
1614 | struct raid_map_data *map = &logical_drive->raid_map; | |
1615 | struct raid_map_disk_data *dd = &map->data[0]; | |
1616 | int i, j; | |
1617 | int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + | |
1618 | le16_to_cpu(map->metadata_disks_per_row); | |
1619 | int nraid_map_entries = le16_to_cpu(map->row_cnt) * | |
1620 | le16_to_cpu(map->layout_map_count) * | |
1621 | total_disks_per_row; | |
1622 | int nphys_disk = le16_to_cpu(map->layout_map_count) * | |
1623 | total_disks_per_row; | |
1624 | int qdepth; | |
1625 | ||
1626 | if (nraid_map_entries > RAID_MAP_MAX_ENTRIES) | |
1627 | nraid_map_entries = RAID_MAP_MAX_ENTRIES; | |
1628 | ||
d604f533 WS |
1629 | logical_drive->nphysical_disks = nraid_map_entries; |
1630 | ||
03383736 DB |
1631 | qdepth = 0; |
1632 | for (i = 0; i < nraid_map_entries; i++) { | |
1633 | logical_drive->phys_disk[i] = NULL; | |
1634 | if (!logical_drive->offload_config) | |
1635 | continue; | |
1636 | for (j = 0; j < ndevices; j++) { | |
1d33d85d DB |
1637 | if (dev[j] == NULL) |
1638 | continue; | |
03383736 DB |
1639 | /* must be TYPE_DISK or TYPE_ZBC; a device can't be both, so |
af15ed36 DB |
1640 | * two separate "!=" checks would skip every device */ |
1641 | if (dev[j]->devtype != TYPE_DISK && |
1642 | dev[j]->devtype != TYPE_ZBC) |
1643 | continue; |
f3f01730 | 1643 | if (is_logical_device(dev[j])) |
03383736 DB |
1644 | continue; |
1645 | if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle) | |
1646 | continue; | |
1647 | ||
1648 | logical_drive->phys_disk[i] = dev[j]; | |
1649 | if (i < nphys_disk) | |
1650 | qdepth = min(h->nr_cmds, qdepth + | |
1651 | logical_drive->phys_disk[i]->queue_depth); | |
1652 | break; | |
1653 | } | |
1654 | ||
1655 | /* | |
1656 | * This can happen if a physical drive is removed and | |
1657 | * the logical drive is degraded. In that case, the RAID | |
1658 | * map data will refer to a physical disk which isn't actually | |
1659 | * present. And in that case offload_enabled should already | |
1660 | * be 0, but we'll turn it off here just in case. |
1661 | */ | |
1662 | if (!logical_drive->phys_disk[i]) { | |
1663 | logical_drive->offload_enabled = 0; | |
41ce4c35 SC |
1664 | logical_drive->offload_to_be_enabled = 0; |
1665 | logical_drive->queue_depth = 8; | |
03383736 DB |
1666 | } |
1667 | } | |
1668 | if (nraid_map_entries) | |
1669 | /* | |
1670 | * This is correct for reads, too high for full stripe writes, | |
1671 | * way too high for partial stripe writes | |
1672 | */ | |
1673 | logical_drive->queue_depth = qdepth; | |
1674 | else | |
1675 | logical_drive->queue_depth = h->nr_cmds; | |
1676 | } | |
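/*
 * Worked example (hypothetical numbers): a RAID 1 volume whose map
 * references two physical disks with queue_depth 32 each accumulates
 * qdepth = 32 + 32 = 64, so the logical drive's queue_depth becomes
 * min(h->nr_cmds, 64); an empty or degraded map falls back to the
 * defaults above.
 */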
1677 | ||
1678 | static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h, | |
1679 | struct hpsa_scsi_dev_t *dev[], int ndevices) | |
1680 | { | |
1681 | int i; | |
1682 | ||
1683 | for (i = 0; i < ndevices; i++) { | |
1d33d85d DB |
1684 | if (dev[i] == NULL) |
1685 | continue; | |
03383736 DB |
1686 | /* must be TYPE_DISK or TYPE_ZBC; a device can't be both, so |
af15ed36 DB |
1687 | * two separate "!=" checks would skip every device */ |
1688 | if (dev[i]->devtype != TYPE_DISK && |
1689 | dev[i]->devtype != TYPE_ZBC) |
1690 | continue; |
f3f01730 | 1690 | if (!is_logical_device(dev[i])) |
03383736 | 1691 | continue; |
41ce4c35 SC |
1692 | |
1693 | /* | |
1694 | * If offload is currently enabled, the RAID map and | |
1695 | * phys_disk[] assignment *better* not be changing | |
1696 | * and since it isn't changing, we do not need to | |
1697 | * update it. | |
1698 | */ | |
1699 | if (dev[i]->offload_enabled) | |
1700 | continue; | |
1701 | ||
03383736 DB |
1702 | hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]); |
1703 | } | |
1704 | } | |
1705 | ||
096ccff4 KB |
1706 | static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) |
1707 | { | |
1708 | int rc = 0; | |
1709 | ||
1710 | if (!h->scsi_host) | |
1711 | return 1; | |
1712 | ||
d04e62b9 KB |
1713 | if (is_logical_device(device)) /* RAID */ |
1714 | rc = scsi_add_device(h->scsi_host, device->bus, | |
096ccff4 | 1715 | device->target, device->lun); |
d04e62b9 KB |
1716 | else /* HBA */ |
1717 | rc = hpsa_add_sas_device(h->sas_host, device); | |
1718 | ||
096ccff4 KB |
1719 | return rc; |
1720 | } | |
1721 | ||
1722 | static void hpsa_remove_device(struct ctlr_info *h, | |
1723 | struct hpsa_scsi_dev_t *device) | |
1724 | { | |
1725 | struct scsi_device *sdev = NULL; | |
1726 | ||
1727 | if (!h->scsi_host) | |
1728 | return; | |
1729 | ||
d04e62b9 KB |
1730 | if (is_logical_device(device)) { /* RAID */ |
1731 | sdev = scsi_device_lookup(h->scsi_host, device->bus, | |
096ccff4 | 1732 | device->target, device->lun); |
d04e62b9 KB |
1733 | if (sdev) { |
1734 | scsi_remove_device(sdev); | |
1735 | scsi_device_put(sdev); | |
1736 | } else { | |
1737 | /* | |
1738 | * We don't expect to get here. Future commands | |
1739 | * to this device will get a selection timeout as | |
1740 | * if the device were gone. | |
1741 | */ | |
1742 | hpsa_show_dev_msg(KERN_WARNING, h, device, | |
096ccff4 | 1743 | "didn't find device for removal."); |
d04e62b9 KB |
1744 | } |
1745 | } else /* HBA */ | |
1746 | hpsa_remove_sas_device(device); | |
096ccff4 KB |
1747 | } |
1748 | ||
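/*
 * Reconcile the driver's h->dev[] with the freshly scanned sd[]:
 * pass 1 walks h->dev[] removing, replacing, or updating entries;
 * pass 2 walks sd[] adding anything new; ioaccel offload is only
 * re-enabled once every entry's phys_disk[] is coherent again.
 */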
8aa60681 | 1749 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, |
edd16368 SC |
1750 | struct hpsa_scsi_dev_t *sd[], int nsds) |
1751 | { | |
1752 | /* sd contains scsi3 addresses and devtypes, and inquiry | |
1753 | * data. This function takes what's in sd to be the current | |
1754 | * reality and updates h->dev[] to reflect that reality. | |
1755 | */ | |
1756 | int i, entry, device_change, changes = 0; | |
1757 | struct hpsa_scsi_dev_t *csd; | |
1758 | unsigned long flags; | |
1759 | struct hpsa_scsi_dev_t **added, **removed; | |
1760 | int nadded, nremoved; | |
edd16368 | 1761 | |
da03ded0 DB |
1762 | /* |
1763 | * A reset can cause a device status to change; |
1764 | * re-schedule the scan to see what happened. |
1765 | */ | |
1766 | if (h->reset_in_progress) { | |
1767 | h->drv_req_rescan = 1; | |
1768 | return; | |
1769 | } | |
edd16368 | 1770 | |
cfe5badc ST |
1771 | added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); |
1772 | removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); | |
edd16368 SC |
1773 | |
1774 | if (!added || !removed) { | |
1775 | dev_warn(&h->pdev->dev, "out of memory in " | |
1776 | "adjust_hpsa_scsi_table\n"); | |
1777 | goto free_and_out; | |
1778 | } | |
1779 | ||
1780 | spin_lock_irqsave(&h->devlock, flags); | |
1781 | ||
1782 | /* find any devices in h->dev[] that are not in | |
1783 | * sd[] and remove them from h->dev[], and for any | |
1784 | * devices which have changed, remove the old device | |
1785 | * info and add the new device info. | |
bd9244f7 ST |
1786 | * If minor device attributes change, just update |
1787 | * the existing device structure. | |
edd16368 SC |
1788 | */ |
1789 | i = 0; | |
1790 | nremoved = 0; | |
1791 | nadded = 0; | |
1792 | while (i < h->ndevices) { | |
1793 | csd = h->dev[i]; | |
1794 | device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); | |
1795 | if (device_change == DEVICE_NOT_FOUND) { | |
1796 | changes++; | |
8aa60681 | 1797 | hpsa_scsi_remove_entry(h, i, removed, &nremoved); |
edd16368 SC |
1798 | continue; /* remove ^^^, hence i not incremented */ |
1799 | } else if (device_change == DEVICE_CHANGED) { | |
1800 | changes++; | |
8aa60681 | 1801 | hpsa_scsi_replace_entry(h, i, sd[entry], |
2a8ccf31 | 1802 | added, &nadded, removed, &nremoved); |
c7f172dc SC |
1803 | /* Set it to NULL to prevent it from being freed |
1804 | * at the bottom of hpsa_update_scsi_devices() | |
1805 | */ | |
1806 | sd[entry] = NULL; | |
bd9244f7 | 1807 | } else if (device_change == DEVICE_UPDATED) { |
8aa60681 | 1808 | hpsa_scsi_update_entry(h, i, sd[entry]); |
edd16368 SC |
1809 | } |
1810 | i++; | |
1811 | } | |
1812 | ||
1813 | /* Now, make sure every device listed in sd[] is also | |
1814 | * listed in h->dev[], adding them if they aren't found | |
1815 | */ | |
1816 | ||
1817 | for (i = 0; i < nsds; i++) { | |
1818 | if (!sd[i]) /* if already added above. */ | |
1819 | continue; | |
9846590e SC |
1820 | |
1821 | /* Don't add devices which are NOT READY, FORMAT IN PROGRESS | |
1822 | * as the SCSI mid-layer does not handle such devices well. | |
1823 | * It relentlessly loops sending TUR at 3Hz, then READ(10) | |
1824 | * at 160Hz, and prevents the system from coming up. | |
1825 | */ | |
1826 | if (sd[i]->volume_offline) { | |
1827 | hpsa_show_volume_status(h, sd[i]); | |
0d96ef5f | 1828 | hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline"); |
9846590e SC |
1829 | continue; |
1830 | } | |
1831 | ||
edd16368 SC |
1832 | device_change = hpsa_scsi_find_entry(sd[i], h->dev, |
1833 | h->ndevices, &entry); | |
1834 | if (device_change == DEVICE_NOT_FOUND) { | |
1835 | changes++; | |
8aa60681 | 1836 | if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0) |
edd16368 SC |
1837 | break; |
1838 | sd[i] = NULL; /* prevent from being freed later. */ | |
1839 | } else if (device_change == DEVICE_CHANGED) { | |
1840 | /* should never happen... */ | |
1841 | changes++; | |
1842 | dev_warn(&h->pdev->dev, | |
1843 | "device unexpectedly changed.\n"); | |
1844 | /* but if it does happen, we just ignore that device */ | |
1845 | } | |
1846 | } | |
41ce4c35 SC |
1847 | hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices); |
1848 | ||
1849 | /* Now that h->dev[]->phys_disk[] is coherent, we can enable | |
1850 | * any logical drives that need it enabled. | |
1851 | */ | |
1d33d85d DB |
1852 | for (i = 0; i < h->ndevices; i++) { |
1853 | if (h->dev[i] == NULL) | |
1854 | continue; | |
41ce4c35 | 1855 | h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled; |
1d33d85d | 1856 | } |
41ce4c35 | 1857 | |
edd16368 SC |
1858 | spin_unlock_irqrestore(&h->devlock, flags); |
1859 | ||
9846590e SC |
1860 | /* Monitor devices which are in one of several NOT READY states to be |
1861 | * brought online later. This must be done without holding h->devlock, | |
1862 | * so don't touch h->dev[] | |
1863 | */ | |
1864 | for (i = 0; i < nsds; i++) { | |
1865 | if (!sd[i]) /* if already added above. */ | |
1866 | continue; | |
1867 | if (sd[i]->volume_offline) | |
1868 | hpsa_monitor_offline_device(h, sd[i]->scsi3addr); | |
1869 | } | |
1870 | ||
edd16368 SC |
1871 | /* Don't notify scsi mid layer of any changes the first time through |
1872 | * (or if there are no changes) scsi_scan_host will do it later the | |
1873 | * first time through. | |
1874 | */ | |
8aa60681 | 1875 | if (!changes) |
edd16368 SC |
1876 | goto free_and_out; |
1877 | ||
edd16368 SC |
1878 | /* Notify scsi mid layer of any removed devices */ |
1879 | for (i = 0; i < nremoved; i++) { | |
1d33d85d DB |
1880 | if (removed[i] == NULL) |
1881 | continue; | |
096ccff4 KB |
1882 | if (removed[i]->expose_device) |
1883 | hpsa_remove_device(h, removed[i]); | |
edd16368 SC |
1884 | kfree(removed[i]); |
1885 | removed[i] = NULL; | |
1886 | } | |
1887 | ||
1888 | /* Notify scsi mid layer of any added devices */ | |
1889 | for (i = 0; i < nadded; i++) { | |
096ccff4 KB |
1890 | int rc = 0; |
1891 | ||
1d33d85d DB |
1892 | if (added[i] == NULL) |
1893 | continue; | |
2a168208 | 1894 | if (!(added[i]->expose_device)) |
41ce4c35 | 1895 | continue; |
096ccff4 KB |
1896 | rc = hpsa_add_device(h, added[i]); |
1897 | if (!rc) | |
edd16368 | 1898 | continue; |
096ccff4 KB |
1899 | dev_warn(&h->pdev->dev, |
1900 | "addition failed %d, device not added.", rc); | |
edd16368 SC |
1901 | /* now we have to remove it from h->dev, |
1902 | * since it didn't get added to scsi mid layer | |
1903 | */ | |
1904 | fixup_botched_add(h, added[i]); | |
853633e8 | 1905 | h->drv_req_rescan = 1; |
edd16368 SC |
1906 | } |
1907 | ||
1908 | free_and_out: | |
1909 | kfree(added); | |
1910 | kfree(removed); | |
edd16368 SC |
1911 | } |
1912 | ||
1913 | /* | |
9e03aa2f | 1914 | * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t * |
edd16368 SC |
1915 | * Assumes h->devlock is held. |
1916 | */ | |
1917 | static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, | |
1918 | int bus, int target, int lun) | |
1919 | { | |
1920 | int i; | |
1921 | struct hpsa_scsi_dev_t *sd; | |
1922 | ||
1923 | for (i = 0; i < h->ndevices; i++) { | |
1924 | sd = h->dev[i]; | |
1925 | if (sd->bus == bus && sd->target == target && sd->lun == lun) | |
1926 | return sd; | |
1927 | } | |
1928 | return NULL; | |
1929 | } | |
1930 | ||
edd16368 SC |
1931 | static int hpsa_slave_alloc(struct scsi_device *sdev) |
1932 | { | |
1933 | struct hpsa_scsi_dev_t *sd; | |
1934 | unsigned long flags; | |
1935 | struct ctlr_info *h; | |
1936 | ||
1937 | h = sdev_to_hba(sdev); | |
1938 | spin_lock_irqsave(&h->devlock, flags); | |
d04e62b9 KB |
1939 | if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) { |
1940 | struct scsi_target *starget; | |
1941 | struct sas_rphy *rphy; | |
1942 | ||
1943 | starget = scsi_target(sdev); | |
1944 | rphy = target_to_rphy(starget); | |
1945 | sd = hpsa_find_device_by_sas_rphy(h, rphy); | |
1946 | if (sd) { | |
1947 | sd->target = sdev_id(sdev); | |
1948 | sd->lun = sdev->lun; | |
1949 | } | |
1950 | } else | |
1951 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), | |
1952 | sdev_id(sdev), sdev->lun); | |
1953 | ||
1954 | if (sd && sd->expose_device) { | |
03383736 | 1955 | atomic_set(&sd->ioaccel_cmds_out, 0); |
d04e62b9 | 1956 | sdev->hostdata = sd; |
41ce4c35 SC |
1957 | } else |
1958 | sdev->hostdata = NULL; | |
edd16368 SC |
1959 | spin_unlock_irqrestore(&h->devlock, flags); |
1960 | return 0; | |
1961 | } | |
1962 | ||
41ce4c35 SC |
1963 | /* configure scsi device based on internal per-device structure */ |
1964 | static int hpsa_slave_configure(struct scsi_device *sdev) | |
1965 | { | |
1966 | struct hpsa_scsi_dev_t *sd; | |
1967 | int queue_depth; | |
1968 | ||
1969 | sd = sdev->hostdata; | |
2a168208 | 1970 | sdev->no_uld_attach = !sd || !sd->expose_device; |
41ce4c35 SC |
1971 | |
1972 | if (sd) | |
1973 | queue_depth = sd->queue_depth != 0 ? | |
1974 | sd->queue_depth : sdev->host->can_queue; | |
1975 | else | |
1976 | queue_depth = sdev->host->can_queue; | |
1977 | ||
1978 | scsi_change_queue_depth(sdev, queue_depth); | |
1979 | ||
1980 | return 0; | |
1981 | } | |
1982 | ||
edd16368 SC |
1983 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
1984 | { | |
bcc44255 | 1985 | /* nothing to do. */ |
edd16368 SC |
1986 | } |
1987 | ||
d9a729f3 WS |
1988 | static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h) |
1989 | { | |
1990 | int i; | |
1991 | ||
1992 | if (!h->ioaccel2_cmd_sg_list) | |
1993 | return; | |
1994 | for (i = 0; i < h->nr_cmds; i++) { | |
1995 | kfree(h->ioaccel2_cmd_sg_list[i]); | |
1996 | h->ioaccel2_cmd_sg_list[i] = NULL; | |
1997 | } | |
1998 | kfree(h->ioaccel2_cmd_sg_list); | |
1999 | h->ioaccel2_cmd_sg_list = NULL; | |
2000 | } | |
2001 | ||
2002 | static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h) | |
2003 | { | |
2004 | int i; | |
2005 | ||
2006 | if (h->chainsize <= 0) | |
2007 | return 0; | |
2008 | ||
2009 | h->ioaccel2_cmd_sg_list = | |
2010 | kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds, | |
2011 | GFP_KERNEL); | |
2012 | if (!h->ioaccel2_cmd_sg_list) | |
2013 | return -ENOMEM; | |
2014 | for (i = 0; i < h->nr_cmds; i++) { | |
2015 | h->ioaccel2_cmd_sg_list[i] = | |
2016 | kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) * | |
2017 | h->maxsgentries, GFP_KERNEL); | |
2018 | if (!h->ioaccel2_cmd_sg_list[i]) | |
2019 | goto clean; | |
2020 | } | |
2021 | return 0; | |
2022 | ||
2023 | clean: | |
2024 | hpsa_free_ioaccel2_sg_chain_blocks(h); | |
2025 | return -ENOMEM; | |
2026 | } | |
2027 | ||
33a2ffce SC |
2028 | static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) |
2029 | { | |
2030 | int i; | |
2031 | ||
2032 | if (!h->cmd_sg_list) | |
2033 | return; | |
2034 | for (i = 0; i < h->nr_cmds; i++) { | |
2035 | kfree(h->cmd_sg_list[i]); | |
2036 | h->cmd_sg_list[i] = NULL; | |
2037 | } | |
2038 | kfree(h->cmd_sg_list); | |
2039 | h->cmd_sg_list = NULL; | |
2040 | } | |
2041 | ||
105a3dbc | 2042 | static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h) |
33a2ffce SC |
2043 | { |
2044 | int i; | |
2045 | ||
2046 | if (h->chainsize <= 0) | |
2047 | return 0; | |
2048 | ||
2049 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, | |
2050 | GFP_KERNEL); | |
3d4e6af8 RE |
2051 | if (!h->cmd_sg_list) { |
2052 | dev_err(&h->pdev->dev, "Failed to allocate SG list\n"); | |
33a2ffce | 2053 | return -ENOMEM; |
3d4e6af8 | 2054 | } |
33a2ffce SC |
2055 | for (i = 0; i < h->nr_cmds; i++) { |
2056 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * | |
2057 | h->chainsize, GFP_KERNEL); | |
3d4e6af8 RE |
2058 | if (!h->cmd_sg_list[i]) { |
2059 | dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n"); | |
33a2ffce | 2060 | goto clean; |
3d4e6af8 | 2061 | } |
33a2ffce SC |
2062 | } |
2063 | return 0; | |
2064 | ||
2065 | clean: | |
2066 | hpsa_free_sg_chain_blocks(h); | |
2067 | return -ENOMEM; | |
2068 | } | |
2069 | ||
d9a729f3 WS |
2070 | static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h, |
2071 | struct io_accel2_cmd *cp, struct CommandList *c) | |
2072 | { | |
2073 | struct ioaccel2_sg_element *chain_block; | |
2074 | u64 temp64; | |
2075 | u32 chain_size; | |
2076 | ||
2077 | chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex]; | |
a736e9b6 | 2078 | chain_size = le32_to_cpu(cp->sg[0].length); |
d9a729f3 WS |
2079 | temp64 = pci_map_single(h->pdev, chain_block, chain_size, |
2080 | PCI_DMA_TODEVICE); | |
2081 | if (dma_mapping_error(&h->pdev->dev, temp64)) { | |
2082 | /* prevent subsequent unmapping */ | |
2083 | cp->sg->address = 0; | |
2084 | return -1; | |
2085 | } | |
2086 | cp->sg->address = cpu_to_le64(temp64); | |
2087 | return 0; | |
2088 | } | |
2089 | ||
2090 | static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h, | |
2091 | struct io_accel2_cmd *cp) | |
2092 | { | |
2093 | struct ioaccel2_sg_element *chain_sg; | |
2094 | u64 temp64; | |
2095 | u32 chain_size; | |
2096 | ||
2097 | chain_sg = cp->sg; | |
2098 | temp64 = le64_to_cpu(chain_sg->address); | |
a736e9b6 | 2099 | chain_size = le32_to_cpu(cp->sg[0].length); |
d9a729f3 WS |
2100 | pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE); |
2101 | } | |
2102 | ||
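/*
 * SG chaining: when a command needs more scatter-gather entries than
 * fit in the CommandList itself, the last embedded descriptor
 * (SG[max_cmd_sg_entries - 1]) is marked HPSA_SG_CHAIN and becomes a
 * pointer to a separately DMA-mapped block holding the remaining
 * descriptors.
 */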
e2bea6df | 2103 | static int hpsa_map_sg_chain_block(struct ctlr_info *h, |
33a2ffce SC |
2104 | struct CommandList *c) |
2105 | { | |
2106 | struct SGDescriptor *chain_sg, *chain_block; | |
2107 | u64 temp64; | |
50a0decf | 2108 | u32 chain_len; |
33a2ffce SC |
2109 | |
2110 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; | |
2111 | chain_block = h->cmd_sg_list[c->cmdindex]; | |
50a0decf SC |
2112 | chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN); |
2113 | chain_len = sizeof(*chain_sg) * | |
2b08b3e9 | 2114 | (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries); |
50a0decf SC |
2115 | chain_sg->Len = cpu_to_le32(chain_len); |
2116 | temp64 = pci_map_single(h->pdev, chain_block, chain_len, | |
33a2ffce | 2117 | PCI_DMA_TODEVICE); |
e2bea6df SC |
2118 | if (dma_mapping_error(&h->pdev->dev, temp64)) { |
2119 | /* prevent subsequent unmapping */ | |
50a0decf | 2120 | chain_sg->Addr = cpu_to_le64(0); |
e2bea6df SC |
2121 | return -1; |
2122 | } | |
50a0decf | 2123 | chain_sg->Addr = cpu_to_le64(temp64); |
e2bea6df | 2124 | return 0; |
33a2ffce SC |
2125 | } |
2126 | ||
2127 | static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, | |
2128 | struct CommandList *c) | |
2129 | { | |
2130 | struct SGDescriptor *chain_sg; | |
33a2ffce | 2131 | |
50a0decf | 2132 | if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries) |
33a2ffce SC |
2133 | return; |
2134 | ||
2135 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; | |
50a0decf SC |
2136 | pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr), |
2137 | le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE); | |
33a2ffce SC |
2138 | } |
2139 | ||
a09c1441 ST |
2140 | |
2141 | /* Decode the various types of errors on the ioaccel2 path. |
2142 | * Return 1 for any error that should generate a RAID path retry. | |
2143 | * Return 0 for errors that don't require a RAID path retry. | |
2144 | */ | |
2145 | static int handle_ioaccel_mode2_error(struct ctlr_info *h, | |
c349775e ST |
2146 | struct CommandList *c, |
2147 | struct scsi_cmnd *cmd, | |
2148 | struct io_accel2_cmd *c2) | |
2149 | { | |
2150 | int data_len; | |
a09c1441 | 2151 | int retry = 0; |
c40820d5 | 2152 | u32 ioaccel2_resid = 0; |
c349775e ST |
2153 | |
2154 | switch (c2->error_data.serv_response) { | |
2155 | case IOACCEL2_SERV_RESPONSE_COMPLETE: | |
2156 | switch (c2->error_data.status) { | |
2157 | case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: | |
2158 | break; | |
2159 | case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: | |
ee6b1889 | 2160 | cmd->result |= SAM_STAT_CHECK_CONDITION; |
c349775e | 2161 | if (c2->error_data.data_present != |
ee6b1889 SC |
2162 | IOACCEL2_SENSE_DATA_PRESENT) { |
2163 | memset(cmd->sense_buffer, 0, | |
2164 | SCSI_SENSE_BUFFERSIZE); | |
c349775e | 2165 | break; |
ee6b1889 | 2166 | } |
c349775e ST |
2167 | /* copy the sense data */ |
2168 | data_len = c2->error_data.sense_data_len; | |
2169 | if (data_len > SCSI_SENSE_BUFFERSIZE) | |
2170 | data_len = SCSI_SENSE_BUFFERSIZE; | |
2171 | if (data_len > sizeof(c2->error_data.sense_data_buff)) | |
2172 | data_len = | |
2173 | sizeof(c2->error_data.sense_data_buff); | |
2174 | memcpy(cmd->sense_buffer, | |
2175 | c2->error_data.sense_data_buff, data_len); | |
a09c1441 | 2176 | retry = 1; |
c349775e ST |
2177 | break; |
2178 | case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: | |
a09c1441 | 2179 | retry = 1; |
c349775e ST |
2180 | break; |
2181 | case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: | |
a09c1441 | 2182 | retry = 1; |
c349775e ST |
2183 | break; |
2184 | case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: | |
4a8da22b | 2185 | retry = 1; |
c349775e ST |
2186 | break; |
2187 | case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: | |
a09c1441 | 2188 | retry = 1; |
c349775e ST |
2189 | break; |
2190 | default: | |
a09c1441 | 2191 | retry = 1; |
c349775e ST |
2192 | break; |
2193 | } | |
2194 | break; | |
2195 | case IOACCEL2_SERV_RESPONSE_FAILURE: | |
c40820d5 JH |
2196 | switch (c2->error_data.status) { |
2197 | case IOACCEL2_STATUS_SR_IO_ERROR: | |
2198 | case IOACCEL2_STATUS_SR_IO_ABORTED: | |
2199 | case IOACCEL2_STATUS_SR_OVERRUN: | |
2200 | retry = 1; | |
2201 | break; | |
2202 | case IOACCEL2_STATUS_SR_UNDERRUN: | |
2203 | cmd->result = (DID_OK << 16); /* host byte */ | |
2204 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | |
2205 | ioaccel2_resid = get_unaligned_le32( | |
2206 | &c2->error_data.resid_cnt[0]); | |
2207 | scsi_set_resid(cmd, ioaccel2_resid); | |
2208 | break; | |
2209 | case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE: | |
2210 | case IOACCEL2_STATUS_SR_INVALID_DEVICE: | |
2211 | case IOACCEL2_STATUS_SR_IOACCEL_DISABLED: | |
2212 | /* We will get an event from ctlr to trigger rescan */ | |
2213 | retry = 1; | |
2214 | break; | |
2215 | default: | |
2216 | retry = 1; | |
c40820d5 | 2217 | } |
c349775e ST |
2218 | break; |
2219 | case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: | |
2220 | break; | |
2221 | case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: | |
2222 | break; | |
2223 | case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: | |
a09c1441 | 2224 | retry = 1; |
c349775e ST |
2225 | break; |
2226 | case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: | |
c349775e ST |
2227 | break; |
2228 | default: | |
a09c1441 | 2229 | retry = 1; |
c349775e ST |
2230 | break; |
2231 | } | |
a09c1441 ST |
2232 | |
2233 | return retry; /* retry on raid path? */ | |
c349775e ST |
2234 | } |
2235 | ||
a58e7e53 WS |
2236 | static void hpsa_cmd_resolve_events(struct ctlr_info *h, |
2237 | struct CommandList *c) | |
2238 | { | |
d604f533 WS |
2239 | bool do_wake = false; |
2240 | ||
a58e7e53 WS |
2241 | /* |
2242 | * Prevent the following race in the abort handler: | |
2243 | * | |
2244 | * 1. LLD is requested to abort a SCSI command | |
2245 | * 2. The SCSI command completes | |
2246 | * 3. The struct CommandList associated with step 2 is made available | |
2247 | * 4. New I/O request to LLD to another LUN re-uses struct CommandList | |
2248 | * 5. Abort handler follows scsi_cmnd->host_scribble and | |
2249 | * finds struct CommandList and tries to abort it |
2250 | * Now we have aborted the wrong command. | |
2251 | * | |
d604f533 WS |
2252 | * Reset c->scsi_cmd here so that the abort or reset handler will know |
2253 | * this command has completed. Then, check to see if the handler is | |
a58e7e53 WS |
2254 | * waiting for this command, and, if so, wake it. |
2255 | */ | |
2256 | c->scsi_cmd = SCSI_CMD_IDLE; | |
d604f533 | 2257 | mb(); /* Declare command idle before checking for pending events. */ |
a58e7e53 | 2258 | if (c->abort_pending) { |
d604f533 | 2259 | do_wake = true; |
a58e7e53 | 2260 | c->abort_pending = false; |
a58e7e53 | 2261 | } |
d604f533 WS |
2262 | if (c->reset_pending) { |
2263 | unsigned long flags; | |
2264 | struct hpsa_scsi_dev_t *dev; | |
2265 | ||
2266 | /* | |
2267 | * There appears to be a reset pending; take the lock and |
2268 | * reconfirm. If so, then decrement the count of outstanding | |
2269 | * commands and wake the reset command if this is the last one. | |
2270 | */ | |
2271 | spin_lock_irqsave(&h->lock, flags); | |
2272 | dev = c->reset_pending; /* Re-fetch under the lock. */ | |
2273 | if (dev && atomic_dec_and_test(&dev->reset_cmds_out)) | |
2274 | do_wake = true; | |
2275 | c->reset_pending = NULL; | |
2276 | spin_unlock_irqrestore(&h->lock, flags); | |
2277 | } | |
2278 | ||
2279 | if (do_wake) | |
2280 | wake_up_all(&h->event_sync_wait_queue); | |
a58e7e53 WS |
2281 | } |
2282 | ||
73153fe5 WS |
2283 | static void hpsa_cmd_resolve_and_free(struct ctlr_info *h, |
2284 | struct CommandList *c) | |
2285 | { | |
2286 | hpsa_cmd_resolve_events(h, c); | |
2287 | cmd_tagged_free(h, c); | |
2288 | } | |
2289 | ||
8a0ff92c WS |
2290 | static void hpsa_cmd_free_and_done(struct ctlr_info *h, |
2291 | struct CommandList *c, struct scsi_cmnd *cmd) | |
2292 | { | |
73153fe5 | 2293 | hpsa_cmd_resolve_and_free(h, c); |
8a0ff92c WS |
2294 | cmd->scsi_done(cmd); |
2295 | } | |
2296 | ||
2297 | static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) | |
2298 | { | |
2299 | INIT_WORK(&c->work, hpsa_command_resubmit_worker); | |
2300 | queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); | |
2301 | } | |
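/*
 * queue_work_on(raw_smp_processor_id(), ...) resubmits on the CPU that
 * completed the command, presumably to keep its data cache-warm.
 */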
2302 | ||
a58e7e53 WS |
2303 | static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) |
2304 | { | |
2305 | cmd->result = DID_ABORT << 16; | |
2306 | } | |
2307 | ||
2308 | static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, | |
2309 | struct scsi_cmnd *cmd) | |
2310 | { | |
2311 | hpsa_set_scsi_cmd_aborted(cmd); | |
2312 | dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", | |
2313 | c->Request.CDB, c->err_info->ScsiStatus); | |
73153fe5 | 2314 | hpsa_cmd_resolve_and_free(h, c); |
a58e7e53 WS |
2315 | } |
2316 | ||
c349775e ST |
2317 | static void process_ioaccel2_completion(struct ctlr_info *h, |
2318 | struct CommandList *c, struct scsi_cmnd *cmd, | |
2319 | struct hpsa_scsi_dev_t *dev) | |
2320 | { | |
2321 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | |
2322 | ||
2323 | /* check for good status */ | |
2324 | if (likely(c2->error_data.serv_response == 0 && | |
8a0ff92c WS |
2325 | c2->error_data.status == 0)) |
2326 | return hpsa_cmd_free_and_done(h, c, cmd); | |
c349775e | 2327 | |
8a0ff92c WS |
2328 | /* |
2329 | * Any RAID offload error results in retry which will use | |
c349775e ST |
2330 | * the normal I/O path so the controller can handle whatever's |
2331 | * wrong. | |
2332 | */ | |
f3f01730 | 2333 | if (is_logical_device(dev) && |
c349775e ST |
2334 | c2->error_data.serv_response == |
2335 | IOACCEL2_SERV_RESPONSE_FAILURE) { | |
080ef1cc DB |
2336 | if (c2->error_data.status == |
2337 | IOACCEL2_STATUS_SR_IOACCEL_DISABLED) | |
2338 | dev->offload_enabled = 0; | |
8a0ff92c WS |
2339 | |
2340 | return hpsa_retry_cmd(h, c); | |
a09c1441 | 2341 | } |
080ef1cc DB |
2342 | |
2343 | if (handle_ioaccel_mode2_error(h, c, cmd, c2)) | |
8a0ff92c | 2344 | return hpsa_retry_cmd(h, c); |
080ef1cc | 2345 | |
8a0ff92c | 2346 | return hpsa_cmd_free_and_done(h, c, cmd); |
c349775e ST |
2347 | } |
2348 | ||
9437ac43 SC |
2349 | /* Returns 0 on success, < 0 otherwise. */ |
2350 | static int hpsa_evaluate_tmf_status(struct ctlr_info *h, | |
2351 | struct CommandList *cp) | |
2352 | { | |
2353 | u8 tmf_status = cp->err_info->ScsiStatus; | |
2354 | ||
2355 | switch (tmf_status) { | |
2356 | case CISS_TMF_COMPLETE: | |
2357 | /* | |
2358 | * CISS_TMF_COMPLETE never happens, instead, | |
2359 | * ei->CommandStatus == 0 for this case. | |
2360 | */ | |
2361 | case CISS_TMF_SUCCESS: | |
2362 | return 0; | |
2363 | case CISS_TMF_INVALID_FRAME: | |
2364 | case CISS_TMF_NOT_SUPPORTED: | |
2365 | case CISS_TMF_FAILED: | |
2366 | case CISS_TMF_WRONG_LUN: | |
2367 | case CISS_TMF_OVERLAPPED_TAG: | |
2368 | break; | |
2369 | default: | |
2370 | dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n", | |
2371 | tmf_status); | |
2372 | break; | |
2373 | } | |
2374 | return -tmf_status; | |
2375 | } | |
2376 | ||
1fb011fb | 2377 | static void complete_scsi_command(struct CommandList *cp) |
edd16368 SC |
2378 | { |
2379 | struct scsi_cmnd *cmd; | |
2380 | struct ctlr_info *h; | |
2381 | struct ErrorInfo *ei; | |
283b4a9b | 2382 | struct hpsa_scsi_dev_t *dev; |
d9a729f3 | 2383 | struct io_accel2_cmd *c2; |
edd16368 | 2384 | |
9437ac43 SC |
2385 | u8 sense_key; |
2386 | u8 asc; /* additional sense code */ | |
2387 | u8 ascq; /* additional sense code qualifier */ | |
db111e18 | 2388 | unsigned long sense_data_size; |
edd16368 SC |
2389 | |
2390 | ei = cp->err_info; | |
7fa3030c | 2391 | cmd = cp->scsi_cmd; |
edd16368 | 2392 | h = cp->h; |
283b4a9b | 2393 | dev = cmd->device->hostdata; |
d9a729f3 | 2394 | c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; |
edd16368 SC |
2395 | |
2396 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ | |
e1f7de0c | 2397 | if ((cp->cmd_type == CMD_SCSI) && |
2b08b3e9 | 2398 | (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries)) |
33a2ffce | 2399 | hpsa_unmap_sg_chain_block(h, cp); |
edd16368 | 2400 | |
d9a729f3 WS |
2401 | if ((cp->cmd_type == CMD_IOACCEL2) && |
2402 | (c2->sg[0].chain_indicator == IOACCEL2_CHAIN)) | |
2403 | hpsa_unmap_ioaccel2_sg_chain_block(h, c2); | |
2404 | ||
edd16368 SC |
2405 | cmd->result = (DID_OK << 16); /* host byte */ |
2406 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | |
c349775e | 2407 | |
03383736 DB |
2408 | if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) |
2409 | atomic_dec(&cp->phys_disk->ioaccel_cmds_out); | |
2410 | ||
25163bd5 WS |
2411 | /* |
2412 | * We check for lockup status here as it may be set for | |
2413 | * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by | |
2414 | * fail_all_outstanding_cmds() |
2415 | */ | |
2416 | if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { | |
2417 | /* DID_NO_CONNECT will prevent a retry */ | |
2418 | cmd->result = DID_NO_CONNECT << 16; | |
8a0ff92c | 2419 | return hpsa_cmd_free_and_done(h, cp, cmd); |
25163bd5 WS |
2420 | } |
2421 | ||
d604f533 WS |
2422 | if ((unlikely(hpsa_is_pending_event(cp)))) { |
2423 | if (cp->reset_pending) | |
2424 | return hpsa_cmd_resolve_and_free(h, cp); | |
2425 | if (cp->abort_pending) | |
2426 | return hpsa_cmd_abort_and_free(h, cp, cmd); | |
2427 | } | |
2428 | ||
c349775e ST |
2429 | if (cp->cmd_type == CMD_IOACCEL2) |
2430 | return process_ioaccel2_completion(h, cp, cmd, dev); | |
2431 | ||
6aa4c361 | 2432 | scsi_set_resid(cmd, ei->ResidualCnt); |
8a0ff92c WS |
2433 | if (ei->CommandStatus == 0) |
2434 | return hpsa_cmd_free_and_done(h, cp, cmd); | |
6aa4c361 | 2435 | |
e1f7de0c MG |
2436 | /* For I/O accelerator commands, copy over some fields to the normal |
2437 | * CISS header used below for error handling. | |
2438 | */ | |
2439 | if (cp->cmd_type == CMD_IOACCEL1) { | |
2440 | struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; | |
2b08b3e9 DB |
2441 | cp->Header.SGList = scsi_sg_count(cmd); |
2442 | cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList); | |
2443 | cp->Request.CDBLen = le16_to_cpu(c->io_flags) & | |
2444 | IOACCEL1_IOFLAGS_CDBLEN_MASK; | |
50a0decf | 2445 | cp->Header.tag = c->tag; |
e1f7de0c MG |
2446 | memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); |
2447 | memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); | |
283b4a9b SC |
2448 | |
2449 | /* Any RAID offload error results in retry which will use | |
2450 | * the normal I/O path so the controller can handle whatever's | |
2451 | * wrong. | |
2452 | */ | |
f3f01730 | 2453 | if (is_logical_device(dev)) { |
283b4a9b SC |
2454 | if (ei->CommandStatus == CMD_IOACCEL_DISABLED) |
2455 | dev->offload_enabled = 0; | |
d604f533 | 2456 | return hpsa_retry_cmd(h, cp); |
283b4a9b | 2457 | } |
e1f7de0c MG |
2458 | } |
2459 | ||
edd16368 SC |
2460 | /* an error has occurred */ |
2461 | switch (ei->CommandStatus) { | |
2462 | ||
2463 | case CMD_TARGET_STATUS: | |
9437ac43 SC |
2464 | cmd->result |= ei->ScsiStatus; |
2465 | /* copy the sense data */ | |
2466 | if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo)) | |
2467 | sense_data_size = SCSI_SENSE_BUFFERSIZE; | |
2468 | else | |
2469 | sense_data_size = sizeof(ei->SenseInfo); | |
2470 | if (ei->SenseLen < sense_data_size) | |
2471 | sense_data_size = ei->SenseLen; | |
2472 | memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size); | |
2473 | if (ei->ScsiStatus) | |
2474 | decode_sense_data(ei->SenseInfo, sense_data_size, | |
2475 | &sense_key, &asc, &ascq); | |
edd16368 | 2476 | if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { |
1d3b3609 | 2477 | if (sense_key == ABORTED_COMMAND) { |
2e311fba | 2478 | cmd->result |= DID_SOFT_ERROR << 16; |
1d3b3609 MG |
2479 | break; |
2480 | } | |
edd16368 SC |
2481 | break; |
2482 | } | |
edd16368 SC |
2483 | /* Problem was not a check condition |
2484 | * Pass it up to the upper layers... | |
2485 | */ | |
2486 | if (ei->ScsiStatus) { | |
2487 | dev_warn(&h->pdev->dev, "cp %p has status 0x%x " | |
2488 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " | |
2489 | "Returning result: 0x%x\n", | |
2490 | cp, ei->ScsiStatus, | |
2491 | sense_key, asc, ascq, | |
2492 | cmd->result); | |
2493 | } else { /* scsi status is zero??? How??? */ | |
2494 | dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " | |
2495 | "Returning no connection.\n", cp), | |
2496 | ||
2497 | /* Ordinarily, this case should never happen, | |
2498 | * but there is a bug in some released firmware | |
2499 | * revisions that allows it to happen if, for | |
2500 | * example, a 4100 backplane loses power and | |
2501 | * the tape drive is in it. We assume that | |
2502 | * it's a fatal error of some kind because we | |
2503 | * can't show that it wasn't. We will make it | |
2504 | * look like selection timeout since that is | |
2505 | * the most common reason for this to occur, | |
2506 | * and it's severe enough. | |
2507 | */ | |
2508 | ||
2509 | cmd->result = DID_NO_CONNECT << 16; | |
2510 | } | |
2511 | break; | |
2512 | ||
2513 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | |
2514 | break; | |
2515 | case CMD_DATA_OVERRUN: | |
f42e81e1 SC |
2516 | dev_warn(&h->pdev->dev, |
2517 | "CDB %16phN data overrun\n", cp->Request.CDB); | |
edd16368 SC |
2518 | break; |
2519 | case CMD_INVALID: { | |
2520 | /* print_bytes(cp, sizeof(*cp), 1, 0); | |
2521 | print_cmd(cp); */ | |
2522 | /* We get CMD_INVALID if you address a non-existent device | |
2523 | * instead of a selection timeout (no response). You will | |
2524 | * see this if you yank out a drive, then try to access it. | |
2525 | * This is kind of a shame because it means that any other | |
2526 | * CMD_INVALID (e.g. driver bug) will get interpreted as a | |
2527 | * missing target. */ | |
2528 | cmd->result = DID_NO_CONNECT << 16; | |
2529 | } | |
2530 | break; | |
2531 | case CMD_PROTOCOL_ERR: | |
256d0eaa | 2532 | cmd->result = DID_ERROR << 16; |
f42e81e1 SC |
2533 | dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n", |
2534 | cp->Request.CDB); | |
edd16368 SC |
2535 | break; |
2536 | case CMD_HARDWARE_ERR: | |
2537 | cmd->result = DID_ERROR << 16; | |
f42e81e1 SC |
2538 | dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n", |
2539 | cp->Request.CDB); | |
edd16368 SC |
2540 | break; |
2541 | case CMD_CONNECTION_LOST: | |
2542 | cmd->result = DID_ERROR << 16; | |
f42e81e1 SC |
2543 | dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", |
2544 | cp->Request.CDB); | |
edd16368 SC |
2545 | break; |
2546 | case CMD_ABORTED: | |
a58e7e53 WS |
2547 | /* Return now to avoid calling scsi_done(). */ |
2548 | return hpsa_cmd_abort_and_free(h, cp, cmd); | |
edd16368 SC |
2549 | case CMD_ABORT_FAILED: |
2550 | cmd->result = DID_ERROR << 16; | |
f42e81e1 SC |
2551 | dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", |
2552 | cp->Request.CDB); | |
edd16368 SC |
2553 | break; |
2554 | case CMD_UNSOLICITED_ABORT: | |
f6e76055 | 2555 | cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ |
f42e81e1 SC |
2556 | dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", |
2557 | cp->Request.CDB); | |
edd16368 SC |
2558 | break; |
2559 | case CMD_TIMEOUT: | |
2560 | cmd->result = DID_TIME_OUT << 16; | |
f42e81e1 SC |
2561 | dev_warn(&h->pdev->dev, "CDB %16phN timed out\n", |
2562 | cp->Request.CDB); | |
edd16368 | 2563 | break; |
1d5e2ed0 SC |
2564 | case CMD_UNABORTABLE: |
2565 | cmd->result = DID_ERROR << 16; | |
2566 | dev_warn(&h->pdev->dev, "Command unabortable\n"); | |
2567 | break; | |
9437ac43 SC |
2568 | case CMD_TMF_STATUS: |
2569 | if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */ | |
2570 | cmd->result = DID_ERROR << 16; | |
2571 | break; | |
283b4a9b SC |
2572 | case CMD_IOACCEL_DISABLED: |
2573 | /* This only handles the direct pass-through case since RAID | |
2574 | * offload is handled above. Just attempt a retry. | |
2575 | */ | |
2576 | cmd->result = DID_SOFT_ERROR << 16; | |
2577 | dev_warn(&h->pdev->dev, | |
2578 | "cp %p had HP SSD Smart Path error\n", cp); | |
2579 | break; | |
edd16368 SC |
2580 | default: |
2581 | cmd->result = DID_ERROR << 16; | |
2582 | dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", | |
2583 | cp, ei->CommandStatus); | |
2584 | } | |
8a0ff92c WS |
2585 | |
2586 | return hpsa_cmd_free_and_done(h, cp, cmd); | |
edd16368 SC |
2587 | } |
2588 | ||
edd16368 SC |
2589 | static void hpsa_pci_unmap(struct pci_dev *pdev, |
2590 | struct CommandList *c, int sg_used, int data_direction) | |
2591 | { | |
2592 | int i; | |
edd16368 | 2593 | |
50a0decf SC |
2594 | for (i = 0; i < sg_used; i++) |
2595 | pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr), | |
2596 | le32_to_cpu(c->SG[i].Len), | |
2597 | data_direction); | |
edd16368 SC |
2598 | } |
2599 | ||
a2dac136 | 2600 | static int hpsa_map_one(struct pci_dev *pdev, |
edd16368 SC |
2601 | struct CommandList *cp, |
2602 | unsigned char *buf, | |
2603 | size_t buflen, | |
2604 | int data_direction) | |
2605 | { | |
01a02ffc | 2606 | u64 addr64; |
edd16368 SC |
2607 | |
2608 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { | |
2609 | cp->Header.SGList = 0; | |
50a0decf | 2610 | cp->Header.SGTotal = cpu_to_le16(0); |
a2dac136 | 2611 | return 0; |
edd16368 SC |
2612 | } |
2613 | ||
50a0decf | 2614 | addr64 = pci_map_single(pdev, buf, buflen, data_direction); |
eceaae18 | 2615 | if (dma_mapping_error(&pdev->dev, addr64)) { |
a2dac136 | 2616 | /* Prevent subsequent unmap of something never mapped */ |
eceaae18 | 2617 | cp->Header.SGList = 0; |
50a0decf | 2618 | cp->Header.SGTotal = cpu_to_le16(0); |
a2dac136 | 2619 | return -1; |
eceaae18 | 2620 | } |
50a0decf SC |
2621 | cp->SG[0].Addr = cpu_to_le64(addr64); |
2622 | cp->SG[0].Len = cpu_to_le32(buflen); | |
2623 | cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */ | |
2624 | cp->Header.SGList = 1; /* no. SGs contig in this cmd */ | |
2625 | cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */ | |
a2dac136 | 2626 | return 0; |
edd16368 SC |
2627 | } |
2628 | ||
25163bd5 WS |
2629 | #define NO_TIMEOUT ((unsigned long) -1) |
2630 | #define DEFAULT_TIMEOUT 30000 /* milliseconds */ | |
2631 | static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, | |
2632 | struct CommandList *c, int reply_queue, unsigned long timeout_msecs) | |
edd16368 SC |
2633 | { |
2634 | DECLARE_COMPLETION_ONSTACK(wait); | |
2635 | ||
2636 | c->waiting = &wait; | |
25163bd5 WS |
2637 | __enqueue_cmd_and_start_io(h, c, reply_queue); |
2638 | if (timeout_msecs == NO_TIMEOUT) { | |
2639 | /* TODO: get rid of this no-timeout thing */ | |
2640 | wait_for_completion_io(&wait); | |
2641 | return IO_OK; | |
2642 | } | |
2643 | if (!wait_for_completion_io_timeout(&wait, | |
2644 | msecs_to_jiffies(timeout_msecs))) { | |
2645 | dev_warn(&h->pdev->dev, "Command timed out.\n"); | |
2646 | return -ETIMEDOUT; | |
2647 | } | |
2648 | return IO_OK; | |
2649 | } | |
2650 | ||
2651 | static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c, | |
2652 | int reply_queue, unsigned long timeout_msecs) | |
2653 | { | |
2654 | if (unlikely(lockup_detected(h))) { | |
2655 | c->err_info->CommandStatus = CMD_CTLR_LOCKUP; | |
2656 | return IO_OK; | |
2657 | } | |
2658 | return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs); | |
edd16368 SC |
2659 | } |
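/*
 * Typical caller pattern (a sketch; it assumes the cmd_alloc()/fill_cmd()
 * helpers defined elsewhere in this driver):
 *
 *   c = cmd_alloc(h);
 *   ... fill in c->Request ...
 *   rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, DEFAULT_TIMEOUT);
 *   if (rc == -ETIMEDOUT)
 *           ... the command never completed; it may still be in flight,
 *           so it cannot simply be freed here ...
 */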
2660 | ||
094963da SC |
2661 | static u32 lockup_detected(struct ctlr_info *h) |
2662 | { | |
2663 | int cpu; | |
2664 | u32 rc, *lockup_detected; | |
2665 | ||
2666 | cpu = get_cpu(); | |
2667 | lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); | |
2668 | rc = *lockup_detected; | |
2669 | put_cpu(); | |
2670 | return rc; | |
2671 | } | |
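/*
 * The lockup flag is kept per-CPU, presumably so this hot-path check
 * stays a local read with no shared cache-line contention or locking.
 */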
2672 | ||
9c2fc160 | 2673 | #define MAX_DRIVER_CMD_RETRIES 25 |
25163bd5 WS |
2674 | static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
2675 | struct CommandList *c, int data_direction, unsigned long timeout_msecs) | |
edd16368 | 2676 | { |
9c2fc160 | 2677 | int backoff_time = 10, retry_count = 0; |
25163bd5 | 2678 | int rc; |
edd16368 SC |
2679 | |
2680 | do { | |
7630abd0 | 2681 | memset(c->err_info, 0, sizeof(*c->err_info)); |
25163bd5 WS |
2682 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, |
2683 | timeout_msecs); | |
2684 | if (rc) | |
2685 | break; | |
edd16368 | 2686 | retry_count++; |
9c2fc160 SC |
2687 | if (retry_count > 3) { |
2688 | msleep(backoff_time); | |
2689 | if (backoff_time < 1000) | |
2690 | backoff_time *= 2; | |
2691 | } | |
852af20a | 2692 | } while ((check_for_unit_attention(h, c) || |
9c2fc160 SC |
2693 | check_for_busy(h, c)) && |
2694 | retry_count <= MAX_DRIVER_CMD_RETRIES); | |
edd16368 | 2695 | hpsa_pci_unmap(h->pdev, c, 1, data_direction); |
25163bd5 WS |
2696 | if (retry_count > MAX_DRIVER_CMD_RETRIES) |
2697 | rc = -EIO; | |
2698 | return rc; | |
edd16368 SC |
2699 | } |
2700 | ||
d1e8beac SC |
2701 | static void hpsa_print_cmd(struct ctlr_info *h, char *txt, |
2702 | struct CommandList *c) | |
edd16368 | 2703 | { |
d1e8beac SC |
2704 | const u8 *cdb = c->Request.CDB; |
2705 | const u8 *lun = c->Header.LUN.LunAddrBytes; | |
2706 | ||
2707 | dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" | |
2708 | " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", | |
2709 | txt, lun[0], lun[1], lun[2], lun[3], | |
2710 | lun[4], lun[5], lun[6], lun[7], | |
2711 | cdb[0], cdb[1], cdb[2], cdb[3], | |
2712 | cdb[4], cdb[5], cdb[6], cdb[7], | |
2713 | cdb[8], cdb[9], cdb[10], cdb[11], | |
2714 | cdb[12], cdb[13], cdb[14], cdb[15]); | |
2715 | } | |
2716 | ||
2717 | static void hpsa_scsi_interpret_error(struct ctlr_info *h, | |
2718 | struct CommandList *cp) | |
2719 | { | |
2720 | const struct ErrorInfo *ei = cp->err_info; | |
edd16368 | 2721 | struct device *d = &cp->h->pdev->dev; |
9437ac43 SC |
2722 | u8 sense_key, asc, ascq; |
2723 | int sense_len; | |
edd16368 | 2724 | |
edd16368 SC |
2725 | switch (ei->CommandStatus) { |
2726 | case CMD_TARGET_STATUS: | |
9437ac43 SC |
2727 | if (ei->SenseLen > sizeof(ei->SenseInfo)) |
2728 | sense_len = sizeof(ei->SenseInfo); | |
2729 | else | |
2730 | sense_len = ei->SenseLen; | |
2731 | decode_sense_data(ei->SenseInfo, sense_len, | |
2732 | &sense_key, &asc, &ascq); | |
d1e8beac SC |
2733 | hpsa_print_cmd(h, "SCSI status", cp); |
2734 | if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) | |
9437ac43 SC |
2735 | dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n", |
2736 | sense_key, asc, ascq); | |
d1e8beac | 2737 | else |
9437ac43 | 2738 | dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus); |
edd16368 SC |
2739 | if (ei->ScsiStatus == 0) |
2740 | dev_warn(d, "SCSI status is abnormally zero. " | |
2741 | "(probably indicates selection timeout " | |
2742 | "reported incorrectly due to a known " | |
2743 | "firmware bug, circa July, 2001.)\n"); | |
2744 | break; | |
2745 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ | |
edd16368 SC |
2746 | break; |
2747 | case CMD_DATA_OVERRUN: | |
d1e8beac | 2748 | hpsa_print_cmd(h, "overrun condition", cp); |
edd16368 SC |
2749 | break; |
2750 | case CMD_INVALID: { | |
2751 | /* controller unfortunately reports SCSI passthrus | 
2752 | * to non-existent targets as invalid commands. | |
2753 | */ | |
d1e8beac SC |
2754 | hpsa_print_cmd(h, "invalid command", cp); |
2755 | dev_warn(d, "probably means device no longer present\n"); | |
edd16368 SC |
2756 | } |
2757 | break; | |
2758 | case CMD_PROTOCOL_ERR: | |
d1e8beac | 2759 | hpsa_print_cmd(h, "protocol error", cp); |
edd16368 SC |
2760 | break; |
2761 | case CMD_HARDWARE_ERR: | |
d1e8beac | 2762 | hpsa_print_cmd(h, "hardware error", cp); |
edd16368 SC |
2763 | break; |
2764 | case CMD_CONNECTION_LOST: | |
d1e8beac | 2765 | hpsa_print_cmd(h, "connection lost", cp); |
edd16368 SC |
2766 | break; |
2767 | case CMD_ABORTED: | |
d1e8beac | 2768 | hpsa_print_cmd(h, "aborted", cp); |
edd16368 SC |
2769 | break; |
2770 | case CMD_ABORT_FAILED: | |
d1e8beac | 2771 | hpsa_print_cmd(h, "abort failed", cp); |
edd16368 SC |
2772 | break; |
2773 | case CMD_UNSOLICITED_ABORT: | |
d1e8beac | 2774 | hpsa_print_cmd(h, "unsolicited abort", cp); |
edd16368 SC |
2775 | break; |
2776 | case CMD_TIMEOUT: | |
d1e8beac | 2777 | hpsa_print_cmd(h, "timed out", cp); |
edd16368 | 2778 | break; |
1d5e2ed0 | 2779 | case CMD_UNABORTABLE: |
d1e8beac | 2780 | hpsa_print_cmd(h, "unabortable", cp); |
1d5e2ed0 | 2781 | break; |
25163bd5 WS |
2782 | case CMD_CTLR_LOCKUP: |
2783 | hpsa_print_cmd(h, "controller lockup detected", cp); | |
2784 | break; | |
edd16368 | 2785 | default: |
d1e8beac SC |
2786 | hpsa_print_cmd(h, "unknown status", cp); |
2787 | dev_warn(d, "Unknown command status %x\n", | |
edd16368 SC |
2788 | ei->CommandStatus); |
2789 | } | |
2790 | } | |
2791 | ||
2792 | static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |
b7bb24eb | 2793 | u16 page, unsigned char *buf, |
edd16368 SC |
2794 | unsigned char bufsize) |
2795 | { | |
2796 | int rc = IO_OK; | |
2797 | struct CommandList *c; | |
2798 | struct ErrorInfo *ei; | |
2799 | ||
45fcb86e | 2800 | c = cmd_alloc(h); |
edd16368 | 2801 | |
a2dac136 SC |
2802 | if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, |
2803 | page, scsi3addr, TYPE_CMD)) { | |
2804 | rc = -1; | |
2805 | goto out; | |
2806 | } | |
25163bd5 WS |
2807 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
2808 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
2809 | if (rc) | |
2810 | goto out; | |
edd16368 SC |
2811 | ei = c->err_info; |
2812 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
d1e8beac | 2813 | hpsa_scsi_interpret_error(h, c); |
edd16368 SC |
2814 | rc = -1; |
2815 | } | |
a2dac136 | 2816 | out: |
45fcb86e | 2817 | cmd_free(h, c); |
edd16368 SC |
2818 | return rc; |
2819 | } | |
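/*
 * Typical use of hpsa_scsi_do_inquiry(), as in hpsa_get_raid_level()
 * below, which reads the vendor-specific RAID-level VPD page into a
 * 64-byte buffer:
 *
 *	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
 *
 * A nonzero return means the command failed or returned bad status.
 */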
2820 | ||
bf711ac6 | 2821 | static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, |
25163bd5 | 2822 | u8 reset_type, int reply_queue) |
edd16368 SC |
2823 | { |
2824 | int rc = IO_OK; | |
2825 | struct CommandList *c; | |
2826 | struct ErrorInfo *ei; | |
2827 | ||
45fcb86e | 2828 | c = cmd_alloc(h); |
edd16368 | 2829 | |
edd16368 | 2830 | |
a2dac136 | 2831 | /* fill_cmd can't fail here, no data buffer to map. */ |
0b9b7b6e | 2832 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, |
bf711ac6 | 2833 | scsi3addr, TYPE_MSG); |
25163bd5 WS |
2834 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
2835 | if (rc) { | |
2836 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); | |
2837 | goto out; | |
2838 | } | |
edd16368 SC |
2839 | /* no unmap needed here because no data xfer. */ |
2840 | ||
2841 | ei = c->err_info; | |
2842 | if (ei->CommandStatus != 0) { | |
d1e8beac | 2843 | hpsa_scsi_interpret_error(h, c); |
edd16368 SC |
2844 | rc = -1; |
2845 | } | |
25163bd5 | 2846 | out: |
45fcb86e | 2847 | cmd_free(h, c); |
edd16368 SC |
2848 | return rc; |
2849 | } | |
2850 | ||
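/*
 * hpsa_cmd_dev_match() below decides whether an outstanding command is
 * addressed to the device being reset: SCSI and ioctl commands are
 * matched by their 8-byte LUN address, ioaccel commands by phys_disk
 * (directly in HBA mode, against each RAID member otherwise), and TMF
 * requests by their it_nexus handle.
 */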
d604f533 WS |
2851 | static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, |
2852 | struct hpsa_scsi_dev_t *dev, | |
2853 | unsigned char *scsi3addr) | |
2854 | { | |
2855 | int i; | |
2856 | bool match = false; | |
2857 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | |
2858 | struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; | |
2859 | ||
2860 | if (hpsa_is_cmd_idle(c)) | |
2861 | return false; | |
2862 | ||
2863 | switch (c->cmd_type) { | |
2864 | case CMD_SCSI: | |
2865 | case CMD_IOCTL_PEND: | |
2866 | match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes, | |
2867 | sizeof(c->Header.LUN.LunAddrBytes)); | |
2868 | break; | |
2869 | ||
2870 | case CMD_IOACCEL1: | |
2871 | case CMD_IOACCEL2: | |
2872 | if (c->phys_disk == dev) { | |
2873 | /* HBA mode match */ | |
2874 | match = true; | |
2875 | } else { | |
2876 | /* Possible RAID mode -- check each phys dev. */ | |
2877 | /* FIXME: Do we need to take out a lock here? If | |
2878 | * so, we could just call hpsa_get_pdisk_of_ioaccel2() | |
2879 | * instead. */ | |
2880 | for (i = 0; i < dev->nphysical_disks && !match; i++) { | |
2881 | /* FIXME: an alternate test might be | |
2882 | * | |
2883 | * match = dev->phys_disk[i]->ioaccel_handle | |
2884 | * == c2->scsi_nexus; */ | |
2885 | match = dev->phys_disk[i] == c->phys_disk; | |
2886 | } | |
2887 | } | |
2888 | break; | |
2889 | ||
2890 | case IOACCEL2_TMF: | |
2891 | for (i = 0; i < dev->nphysical_disks && !match; i++) { | |
2892 | match = dev->phys_disk[i]->ioaccel_handle == | |
2893 | le32_to_cpu(ac->it_nexus); | |
2894 | } | |
2895 | break; | |
2896 | ||
2897 | case 0: /* The command is in the middle of being initialized. */ | |
2898 | match = false; | |
2899 | break; | |
2900 | ||
2901 | default: | |
2902 | dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n", | |
2903 | c->cmd_type); | |
2904 | BUG(); | |
2905 | } | |
2906 | ||
2907 | return match; | |
2908 | } | |
2909 | ||
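/*
 * hpsa_do_reset() below serializes resets via reset_mutex, marks each
 * outstanding command addressed to the target device as reset_pending,
 * sends the reset, and then waits for those commands to drain (or for
 * a controller lockup to be detected).
 */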
2910 | static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev, | |
2911 | unsigned char *scsi3addr, u8 reset_type, int reply_queue) | |
2912 | { | |
2913 | int i; | |
2914 | int rc = 0; | |
2915 | ||
2916 | /* We can really only handle one reset at a time */ | |
2917 | if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) { | |
2918 | dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n"); | |
2919 | return -EINTR; | |
2920 | } | |
2921 | ||
2922 | BUG_ON(atomic_read(&dev->reset_cmds_out) != 0); | |
2923 | ||
2924 | for (i = 0; i < h->nr_cmds; i++) { | |
2925 | struct CommandList *c = h->cmd_pool + i; | |
2926 | int refcount = atomic_inc_return(&c->refcount); | |
2927 | ||
2928 | if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) { | |
2929 | unsigned long flags; | |
2930 | ||
2931 | /* | |
2932 | * Mark the target command as having a reset pending, | |
2933 | * then take the lock so that the command cannot complete | 
2934 | * while we're considering it. If the command is not | 
2935 | * idle then count it; otherwise clear the mark. | 
2936 | */ | |
2937 | c->reset_pending = dev; | |
2938 | spin_lock_irqsave(&h->lock, flags); /* Implied MB */ | |
2939 | if (!hpsa_is_cmd_idle(c)) | |
2940 | atomic_inc(&dev->reset_cmds_out); | |
2941 | else | |
2942 | c->reset_pending = NULL; | |
2943 | spin_unlock_irqrestore(&h->lock, flags); | |
2944 | } | |
2945 | ||
2946 | cmd_free(h, c); | |
2947 | } | |
2948 | ||
2949 | rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue); | |
2950 | if (!rc) | |
2951 | wait_event(h->event_sync_wait_queue, | |
2952 | atomic_read(&dev->reset_cmds_out) == 0 || | |
2953 | lockup_detected(h)); | |
2954 | ||
2955 | if (unlikely(lockup_detected(h))) { | |
77678d3a DB |
2956 | dev_warn(&h->pdev->dev, |
2957 | "Controller lockup detected during reset wait\n"); | |
2958 | rc = -ENODEV; | |
2959 | } | |
d604f533 WS |
2960 | |
2961 | if (unlikely(rc)) | |
2962 | atomic_set(&dev->reset_cmds_out, 0); | |
2963 | ||
2964 | mutex_unlock(&h->reset_mutex); | |
2965 | return rc; | |
2966 | } | |
2967 | ||
edd16368 SC |
2968 | static void hpsa_get_raid_level(struct ctlr_info *h, |
2969 | unsigned char *scsi3addr, unsigned char *raid_level) | |
2970 | { | |
2971 | int rc; | |
2972 | unsigned char *buf; | |
2973 | ||
2974 | *raid_level = RAID_UNKNOWN; | |
2975 | buf = kzalloc(64, GFP_KERNEL); | |
2976 | if (!buf) | |
2977 | return; | |
b7bb24eb | 2978 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); |
edd16368 SC |
2979 | if (rc == 0) |
2980 | *raid_level = buf[8]; | |
2981 | if (*raid_level > RAID_UNKNOWN) | |
2982 | *raid_level = RAID_UNKNOWN; | |
2983 | kfree(buf); | |
2984 | return; | |
2985 | } | |
2986 | ||
283b4a9b SC |
2987 | #define HPSA_MAP_DEBUG |
2988 | #ifdef HPSA_MAP_DEBUG | |
2989 | static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, | |
2990 | struct raid_map_data *map_buff) | |
2991 | { | |
2992 | struct raid_map_disk_data *dd = &map_buff->data[0]; | |
2993 | int map, row, col; | |
2994 | u16 map_cnt, row_cnt, disks_per_row; | |
2995 | ||
2996 | if (rc != 0) | |
2997 | return; | |
2998 | ||
2ba8bfc8 SC |
2999 | /* Show details only if debugging has been activated. */ |
3000 | if (h->raid_offload_debug < 2) | |
3001 | return; | |
3002 | ||
283b4a9b SC |
3003 | dev_info(&h->pdev->dev, "structure_size = %u\n", |
3004 | le32_to_cpu(map_buff->structure_size)); | |
3005 | dev_info(&h->pdev->dev, "volume_blk_size = %u\n", | |
3006 | le32_to_cpu(map_buff->volume_blk_size)); | |
3007 | dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", | |
3008 | le64_to_cpu(map_buff->volume_blk_cnt)); | |
3009 | dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", | |
3010 | map_buff->phys_blk_shift); | |
3011 | dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", | |
3012 | map_buff->parity_rotation_shift); | |
3013 | dev_info(&h->pdev->dev, "strip_size = %u\n", | |
3014 | le16_to_cpu(map_buff->strip_size)); | |
3015 | dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", | |
3016 | le64_to_cpu(map_buff->disk_starting_blk)); | |
3017 | dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", | |
3018 | le64_to_cpu(map_buff->disk_blk_cnt)); | |
3019 | dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", | |
3020 | le16_to_cpu(map_buff->data_disks_per_row)); | |
3021 | dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", | |
3022 | le16_to_cpu(map_buff->metadata_disks_per_row)); | |
3023 | dev_info(&h->pdev->dev, "row_cnt = %u\n", | |
3024 | le16_to_cpu(map_buff->row_cnt)); | |
3025 | dev_info(&h->pdev->dev, "layout_map_count = %u\n", | |
3026 | le16_to_cpu(map_buff->layout_map_count)); | |
2b08b3e9 | 3027 | dev_info(&h->pdev->dev, "flags = 0x%x\n", |
dd0e19f3 | 3028 | le16_to_cpu(map_buff->flags)); |
2b08b3e9 DB |
3029 | dev_info(&h->pdev->dev, "encryption = %s\n", | 
3030 | le16_to_cpu(map_buff->flags) & | |
3031 | RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); | |
dd0e19f3 ST |
3032 | dev_info(&h->pdev->dev, "dekindex = %u\n", |
3033 | le16_to_cpu(map_buff->dekindex)); | |
283b4a9b SC |
3034 | map_cnt = le16_to_cpu(map_buff->layout_map_count); |
3035 | for (map = 0; map < map_cnt; map++) { | |
3036 | dev_info(&h->pdev->dev, "Map%u:\n", map); | |
3037 | row_cnt = le16_to_cpu(map_buff->row_cnt); | |
3038 | for (row = 0; row < row_cnt; row++) { | |
3039 | dev_info(&h->pdev->dev, " Row%u:\n", row); | |
3040 | disks_per_row = | |
3041 | le16_to_cpu(map_buff->data_disks_per_row); | |
3042 | for (col = 0; col < disks_per_row; col++, dd++) | |
3043 | dev_info(&h->pdev->dev, | |
3044 | " D%02u: h=0x%04x xor=%u,%u\n", | |
3045 | col, dd->ioaccel_handle, | |
3046 | dd->xor_mult[0], dd->xor_mult[1]); | |
3047 | disks_per_row = | |
3048 | le16_to_cpu(map_buff->metadata_disks_per_row); | |
3049 | for (col = 0; col < disks_per_row; col++, dd++) | |
3050 | dev_info(&h->pdev->dev, | |
3051 | " M%02u: h=0x%04x xor=%u,%u\n", | |
3052 | col, dd->ioaccel_handle, | |
3053 | dd->xor_mult[0], dd->xor_mult[1]); | |
3054 | } | |
3055 | } | |
3056 | } | |
3057 | #else | |
3058 | static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, | |
3059 | __attribute__((unused)) int rc, | |
3060 | __attribute__((unused)) struct raid_map_data *map_buff) | |
3061 | { | |
3062 | } | |
3063 | #endif | |
3064 | ||
3065 | static int hpsa_get_raid_map(struct ctlr_info *h, | |
3066 | unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) | |
3067 | { | |
3068 | int rc = 0; | |
3069 | struct CommandList *c; | |
3070 | struct ErrorInfo *ei; | |
3071 | ||
45fcb86e | 3072 | c = cmd_alloc(h); |
bf43caf3 | 3073 | |
283b4a9b SC |
3074 | if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, |
3075 | sizeof(this_device->raid_map), 0, | |
3076 | scsi3addr, TYPE_CMD)) { | |
2dd02d74 RE |
3077 | dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n"); |
3078 | cmd_free(h, c); | |
3079 | return -1; | |
283b4a9b | 3080 | } |
25163bd5 WS |
3081 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3082 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
3083 | if (rc) | |
3084 | goto out; | |
283b4a9b SC |
3085 | ei = c->err_info; |
3086 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
d1e8beac | 3087 | hpsa_scsi_interpret_error(h, c); |
25163bd5 WS |
3088 | rc = -1; |
3089 | goto out; | |
283b4a9b | 3090 | } |
45fcb86e | 3091 | cmd_free(h, c); |
283b4a9b SC |
3092 | |
3093 | /* @todo in the future, dynamically allocate RAID map memory */ | |
3094 | if (le32_to_cpu(this_device->raid_map.structure_size) > | |
3095 | sizeof(this_device->raid_map)) { | |
3096 | dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); | |
3097 | rc = -1; | |
3098 | } | |
3099 | hpsa_debug_map_buff(h, rc, &this_device->raid_map); | |
3100 | return rc; | |
25163bd5 WS |
3101 | out: |
3102 | cmd_free(h, c); | |
3103 | return rc; | |
283b4a9b SC |
3104 | } |
3105 | ||
d04e62b9 KB |
3106 | static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h, |
3107 | unsigned char scsi3addr[], u16 bmic_device_index, | |
3108 | struct bmic_sense_subsystem_info *buf, size_t bufsize) | |
3109 | { | |
3110 | int rc = IO_OK; | |
3111 | struct CommandList *c; | |
3112 | struct ErrorInfo *ei; | |
3113 | ||
3114 | c = cmd_alloc(h); | |
3115 | ||
3116 | rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize, | |
3117 | 0, RAID_CTLR_LUNID, TYPE_CMD); | |
3118 | if (rc) | |
3119 | goto out; | |
3120 | ||
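/* The 16-bit BMIC device index is split across the CDB: low byte in
 * CDB[2], high byte in CDB[9] -- the same convention used in
 * hpsa_bmic_id_physical_device() below.
 */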
3121 | c->Request.CDB[2] = bmic_device_index & 0xff; | |
3122 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | |
3123 | ||
3124 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | |
3125 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
3126 | if (rc) | |
3127 | goto out; | |
3128 | ei = c->err_info; | |
3129 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
3130 | hpsa_scsi_interpret_error(h, c); | |
3131 | rc = -1; | |
3132 | } | |
3133 | out: | |
3134 | cmd_free(h, c); | |
3135 | return rc; | |
3136 | } | |
3137 | ||
66749d0d ST |
3138 | static int hpsa_bmic_id_controller(struct ctlr_info *h, |
3139 | struct bmic_identify_controller *buf, size_t bufsize) | |
3140 | { | |
3141 | int rc = IO_OK; | |
3142 | struct CommandList *c; | |
3143 | struct ErrorInfo *ei; | |
3144 | ||
3145 | c = cmd_alloc(h); | |
3146 | ||
3147 | rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize, | |
3148 | 0, RAID_CTLR_LUNID, TYPE_CMD); | |
3149 | if (rc) | |
3150 | goto out; | |
3151 | ||
3152 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | |
3153 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
3154 | if (rc) | |
3155 | goto out; | |
3156 | ei = c->err_info; | |
3157 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
3158 | hpsa_scsi_interpret_error(h, c); | |
3159 | rc = -1; | |
3160 | } | |
3161 | out: | |
3162 | cmd_free(h, c); | |
3163 | return rc; | |
3164 | } | |
3165 | ||
03383736 DB |
3166 | static int hpsa_bmic_id_physical_device(struct ctlr_info *h, |
3167 | unsigned char scsi3addr[], u16 bmic_device_index, | |
3168 | struct bmic_identify_physical_device *buf, size_t bufsize) | |
3169 | { | |
3170 | int rc = IO_OK; | |
3171 | struct CommandList *c; | |
3172 | struct ErrorInfo *ei; | |
3173 | ||
3174 | c = cmd_alloc(h); | |
3175 | rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize, | |
3176 | 0, RAID_CTLR_LUNID, TYPE_CMD); | |
3177 | if (rc) | |
3178 | goto out; | |
3179 | ||
3180 | c->Request.CDB[2] = bmic_device_index & 0xff; | |
3181 | c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff; | |
3182 | ||
25163bd5 WS |
3183 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, |
3184 | NO_TIMEOUT); | |
03383736 DB |
3185 | ei = c->err_info; |
3186 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
3187 | hpsa_scsi_interpret_error(h, c); | |
3188 | rc = -1; | |
3189 | } | |
3190 | out: | |
3191 | cmd_free(h, c); | |
d04e62b9 | 3192 | |
03383736 DB |
3193 | return rc; |
3194 | } | |
3195 | ||
cca8f13b DB |
3196 | /* |
3197 | * get enclosure information | |
3198 | * struct ReportExtendedLUNdata *rlep - report entry that supplies the BMIC drive number | 
3199 | * struct hpsa_scsi_dev_t *encl_dev - device entry for enclosure | |
3200 | * Uses id_physical_device to determine the box_index. | |
3201 | */ | |
3202 | static void hpsa_get_enclosure_info(struct ctlr_info *h, | |
3203 | unsigned char *scsi3addr, | |
3204 | struct ReportExtendedLUNdata *rlep, int rle_index, | |
3205 | struct hpsa_scsi_dev_t *encl_dev) | |
3206 | { | |
3207 | int rc = -1; | |
3208 | struct CommandList *c = NULL; | |
3209 | struct ErrorInfo *ei = NULL; | |
3210 | struct bmic_sense_storage_box_params *bssbp = NULL; | |
3211 | struct bmic_identify_physical_device *id_phys = NULL; | |
3212 | struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; | |
3213 | u16 bmic_device_index = 0; | |
3214 | ||
3215 | bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); | |
3216 | ||
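/* Skip silently (reporting success) when the device is masked or when
 * GET_BMIC_DRIVE_NUMBER yields 0xFF00, which here acts as a
 * no-valid-drive-number sentinel.
 */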
17a9e54a DB |
3217 | if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { |
3218 | rc = IO_OK; | |
cca8f13b | 3219 | goto out; |
17a9e54a | 3220 | } |
cca8f13b DB |
3221 | |
3222 | bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); | |
3223 | if (!bssbp) | |
3224 | goto out; | |
3225 | ||
3226 | id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); | |
3227 | if (!id_phys) | |
3228 | goto out; | |
3229 | ||
3230 | rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index, | |
3231 | id_phys, sizeof(*id_phys)); | |
3232 | if (rc) { | |
3233 | dev_warn(&h->pdev->dev, "%s: id_phys failed %d bdi[0x%x]\n", | |
3234 | __func__, encl_dev->external, bmic_device_index); | |
3235 | goto out; | |
3236 | } | |
3237 | ||
3238 | c = cmd_alloc(h); | |
3239 | ||
3240 | rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp, | |
3241 | sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD); | |
3242 | ||
3243 | if (rc) | |
3244 | goto out; | |
3245 | ||
3246 | if (id_phys->phys_connector[1] == 'E') | |
3247 | c->Request.CDB[5] = id_phys->box_index; | |
3248 | else | |
3249 | c->Request.CDB[5] = 0; | |
3250 | ||
3251 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE, | |
3252 | NO_TIMEOUT); | |
3253 | if (rc) | |
3254 | goto out; | |
3255 | ||
3256 | ei = c->err_info; | |
3257 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
3258 | rc = -1; | |
3259 | goto out; | |
3260 | } | |
3261 | ||
3262 | encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port; | |
3263 | memcpy(&encl_dev->phys_connector[id_phys->active_path_number], | |
3264 | bssbp->phys_connector, sizeof(bssbp->phys_connector)); | |
3265 | ||
3266 | rc = IO_OK; | |
3267 | out: | |
3268 | kfree(bssbp); | |
3269 | kfree(id_phys); | |
3270 | ||
3271 | if (c) | |
3272 | cmd_free(h, c); | |
3273 | ||
3274 | if (rc != IO_OK) | |
3275 | hpsa_show_dev_msg(KERN_INFO, h, encl_dev, | |
3276 | "Error, could not get enclosure information\n"); | |
3277 | } | |
3278 | ||
d04e62b9 KB |
3279 | static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h, |
3280 | unsigned char *scsi3addr) | |
3281 | { | |
3282 | struct ReportExtendedLUNdata *physdev; | |
3283 | u32 nphysicals; | |
3284 | u64 sa = 0; | |
3285 | int i; | |
3286 | ||
3287 | physdev = kzalloc(sizeof(*physdev), GFP_KERNEL); | |
3288 | if (!physdev) | |
3289 | return 0; | |
3290 | ||
3291 | if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { | |
3292 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | |
3293 | kfree(physdev); | |
3294 | return 0; | |
3295 | } | |
3296 | nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24; | |
3297 | ||
3298 | for (i = 0; i < nphysicals; i++) | |
3299 | if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) { | |
3300 | sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]); | |
3301 | break; | |
3302 | } | |
3303 | ||
3304 | kfree(physdev); | |
3305 | ||
3306 | return sa; | |
3307 | } | |
3308 | ||
3309 | static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr, | |
3310 | struct hpsa_scsi_dev_t *dev) | |
3311 | { | |
3312 | int rc; | |
3313 | u64 sa = 0; | |
3314 | ||
3315 | if (is_hba_lunid(scsi3addr)) { | |
3316 | struct bmic_sense_subsystem_info *ssi; | |
3317 | ||
3318 | ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); | |
3319 | if (ssi == NULL) { | |
3320 | dev_warn(&h->pdev->dev, | |
3321 | "%s: out of memory\n", __func__); | |
3322 | return; | |
3323 | } | |
3324 | ||
3325 | rc = hpsa_bmic_sense_subsystem_information(h, | |
3326 | scsi3addr, 0, ssi, sizeof(*ssi)); | |
3327 | if (rc == 0) { | |
3328 | sa = get_unaligned_be64(ssi->primary_world_wide_id); | |
3329 | h->sas_address = sa; | |
3330 | } | |
3331 | ||
3332 | kfree(ssi); | |
3333 | } else | |
3334 | sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr); | |
3335 | ||
3336 | dev->sas_address = sa; | |
3337 | } | |
3338 | ||
3339 | /* Check whether the device supports a given VPD page (via the page 0x00 list) */ | 
1b70150a SC |
3340 | static int hpsa_vpd_page_supported(struct ctlr_info *h, |
3341 | unsigned char scsi3addr[], u8 page) | |
3342 | { | |
3343 | int rc; | |
3344 | int i; | |
3345 | int pages; | |
3346 | unsigned char *buf, bufsize; | |
3347 | ||
3348 | buf = kzalloc(256, GFP_KERNEL); | |
3349 | if (!buf) | |
3350 | return 0; | |
3351 | ||
3352 | /* Get the size of the page list first */ | |
3353 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, | |
3354 | VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, | |
3355 | buf, HPSA_VPD_HEADER_SZ); | |
3356 | if (rc != 0) | |
3357 | goto exit_unsupported; | |
3358 | pages = buf[3]; | |
3359 | if ((pages + HPSA_VPD_HEADER_SZ) <= 255) | |
3360 | bufsize = pages + HPSA_VPD_HEADER_SZ; | |
3361 | else | |
3362 | bufsize = 255; | |
3363 | ||
3364 | /* Get the whole VPD page list */ | |
3365 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, | |
3366 | VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, | |
3367 | buf, bufsize); | |
3368 | if (rc != 0) | |
3369 | goto exit_unsupported; | |
3370 | ||
3371 | pages = buf[3]; | |
3372 | for (i = 1; i <= pages; i++) | |
3373 | if (buf[3 + i] == page) | |
3374 | goto exit_supported; | |
3375 | exit_unsupported: | |
3376 | kfree(buf); | |
3377 | return 0; | |
3378 | exit_supported: | |
3379 | kfree(buf); | |
3380 | return 1; | |
3381 | } | |
3382 | ||
283b4a9b SC |
3383 | static void hpsa_get_ioaccel_status(struct ctlr_info *h, |
3384 | unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) | |
3385 | { | |
3386 | int rc; | |
3387 | unsigned char *buf; | |
3388 | u8 ioaccel_status; | |
3389 | ||
3390 | this_device->offload_config = 0; | |
3391 | this_device->offload_enabled = 0; | |
41ce4c35 | 3392 | this_device->offload_to_be_enabled = 0; |
283b4a9b SC |
3393 | |
3394 | buf = kzalloc(64, GFP_KERNEL); | |
3395 | if (!buf) | |
3396 | return; | |
1b70150a SC |
3397 | if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) |
3398 | goto out; | |
283b4a9b | 3399 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, |
b7bb24eb | 3400 | VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); |
283b4a9b SC |
3401 | if (rc != 0) |
3402 | goto out; | |
3403 | ||
3404 | #define IOACCEL_STATUS_BYTE 4 | |
3405 | #define OFFLOAD_CONFIGURED_BIT 0x01 | |
3406 | #define OFFLOAD_ENABLED_BIT 0x02 | |
3407 | ioaccel_status = buf[IOACCEL_STATUS_BYTE]; | |
3408 | this_device->offload_config = | |
3409 | !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); | |
3410 | if (this_device->offload_config) { | |
3411 | this_device->offload_enabled = | |
3412 | !!(ioaccel_status & OFFLOAD_ENABLED_BIT); | |
3413 | if (hpsa_get_raid_map(h, scsi3addr, this_device)) | |
3414 | this_device->offload_enabled = 0; | |
3415 | } | |
41ce4c35 | 3416 | this_device->offload_to_be_enabled = this_device->offload_enabled; |
283b4a9b SC |
3417 | out: |
3418 | kfree(buf); | |
3419 | return; | |
3420 | } | |
3421 | ||
edd16368 SC |
3422 | /* Get the device id from inquiry page 0x83 */ |
3423 | static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, | |
75d23d89 | 3424 | unsigned char *device_id, int index, int buflen) |
edd16368 SC |
3425 | { |
3426 | int rc; | |
3427 | unsigned char *buf; | |
3428 | ||
3429 | if (buflen > 16) | |
3430 | buflen = 16; | |
3431 | buf = kzalloc(64, GFP_KERNEL); | |
3432 | if (!buf) | |
a84d794d | 3433 | return -ENOMEM; |
b7bb24eb | 3434 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); |
edd16368 | 3435 | if (rc == 0) |
75d23d89 DB |
3436 | memcpy(device_id, &buf[index], buflen); |
3437 | ||
edd16368 | 3438 | kfree(buf); |
75d23d89 | 3439 | |
edd16368 SC |
3440 | return rc != 0; |
3441 | } | |
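/*
 * Callers supply 'index' as the byte offset of the identifier within
 * the page 0x83 response; hpsa_update_device_info() below passes 8,
 * where the first designator's data begins.
 */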
3442 | ||
3443 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |
03383736 | 3444 | void *buf, int bufsize, |
edd16368 SC |
3445 | int extended_response) |
3446 | { | |
3447 | int rc = IO_OK; | |
3448 | struct CommandList *c; | |
3449 | unsigned char scsi3addr[8]; | |
3450 | struct ErrorInfo *ei; | |
3451 | ||
45fcb86e | 3452 | c = cmd_alloc(h); |
bf43caf3 | 3453 | |
e89c0ae7 SC |
3454 | /* address the controller */ |
3455 | memset(scsi3addr, 0, sizeof(scsi3addr)); | |
a2dac136 SC |
3456 | if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
3457 | buf, bufsize, 0, scsi3addr, TYPE_CMD)) { | |
3458 | rc = -1; | |
3459 | goto out; | |
3460 | } | |
edd16368 SC |
3461 | if (extended_response) |
3462 | c->Request.CDB[1] = extended_response; | |
25163bd5 WS |
3463 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
3464 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
3465 | if (rc) | |
3466 | goto out; | |
edd16368 SC |
3467 | ei = c->err_info; |
3468 | if (ei->CommandStatus != 0 && | |
3469 | ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
d1e8beac | 3470 | hpsa_scsi_interpret_error(h, c); |
edd16368 | 3471 | rc = -1; |
283b4a9b | 3472 | } else { |
03383736 DB |
3473 | struct ReportLUNdata *rld = buf; |
3474 | ||
3475 | if (rld->extended_response_flag != extended_response) { | |
283b4a9b SC |
3476 | dev_err(&h->pdev->dev, |
3477 | "report luns requested format %u, got %u\n", | |
3478 | extended_response, | |
03383736 | 3479 | rld->extended_response_flag); |
283b4a9b SC |
3480 | rc = -1; |
3481 | } | |
edd16368 | 3482 | } |
a2dac136 | 3483 | out: |
45fcb86e | 3484 | cmd_free(h, c); |
edd16368 SC |
3485 | return rc; |
3486 | } | |
3487 | ||
3488 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, | |
03383736 | 3489 | struct ReportExtendedLUNdata *buf, int bufsize) |
edd16368 | 3490 | { |
03383736 DB |
3491 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, |
3492 | HPSA_REPORT_PHYS_EXTENDED); | |
edd16368 SC |
3493 | } |
3494 | ||
3495 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, | |
3496 | struct ReportLUNdata *buf, int bufsize) | |
3497 | { | |
3498 | return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); | |
3499 | } | |
3500 | ||
3501 | static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, | |
3502 | int bus, int target, int lun) | |
3503 | { | |
3504 | device->bus = bus; | |
3505 | device->target = target; | |
3506 | device->lun = lun; | |
3507 | } | |
3508 | ||
9846590e SC |
3509 | /* Use VPD inquiry to get details of volume status */ |
3510 | static int hpsa_get_volume_status(struct ctlr_info *h, | |
3511 | unsigned char scsi3addr[]) | |
3512 | { | |
3513 | int rc; | |
3514 | int status; | |
3515 | int size; | |
3516 | unsigned char *buf; | |
3517 | ||
3518 | buf = kzalloc(64, GFP_KERNEL); | |
3519 | if (!buf) | |
3520 | return HPSA_VPD_LV_STATUS_UNSUPPORTED; | |
3521 | ||
3522 | /* Does controller have VPD for logical volume status? */ | |
24a4b078 | 3523 | if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) |
9846590e | 3524 | goto exit_failed; |
9846590e SC |
3525 | |
3526 | /* Get the size of the VPD return buffer */ | |
3527 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, | |
3528 | buf, HPSA_VPD_HEADER_SZ); | |
24a4b078 | 3529 | if (rc != 0) |
9846590e | 3530 | goto exit_failed; |
9846590e SC |
3531 | size = buf[3]; |
3532 | ||
3533 | /* Now get the whole VPD buffer */ | |
3534 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, | |
3535 | buf, size + HPSA_VPD_HEADER_SZ); | |
24a4b078 | 3536 | if (rc != 0) |
9846590e | 3537 | goto exit_failed; |
9846590e SC |
3538 | status = buf[4]; /* status byte */ |
3539 | ||
3540 | kfree(buf); | |
3541 | return status; | |
3542 | exit_failed: | |
3543 | kfree(buf); | |
3544 | return HPSA_VPD_LV_STATUS_UNSUPPORTED; | |
3545 | } | |
3546 | ||
3547 | /* Determine offline status of a volume. | |
3548 | * Return either: | |
3549 | * 0 (not offline) | |
67955ba3 | 3550 | * 0xff (offline for unknown reasons) |
9846590e SC |
3551 | * # (integer code indicating one of several NOT READY states |
3552 | * describing why a volume is to be kept offline) | |
3553 | */ | |
67955ba3 | 3554 | static int hpsa_volume_offline(struct ctlr_info *h, |
9846590e SC |
3555 | unsigned char scsi3addr[]) |
3556 | { | |
3557 | struct CommandList *c; | |
9437ac43 SC |
3558 | unsigned char *sense; |
3559 | u8 sense_key, asc, ascq; | |
3560 | int sense_len; | |
25163bd5 | 3561 | int rc, ldstat = 0; |
9846590e SC |
3562 | u16 cmd_status; |
3563 | u8 scsi_status; | |
3564 | #define ASC_LUN_NOT_READY 0x04 | |
3565 | #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 | |
3566 | #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 | |
3567 | ||
3568 | c = cmd_alloc(h); | |
bf43caf3 | 3569 | |
9846590e | 3570 | (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); |
25163bd5 WS |
3571 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); |
3572 | if (rc) { | |
3573 | cmd_free(h, c); | |
3574 | return 0; | |
3575 | } | |
9846590e | 3576 | sense = c->err_info->SenseInfo; |
9437ac43 SC |
3577 | if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) |
3578 | sense_len = sizeof(c->err_info->SenseInfo); | |
3579 | else | |
3580 | sense_len = c->err_info->SenseLen; | |
3581 | decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq); | |
9846590e SC |
3582 | cmd_status = c->err_info->CommandStatus; |
3583 | scsi_status = c->err_info->ScsiStatus; | |
3584 | cmd_free(h, c); | |
3585 | /* Is the volume 'not ready'? */ | |
3586 | if (cmd_status != CMD_TARGET_STATUS || | |
3587 | scsi_status != SAM_STAT_CHECK_CONDITION || | |
3588 | sense_key != NOT_READY || | |
3589 | asc != ASC_LUN_NOT_READY) { | |
3590 | return 0; | |
3591 | } | |
3592 | ||
3593 | /* Determine the reason for not ready state */ | |
3594 | ldstat = hpsa_get_volume_status(h, scsi3addr); | |
3595 | ||
3596 | /* Keep volume offline in certain cases: */ | |
3597 | switch (ldstat) { | |
3598 | case HPSA_LV_UNDERGOING_ERASE: | |
5ca01204 | 3599 | case HPSA_LV_NOT_AVAILABLE: |
9846590e SC |
3600 | case HPSA_LV_UNDERGOING_RPI: |
3601 | case HPSA_LV_PENDING_RPI: | |
3602 | case HPSA_LV_ENCRYPTED_NO_KEY: | |
3603 | case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: | |
3604 | case HPSA_LV_UNDERGOING_ENCRYPTION: | |
3605 | case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: | |
3606 | case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: | |
3607 | return ldstat; | |
3608 | case HPSA_VPD_LV_STATUS_UNSUPPORTED: | |
3609 | /* If VPD status page isn't available, | |
3610 | * use ASC/ASCQ to determine state | |
3611 | */ | |
3612 | if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || | |
3613 | (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) | |
3614 | return ldstat; | |
3615 | break; | |
3616 | default: | |
3617 | break; | |
3618 | } | |
3619 | return 0; | |
3620 | } | |
3621 | ||
9b5c48c2 SC |
3622 | /* |
3623 | * Find out if a logical device supports aborts by simply trying one. | |
3624 | * Smart Array may claim not to support aborts on logical drives, but | |
3625 | * if an MSA2000 is connected, the drives on it will be presented | 
3626 | * by the Smart Array as logical drives, and aborts may be sent to | |
3627 | * those devices successfully. So the simplest way to find out is | |
3628 | * to simply try an abort and see how the device responds. | |
3629 | */ | |
3630 | static int hpsa_device_supports_aborts(struct ctlr_info *h, | |
3631 | unsigned char *scsi3addr) | |
3632 | { | |
3633 | struct CommandList *c; | |
3634 | struct ErrorInfo *ei; | |
3635 | int rc = 0; | |
3636 | ||
3637 | u64 tag = (u64) -1; /* bogus tag */ | |
3638 | ||
3639 | /* Assume that physical devices support aborts */ | |
3640 | if (!is_logical_dev_addr_mode(scsi3addr)) | |
3641 | return 1; | |
3642 | ||
3643 | c = cmd_alloc(h); | |
bf43caf3 | 3644 | |
9b5c48c2 SC |
3645 | (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); |
3646 | (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); | |
3647 | /* no unmap needed here because no data xfer. */ | |
3648 | ei = c->err_info; | |
3649 | switch (ei->CommandStatus) { | |
3650 | case CMD_INVALID: | |
3651 | rc = 0; | |
3652 | break; | |
3653 | case CMD_UNABORTABLE: | |
3654 | case CMD_ABORT_FAILED: | |
3655 | rc = 1; | |
3656 | break; | |
9437ac43 SC |
3657 | case CMD_TMF_STATUS: |
3658 | rc = hpsa_evaluate_tmf_status(h, c); | |
3659 | break; | |
9b5c48c2 SC |
3660 | default: |
3661 | rc = 0; | |
3662 | break; | |
3663 | } | |
3664 | cmd_free(h, c); | |
3665 | return rc; | |
3666 | } | |
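/*
 * Interpretation used above: CMD_INVALID means the abort message itself
 * was rejected, so the device does not support aborts; CMD_UNABORTABLE
 * and CMD_ABORT_FAILED mean the message was at least accepted, so it
 * does.  TMF status is decoded by hpsa_evaluate_tmf_status().
 */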
3667 | ||
edd16368 | 3668 | static int hpsa_update_device_info(struct ctlr_info *h, |
0b0e1d6c SC |
3669 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, |
3670 | unsigned char *is_OBDR_device) | |
edd16368 | 3671 | { |
0b0e1d6c SC |
3672 | |
3673 | #define OBDR_SIG_OFFSET 43 | |
3674 | #define OBDR_TAPE_SIG "$DR-10" | |
3675 | #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) | |
3676 | #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN) | |
3677 | ||
ea6d3bc3 | 3678 | unsigned char *inq_buff; |
0b0e1d6c | 3679 | unsigned char *obdr_sig; |
683fc444 | 3680 | int rc = 0; |
edd16368 | 3681 | |
ea6d3bc3 | 3682 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
683fc444 DB |
3683 | if (!inq_buff) { |
3684 | rc = -ENOMEM; | |
edd16368 | 3685 | goto bail_out; |
683fc444 | 3686 | } |
edd16368 | 3687 | |
edd16368 SC |
3688 | /* Do an inquiry to the device to see what it is. */ |
3689 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, | |
3690 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { | |
3691 | /* Inquiry failed (msg printed already) */ | |
3692 | dev_err(&h->pdev->dev, | |
3693 | "hpsa_update_device_info: inquiry failed\n"); | |
683fc444 | 3694 | rc = -EIO; |
edd16368 SC |
3695 | goto bail_out; |
3696 | } | |
3697 | ||
4af61e4f DB |
3698 | scsi_sanitize_inquiry_string(&inq_buff[8], 8); |
3699 | scsi_sanitize_inquiry_string(&inq_buff[16], 16); | |
75d23d89 | 3700 | |
edd16368 SC |
3701 | this_device->devtype = (inq_buff[0] & 0x1f); |
3702 | memcpy(this_device->scsi3addr, scsi3addr, 8); | |
3703 | memcpy(this_device->vendor, &inq_buff[8], | |
3704 | sizeof(this_device->vendor)); | |
3705 | memcpy(this_device->model, &inq_buff[16], | |
3706 | sizeof(this_device->model)); | |
edd16368 SC |
3707 | memset(this_device->device_id, 0, |
3708 | sizeof(this_device->device_id)); | |
75d23d89 | 3709 | hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, |
edd16368 SC |
3710 | sizeof(this_device->device_id)); |
3711 | ||
af15ed36 DB |
3712 | if ((this_device->devtype == TYPE_DISK || |
3713 | this_device->devtype == TYPE_ZBC) && | |
283b4a9b | 3714 | is_logical_dev_addr_mode(scsi3addr)) { |
67955ba3 SC |
3715 | int volume_offline; |
3716 | ||
edd16368 | 3717 | hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); |
283b4a9b SC |
3718 | if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) |
3719 | hpsa_get_ioaccel_status(h, scsi3addr, this_device); | |
67955ba3 SC |
3720 | volume_offline = hpsa_volume_offline(h, scsi3addr); |
3721 | if (volume_offline < 0 || volume_offline > 0xff) | |
3722 | volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; | |
3723 | this_device->volume_offline = volume_offline & 0xff; | |
283b4a9b | 3724 | } else { |
edd16368 | 3725 | this_device->raid_level = RAID_UNKNOWN; |
283b4a9b SC |
3726 | this_device->offload_config = 0; |
3727 | this_device->offload_enabled = 0; | |
41ce4c35 | 3728 | this_device->offload_to_be_enabled = 0; |
a3144e0b | 3729 | this_device->hba_ioaccel_enabled = 0; |
9846590e | 3730 | this_device->volume_offline = 0; |
03383736 | 3731 | this_device->queue_depth = h->nr_cmds; |
283b4a9b | 3732 | } |
edd16368 | 3733 | |
0b0e1d6c SC |
3734 | if (is_OBDR_device) { |
3735 | /* See if this is a One-Button-Disaster-Recovery device | |
3736 | * by looking for "$DR-10" at offset 43 in inquiry data. | |
3737 | */ | |
3738 | obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; | |
3739 | *is_OBDR_device = (this_device->devtype == TYPE_ROM && | |
3740 | strncmp(obdr_sig, OBDR_TAPE_SIG, | |
3741 | OBDR_SIG_LEN) == 0); | |
3742 | } | |
edd16368 SC |
3743 | kfree(inq_buff); |
3744 | return 0; | |
3745 | ||
3746 | bail_out: | |
3747 | kfree(inq_buff); | |
683fc444 | 3748 | return rc; |
edd16368 SC |
3749 | } |
3750 | ||
9b5c48c2 SC |
3751 | static void hpsa_update_device_supports_aborts(struct ctlr_info *h, |
3752 | struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) | |
3753 | { | |
3754 | unsigned long flags; | |
3755 | int rc, entry; | |
3756 | /* | |
3757 | * See if this device supports aborts. If we already know | |
3758 | * the device, we already know if it supports aborts, otherwise | |
3759 | * we have to find out if it supports aborts by trying one. | |
3760 | */ | |
3761 | spin_lock_irqsave(&h->devlock, flags); | |
3762 | rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); | |
3763 | if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && | |
3764 | entry >= 0 && entry < h->ndevices) { | |
3765 | dev->supports_aborts = h->dev[entry]->supports_aborts; | |
3766 | spin_unlock_irqrestore(&h->devlock, flags); | |
3767 | } else { | |
3768 | spin_unlock_irqrestore(&h->devlock, flags); | |
3769 | dev->supports_aborts = | |
3770 | hpsa_device_supports_aborts(h, scsi3addr); | |
3771 | if (dev->supports_aborts < 0) | |
3772 | dev->supports_aborts = 0; | |
3773 | } | |
3774 | } | |
3775 | ||
c795505a KB |
3776 | /* |
3777 | * Helper function to assign bus, target, lun mapping of devices. | |
edd16368 SC |
3778 | * Logical drive target and lun are assigned at this time, but |
3779 | * physical device lun and target assignment are deferred (assigned | |
3780 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) | |
c795505a | 3781 | */ |
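/*
 * The 32-bit LUN id is sliced as follows: external RAID volumes take
 * their target from bits 16-29 and their lun from the low byte; local
 * logical volumes and the HBA itself sit on target 0 with the low
 * 14 bits as the lun.
 */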
edd16368 | 3782 | static void figure_bus_target_lun(struct ctlr_info *h, |
1f310bde | 3783 | u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) |
edd16368 | 3784 | { |
c795505a | 3785 | u32 lunid = get_unaligned_le32(lunaddrbytes); |
1f310bde SC |
3786 | |
3787 | if (!is_logical_dev_addr_mode(lunaddrbytes)) { | |
3788 | /* physical device, target and lun filled in later */ | |
edd16368 | 3789 | if (is_hba_lunid(lunaddrbytes)) |
c795505a KB |
3790 | hpsa_set_bus_target_lun(device, |
3791 | HPSA_HBA_BUS, 0, lunid & 0x3fff); | |
edd16368 | 3792 | else |
1f310bde | 3793 | /* defer target, lun assignment for physical devices */ |
c795505a KB |
3794 | hpsa_set_bus_target_lun(device, |
3795 | HPSA_PHYSICAL_DEVICE_BUS, -1, -1); | |
1f310bde SC |
3796 | return; |
3797 | } | |
3798 | /* It's a logical device */ | |
66749d0d | 3799 | if (device->external) { |
1f310bde | 3800 | hpsa_set_bus_target_lun(device, |
c795505a KB |
3801 | HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, |
3802 | lunid & 0x00ff); | |
1f310bde | 3803 | return; |
edd16368 | 3804 | } |
c795505a KB |
3805 | hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, |
3806 | 0, lunid & 0x3fff); | |
edd16368 SC |
3807 | } |
3808 | ||
edd16368 | 3809 | |
54b6e9e9 ST |
3810 | /* |
3811 | * Get address of physical disk used for an ioaccel2 mode command: | |
3812 | * 1. Extract ioaccel2 handle from the command. | |
3813 | * 2. Find a matching ioaccel2 handle from list of physical disks. | |
3814 | * 3. Return: | |
3815 | * 1, with scsi3addr set to the address of the matching physical disk, | 
3816 | * 0 if no matching physical disk was found. | |
3817 | */ | |
3818 | static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, | |
3819 | struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) | |
3820 | { | |
41ce4c35 SC |
3821 | struct io_accel2_cmd *c2 = |
3822 | &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; | |
3823 | unsigned long flags; | |
54b6e9e9 | 3824 | int i; |
54b6e9e9 | 3825 | |
41ce4c35 SC |
3826 | spin_lock_irqsave(&h->devlock, flags); |
3827 | for (i = 0; i < h->ndevices; i++) | |
3828 | if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { | |
3829 | memcpy(scsi3addr, h->dev[i]->scsi3addr, | |
3830 | sizeof(h->dev[i]->scsi3addr)); | |
3831 | spin_unlock_irqrestore(&h->devlock, flags); | |
3832 | return 1; | |
3833 | } | |
3834 | spin_unlock_irqrestore(&h->devlock, flags); | |
3835 | return 0; | |
54b6e9e9 | 3836 | } |
41ce4c35 | 3837 | |
66749d0d ST |
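/*
 * Worked example for figure_external_status() below, with
 * raid_ctlr_position == 0: index 0 is the controller itself, indexes
 * 1..nphysicals are physical devices, the next nlocal_logicals entries
 * are local logical drives, and everything past those is an external
 * lun.
 */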
3838 | static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, |
3839 | int i, int nphysicals, int nlocal_logicals) | |
3840 | { | |
3841 | /* In report logicals, local logicals are listed first, | |
3842 | * then any externals. | |
3843 | */ | |
3844 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | |
3845 | ||
3846 | if (i == raid_ctlr_position) | |
3847 | return 0; | |
3848 | ||
3849 | if (i < logicals_start) | |
3850 | return 0; | |
3851 | ||
3852 | /* i is in logicals range, but still within local logicals */ | |
3853 | if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals) | |
3854 | return 0; | |
3855 | ||
3856 | return 1; /* it's an external lun */ | |
3857 | } | |
3858 | ||
edd16368 SC |
3859 | /* |
3860 | * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, | |
3861 | * logdev. The number of luns in physdev and logdev are returned in | |
3862 | * *nphysicals and *nlogicals, respectively. | |
3863 | * Returns 0 on success, -1 otherwise. | |
3864 | */ | |
3865 | static int hpsa_gather_lun_info(struct ctlr_info *h, | |
03383736 | 3866 | struct ReportExtendedLUNdata *physdev, u32 *nphysicals, |
01a02ffc | 3867 | struct ReportLUNdata *logdev, u32 *nlogicals) |
edd16368 | 3868 | { |
03383736 | 3869 | if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) { |
edd16368 SC |
3870 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
3871 | return -1; | |
3872 | } | |
03383736 | 3873 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24; |
edd16368 | 3874 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
03383736 DB |
3875 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n", |
3876 | HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN); | |
edd16368 SC |
3877 | *nphysicals = HPSA_MAX_PHYS_LUN; |
3878 | } | |
03383736 | 3879 | if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) { |
edd16368 SC |
3880 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
3881 | return -1; | |
3882 | } | |
6df1e954 | 3883 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
edd16368 SC |
3884 | /* Reject Logicals in excess of our max capability. */ |
3885 | if (*nlogicals > HPSA_MAX_LUN) { | |
3886 | dev_warn(&h->pdev->dev, | |
3887 | "maximum logical LUNs (%d) exceeded. " | |
3888 | "%d LUNs ignored.\n", HPSA_MAX_LUN, | |
3889 | *nlogicals - HPSA_MAX_LUN); | |
3890 | *nlogicals = HPSA_MAX_LUN; | |
3891 | } | |
3892 | if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { | |
3893 | dev_warn(&h->pdev->dev, | |
3894 | "maximum logical + physical LUNs (%d) exceeded. " | |
3895 | "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | |
3896 | *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); | |
3897 | *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; | |
3898 | } | |
3899 | return 0; | |
3900 | } | |
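/*
 * The divisors above reflect entry sizes: extended physical-LUN report
 * entries are 24 bytes each, standard logical-LUN entries 8 bytes, and
 * LUNListLength is a byte count.
 */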
3901 | ||
42a91641 DB |
3902 | static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, |
3903 | int i, int nphysicals, int nlogicals, | |
a93aa1fe | 3904 | struct ReportExtendedLUNdata *physdev_list, |
339b2b14 SC |
3905 | struct ReportLUNdata *logdev_list) |
3906 | { | |
3907 | /* Helper function, figure out where the LUN ID info is coming from | |
3908 | * given index i, lists of physical and logical devices, where in | |
3909 | * the list the raid controller is supposed to appear (first or last) | |
3910 | */ | |
3911 | ||
3912 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | |
3913 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); | |
3914 | ||
3915 | if (i == raid_ctlr_position) | |
3916 | return RAID_CTLR_LUNID; | |
3917 | ||
3918 | if (i < logicals_start) | |
d5b5d964 SC |
3919 | return &physdev_list->LUN[i - |
3920 | (raid_ctlr_position == 0)].lunid[0]; | |
339b2b14 SC |
3921 | |
3922 | if (i < last_device) | |
3923 | return &logdev_list->LUN[i - nphysicals - | |
3924 | (raid_ctlr_position == 0)][0]; | |
3925 | BUG(); | |
3926 | return NULL; | |
3927 | } | |
3928 | ||
03383736 DB |
3929 | /* get physical drive ioaccel handle and queue depth */ |
3930 | static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, | |
3931 | struct hpsa_scsi_dev_t *dev, | |
f2039b03 | 3932 | struct ReportExtendedLUNdata *rlep, int rle_index, |
03383736 DB |
3933 | struct bmic_identify_physical_device *id_phys) |
3934 | { | |
3935 | int rc; | |
f2039b03 | 3936 | struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; |
03383736 DB |
3937 | |
3938 | dev->ioaccel_handle = rle->ioaccel_handle; | |
f2039b03 | 3939 | if ((rle->device_flags & 0x08) && dev->ioaccel_handle) |
a3144e0b | 3940 | dev->hba_ioaccel_enabled = 1; |
03383736 | 3941 | memset(id_phys, 0, sizeof(*id_phys)); |
f2039b03 DB |
3942 | rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], |
3943 | GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, | |
03383736 DB |
3944 | sizeof(*id_phys)); |
3945 | if (!rc) | |
3946 | /* Reserve space for FW operations */ | |
3947 | #define DRIVE_CMDS_RESERVED_FOR_FW 2 | |
3948 | #define DRIVE_QUEUE_DEPTH 7 | |
3949 | dev->queue_depth = | |
3950 | le16_to_cpu(id_phys->current_queue_depth_limit) - | |
3951 | DRIVE_CMDS_RESERVED_FOR_FW; | |
3952 | else | |
3953 | dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */ | |
03383736 DB |
3954 | } |
3955 | ||
8270b862 | 3956 | static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device, |
f2039b03 | 3957 | struct ReportExtendedLUNdata *rlep, int rle_index, |
8270b862 JH |
3958 | struct bmic_identify_physical_device *id_phys) |
3959 | { | |
f2039b03 DB |
3960 | struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; |
3961 | ||
3962 | if ((rle->device_flags & 0x08) && this_device->ioaccel_handle) | |
8270b862 JH |
3963 | this_device->hba_ioaccel_enabled = 1; |
3964 | ||
3965 | memcpy(&this_device->active_path_index, | |
3966 | &id_phys->active_path_number, | |
3967 | sizeof(this_device->active_path_index)); | |
3968 | memcpy(&this_device->path_map, | |
3969 | &id_phys->redundant_path_present_map, | |
3970 | sizeof(this_device->path_map)); | |
3971 | memcpy(&this_device->box, | |
3972 | &id_phys->alternate_paths_phys_box_on_port, | |
3973 | sizeof(this_device->box)); | |
3974 | memcpy(&this_device->phys_connector, | |
3975 | &id_phys->alternate_paths_phys_connector, | |
3976 | sizeof(this_device->phys_connector)); | |
3977 | memcpy(&this_device->bay, | |
3978 | &id_phys->phys_bay_in_box, | |
3979 | sizeof(this_device->bay)); | |
3980 | } | |
3981 | ||
66749d0d ST |
3982 | /* get number of local logical disks. */ |
3983 | static int hpsa_set_local_logical_count(struct ctlr_info *h, | |
3984 | struct bmic_identify_controller *id_ctlr, | |
3985 | u32 *nlocals) | |
3986 | { | |
3987 | int rc; | |
3988 | ||
3989 | if (!id_ctlr) { | |
3990 | dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n", | |
3991 | __func__); | |
3992 | return -ENOMEM; | |
3993 | } | |
3994 | memset(id_ctlr, 0, sizeof(*id_ctlr)); | |
3995 | rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); | |
3996 | if (!rc) | |
3997 | if (id_ctlr->configured_logical_drive_count < 256) | |
3998 | *nlocals = id_ctlr->configured_logical_drive_count; | |
3999 | else | |
4000 | *nlocals = le16_to_cpu( | |
4001 | id_ctlr->extended_logical_unit_count); | |
4002 | else | |
4003 | *nlocals = -1; | |
4004 | return rc; | |
4005 | } | |
4006 | ||
4007 | ||
8aa60681 | 4008 | static void hpsa_update_scsi_devices(struct ctlr_info *h) |
edd16368 SC |
4009 | { |
4010 | /* The idea here is that we may get notified | 
4011 | * that some devices have changed, so we issue report | 
4012 | * physical luns and report logical luns commands, and adjust | 
4013 | * our list of devices accordingly. | 
4014 | * | |
4015 | * The scsi3addrs of devices won't change so long as the | 
4016 | * adapter is not reset. That means we can rescan and | |
4017 | * tell which devices we already know about, vs. new | |
4018 | * devices, vs. disappearing devices. | |
4019 | */ | |
a93aa1fe | 4020 | struct ReportExtendedLUNdata *physdev_list = NULL; |
edd16368 | 4021 | struct ReportLUNdata *logdev_list = NULL; |
03383736 | 4022 | struct bmic_identify_physical_device *id_phys = NULL; |
66749d0d | 4023 | struct bmic_identify_controller *id_ctlr = NULL; |
01a02ffc SC |
4024 | u32 nphysicals = 0; |
4025 | u32 nlogicals = 0; | |
66749d0d | 4026 | u32 nlocal_logicals = 0; |
01a02ffc | 4027 | u32 ndev_allocated = 0; |
edd16368 SC |
4028 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
4029 | int ncurrent = 0; | |
4f4eb9f1 | 4030 | int i, n_ext_target_devs, ndevs_to_allocate; |
339b2b14 | 4031 | int raid_ctlr_position; |
04fa2f44 | 4032 | bool physical_device; |
aca4a520 | 4033 | DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); |
edd16368 | 4034 | |
cfe5badc | 4035 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); |
92084715 SC |
4036 | physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL); |
4037 | logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL); | |
edd16368 | 4038 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
03383736 | 4039 | id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL); |
66749d0d | 4040 | id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL); |
edd16368 | 4041 | |
03383736 | 4042 | if (!currentsd || !physdev_list || !logdev_list || |
66749d0d | 4043 | !tmpdevice || !id_phys || !id_ctlr) { |
edd16368 SC |
4044 | dev_err(&h->pdev->dev, "out of memory\n"); |
4045 | goto out; | |
4046 | } | |
4047 | memset(lunzerobits, 0, sizeof(lunzerobits)); | |
4048 | ||
853633e8 DB |
4049 | h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */ |
4050 | ||
03383736 | 4051 | if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, |
853633e8 DB |
4052 | logdev_list, &nlogicals)) { |
4053 | h->drv_req_rescan = 1; | |
edd16368 | 4054 | goto out; |
853633e8 | 4055 | } |
edd16368 | 4056 | |
66749d0d ST |
4057 | /* Set number of local logicals (non PTRAID) */ |
4058 | if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) { | |
4059 | dev_warn(&h->pdev->dev, | |
4060 | "%s: Can't determine number of local logical devices.\n", | |
4061 | __func__); | |
4062 | } | |
edd16368 | 4063 | |
aca4a520 ST |
	/* We might see up to the maximum number of logical and physical disks
	 * plus external target devices, and a device for the local RAID
	 * controller.
	 */
	ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;

	/* Allocate the per device structures */
	for (i = 0; i < ndevs_to_allocate; i++) {
		if (i >= HPSA_MAX_DEVICES) {
			dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
				" %d devices ignored.\n", HPSA_MAX_DEVICES,
				ndevs_to_allocate - HPSA_MAX_DEVICES);
			break;
		}

		currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
		if (!currentsd[i]) {
			dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
				__FILE__, __LINE__);
			h->drv_req_rescan = 1;
			goto out;
		}
		ndev_allocated++;
	}

	if (is_scsi_rev_5(h))
		raid_ctlr_position = 0;
	else
		raid_ctlr_position = nphysicals + nlogicals;

	/* adjust our table of devices */
	n_ext_target_devs = 0;
	for (i = 0; i < nphysicals + nlogicals + 1; i++) {
		u8 *lunaddrbytes, is_OBDR = 0;
		int rc = 0;
		int phys_dev_index = i - (raid_ctlr_position == 0);

		physical_device = i < nphysicals + (raid_ctlr_position == 0);

		/* Figure out where the LUN ID info is coming from */
		lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
			i, nphysicals, nlogicals, physdev_list, logdev_list);

		/* skip masked non-disk devices */
		if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
			(physdev_list->LUN[phys_dev_index].device_type != 0x06) &&
			(physdev_list->LUN[phys_dev_index].device_flags & 0x01))
			continue;

		/* Get device type, vendor, model, device id */
		rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
							&is_OBDR);
		if (rc == -ENOMEM) {
			dev_warn(&h->pdev->dev,
				"Out of memory, rescan deferred.\n");
			h->drv_req_rescan = 1;
			goto out;
		}
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Inquiry failed, skipping device.\n");
			continue;
		}

		/* Determine if this is a lun from an external target array */
		tmpdevice->external =
			figure_external_status(h, raid_ctlr_position, i,
						nphysicals, nlocal_logicals);

		figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
		hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
		this_device = currentsd[ncurrent];

		/* Turn on discovery_polling if there are ext target devices.
		 * Event-based change notification is unreliable for those.
		 */
		if (!h->discovery_polling) {
			if (tmpdevice->external) {
				h->discovery_polling = 1;
				dev_info(&h->pdev->dev,
					"External target, activate discovery polling.\n");
			}
		}

		*this_device = *tmpdevice;
		this_device->physical_device = physical_device;

		/*
		 * Expose all devices except for physical devices that
		 * are masked.
		 */
		if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
			this_device->expose_device = 0;
		else
			this_device->expose_device = 1;

		/*
		 * Get the SAS address for physical devices that are exposed.
		 */
		if (this_device->physical_device && this_device->expose_device)
			hpsa_get_sas_address(h, lunaddrbytes, this_device);

		switch (this_device->devtype) {
		case TYPE_ROM:
			/* We don't *really* support actual CD-ROM devices,
			 * just "One Button Disaster Recovery" tape drive
			 * which temporarily pretends to be a CD-ROM drive.
			 * So we check that the device is really an OBDR tape
			 * device by checking for "$DR-10" in bytes 43-48 of
			 * the inquiry data.
			 */
			if (is_OBDR)
				ncurrent++;
			break;
		case TYPE_DISK:
		case TYPE_ZBC:
			if (this_device->physical_device) {
				/* The disk is in HBA mode. */
				/* Never use RAID mapper in HBA mode. */
				this_device->offload_enabled = 0;
				hpsa_get_ioaccel_drive_info(h, this_device,
					physdev_list, phys_dev_index, id_phys);
				hpsa_get_path_info(this_device,
					physdev_list, phys_dev_index, id_phys);
			}
			ncurrent++;
			break;
		case TYPE_TAPE:
		case TYPE_MEDIUM_CHANGER:
			ncurrent++;
			break;
		case TYPE_ENCLOSURE:
			if (!this_device->external)
				hpsa_get_enclosure_info(h, lunaddrbytes,
						physdev_list, phys_dev_index,
						this_device);
			ncurrent++;
			break;
		case TYPE_RAID:
			/* Only present the Smartarray HBA as a RAID controller.
			 * If it's a RAID controller other than the HBA itself
			 * (an external RAID controller, MSA500 or similar)
			 * don't present it.
			 */
			if (!is_hba_lunid(lunaddrbytes))
				break;
			ncurrent++;
			break;
		default:
			break;
		}
		if (ncurrent >= HPSA_MAX_DEVICES)
			break;
	}

	if (h->sas_host == NULL) {
		int rc = 0;

		rc = hpsa_add_sas_host(h);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Could not add sas host %d\n", rc);
			goto out;
		}
	}

	adjust_hpsa_scsi_table(h, currentsd, ncurrent);
out:
	kfree(tmpdevice);
	for (i = 0; i < ndev_allocated; i++)
		kfree(currentsd[i]);
	kfree(currentsd);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_ctlr);
	kfree(id_phys);
}

static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
				   struct scatterlist *sg)
{
	u64 addr64 = (u64) sg_dma_address(sg);
	unsigned int len = sg_dma_len(sg);

	desc->Addr = cpu_to_le64(addr64);
	desc->Len = cpu_to_le32(len);
	desc->Ext = 0;
}

/*
 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
 * dma mapping and fills in the scatter gather entries of the
 * hpsa command, cp.
 */
static int hpsa_scatter_gather(struct ctlr_info *h,
		struct CommandList *cp,
		struct scsi_cmnd *cmd)
{
	struct scatterlist *sg;
	int use_sg, i, sg_limit, chained, last_sg;
	struct SGDescriptor *curr_sg;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (!use_sg)
		goto sglist_finished;

	/*
	 * If the number of entries is greater than the max for a single list,
	 * then we have a chained list; we will set up all but one entry in the
	 * first list (the last entry is saved for link information);
	 * otherwise, we don't have a chained list and we'll set up each of
	 * the entries in the one list.
	 */
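	/*
	 * Worked example (hypothetical sizes, not taken from any particular
	 * controller): with h->max_cmd_sg_entries == 32 and use_sg == 40,
	 * chained is true and sg_limit becomes 31, so entries 0-30 land in
	 * the embedded list, the 32nd slot holds the chain pointer, and the
	 * remaining 9 entries go in the separate chain block handled below.
	 */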
	curr_sg = cp->SG;
	chained = use_sg > h->max_cmd_sg_entries;
	sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
	last_sg = scsi_sg_count(cmd) - 1;
	scsi_for_each_sg(cmd, sg, sg_limit, i) {
		hpsa_set_sg_descriptor(curr_sg, sg);
		curr_sg++;
	}

	if (chained) {
		/*
		 * Continue with the chained list.  Set curr_sg to the chained
		 * list.  Modify the limit to the total count less the entries
		 * we've already set up.  Resume the scan at the list entry
		 * where the previous loop left off.
		 */
		curr_sg = h->cmd_sg_list[cp->cmdindex];
		sg_limit = use_sg - sg_limit;
		for_each_sg(sg, sg, sg_limit, i) {
			hpsa_set_sg_descriptor(curr_sg, sg);
			curr_sg++;
		}
	}

	/* Back the pointer up to the last entry and mark it as "last". */
	(curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);

	if (use_sg + chained > h->maxSG)
		h->maxSG = use_sg + chained;

	if (chained) {
		cp->Header.SGList = h->max_cmd_sg_entries;
		cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
		if (hpsa_map_sg_chain_block(h, cp)) {
			scsi_dma_unmap(cmd);
			return -1;
		}
		return 0;
	}

sglist_finished:

	cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
	return 0;
}

#define IO_ACCEL_INELIGIBLE (1)
static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
{
	int is_write = 0;
	u32 block;
	u32 block_cnt;

	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
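	/*
	 * For example, a READ_6 CDB addressing block 0x1234 for 8 blocks is
	 * rewritten below as a READ_10 CDB carrying the same LBA in bytes
	 * 2-5 and the same transfer length in bytes 7-8; only the encoding
	 * changes, not the I/O itself.
	 */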
	switch (cdb[0]) {
	case WRITE_6:
	case WRITE_12:
		is_write = 1;
	case READ_6:
	case READ_12:
		if (*cdb_len == 6) {
			block = get_unaligned_be16(&cdb[2]);
			block_cnt = cdb[4];
			if (block_cnt == 0)
				block_cnt = 256;
		} else {
			BUG_ON(*cdb_len != 12);
			block = get_unaligned_be32(&cdb[2]);
			block_cnt = get_unaligned_be32(&cdb[6]);
		}
		if (block_cnt > 0xffff)
			return IO_ACCEL_INELIGIBLE;

		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (block >> 24);
		cdb[3] = (u8) (block >> 16);
		cdb[4] = (u8) (block >> 8);
		cdb[5] = (u8) (block);
		cdb[6] = 0;
		cdb[7] = (u8) (block_cnt >> 8);
		cdb[8] = (u8) (block_cnt);
		cdb[9] = 0;
		*cdb_len = 10;
		break;
	}
	return 0;
}

static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr = cpu_to_le64(addr64);
			curr_sg->Len = cpu_to_le32(len);
			curr_sg->Ext = cpu_to_le32(0);
			curr_sg++;
		}
		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
	cp->transfer_len = cpu_to_le32(total_len);
	cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
	cp->control = cpu_to_le32(control);
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	c->phys_disk = dev;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
}

4477 | /* |
4478 | * Set encryption parameters for the ioaccel2 request | |
4479 | */ | |
4480 | static void set_encrypt_ioaccel2(struct ctlr_info *h, | |
4481 | struct CommandList *c, struct io_accel2_cmd *cp) | |
4482 | { | |
4483 | struct scsi_cmnd *cmd = c->scsi_cmd; | |
4484 | struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; | |
4485 | struct raid_map_data *map = &dev->raid_map; | |
4486 | u64 first_block; | |
4487 | ||
dd0e19f3 | 4488 | /* Are we doing encryption on this device */ |
2b08b3e9 | 4489 | if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) |
dd0e19f3 ST |
4490 | return; |
4491 | /* Set the data encryption key index. */ | |
4492 | cp->dekindex = map->dekindex; | |
4493 | ||
4494 | /* Set the encryption enable flag, encoded into direction field. */ | |
4495 | cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; | |
4496 | ||
4497 | /* Set encryption tweak values based on logical block address | |
4498 | * If block size is 512, tweak value is LBA. | |
4499 | * For other block sizes, tweak is (LBA * block size)/ 512) | |
4500 | */ | |
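	/*
	 * For example, on a volume with a 4096-byte block size the tweak
	 * for LBA n works out to n * 8 (i.e. n * 4096 / 512), which is
	 * exactly what the multiplication after the switch below computes.
	 */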
	switch (cmd->cmnd[0]) {
	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_6:
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		break;
	case WRITE_10:
	case READ_10:
	/* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
	case WRITE_12:
	case READ_12:
		first_block = get_unaligned_be32(&cmd->cmnd[2]);
		break;
	case WRITE_16:
	case READ_16:
		first_block = get_unaligned_be64(&cmd->cmnd[2]);
		break;
	default:
		dev_err(&h->pdev->dev,
			"ERROR: %s: size (0x%x) not supported for encryption\n",
			__func__, cmd->cmnd[0]);
		BUG();
		break;
	}

	if (le32_to_cpu(map->volume_blk_size) != 512)
		first_block = first_block *
				le32_to_cpu(map->volume_blk_size)/512;

	cp->tweak_lower = cpu_to_le32(first_block);
	cp->tweak_upper = cpu_to_le32(first_block >> 32);
}

static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);

	if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}

	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return use_sg;
	}

	if (use_sg) {
		curr_sg = cp->sg;
		if (use_sg > h->ioaccel_maxsg) {
			addr64 = le64_to_cpu(
				h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = 0;
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0x80;

			curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
		}
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
			cp->direction |= IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
		cp->direction |= IOACCEL2_DIR_NO_DATA;
	}

	/* Set encryption parameters, if necessary */
	set_encrypt_ioaccel2(h, c, cp);

	cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
	cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32(sizeof(cp->error_data));

	/* fill in sg elements */
	if (use_sg > h->ioaccel_maxsg) {
		cp->sg_count = 1;
		cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
		if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
			atomic_dec(&phys_disk->ioaccel_cmds_out);
			scsi_dma_unmap(cmd);
			return -1;
		}
	} else
		cp->sg_count = (u8) use_sg;

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
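/*
 * Note on accounting: ioaccel_cmds_out is incremented optimistically
 * before eligibility is known; every rejection path (here and in the
 * ioaccel1/ioaccel2 helpers above) must therefore decrement it again so
 * the per-disk count tracks only commands actually in flight.
 */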
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
{
	/* Try to honor the device's queue depth */
	if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
					phys_disk->queue_depth) {
		atomic_dec(&phys_disk->ioaccel_cmds_out);
		return IO_ACCEL_INELIGIBLE;
	}
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr,
						phys_disk);
}

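/*
 * Pick the mirror-group member that should service *map_index for an
 * N-way mirror (see the HPSA_RAID_ADM case below): advance group by
 * group, one row's worth of data disks at a time, until the group
 * selected by offload_to_mirror is reached, wrapping back to group 0
 * after the last group.
 */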
static void raid_map_helper(struct raid_map_data *map,
		int offload_to_mirror, u32 *map_index, u32 *current_group)
{
	if (offload_to_mirror == 0) {
		/* use physical disk in the first mirrored group. */
		*map_index %= le16_to_cpu(map->data_disks_per_row);
		return;
	}
	do {
		/* determine mirror group that *map_index indicates */
		*current_group = *map_index /
			le16_to_cpu(map->data_disks_per_row);
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
			/* select map index from next group */
			*map_index += le16_to_cpu(map->data_disks_per_row);
			(*current_group)++;
		} else {
			/* select map index from first group */
			*map_index %= le16_to_cpu(map->data_disks_per_row);
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u64 r0_first_row, r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row, r5or6_last_row;
	u32 r5or6_first_row_offset, r5or6_last_row_offset;
	u32 r5or6_first_column, r5or6_last_column;
	u32 total_disks_per_row;
	u32 stripesize;
	u32 first_group, last_group, current_group;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
	u16 strip_size;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif
	int offload_to_mirror;

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
	case READ_6:
		first_block = get_unaligned_be16(&cmd->cmnd[2]);
		block_cnt = cmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = 1;
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
		last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
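	/*
	 * Worked example (hypothetical geometry): with strip_size == 128
	 * blocks and data_disks_per_row == 3, blocks_per_row is 384, so
	 * LBA 500 falls in row 1 (500 / 384) at row offset 116, which is
	 * column 0 (116 / 128).  The request stays eligible only if it
	 * ends in that same row and column.
	 */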
	blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
				le16_to_cpu(map->strip_size);
	strip_size = le16_to_cpu(map->strip_size);
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
				le16_to_cpu(map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	switch (dev->raid_level) {
	case HPSA_RAID_0:
		break; /* nothing special to do */
	case HPSA_RAID_1:
		/* Handles load balance across RAID 1 members.
		 * (2-drive R1 and R10 with even # of drives.)
		 * Appropriate for SSDs, not optimal for HDDs.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
		if (dev->offload_to_mirror)
			map_index += le16_to_cpu(map->data_disks_per_row);
		dev->offload_to_mirror = !dev->offload_to_mirror;
		break;
	case HPSA_RAID_ADM:
		/* Handles N-way mirrors (R1-ADM)
		 * and R10 with # of drives divisible by 3.
		 */
		BUG_ON(le16_to_cpu(map->layout_map_count) != 3);

		offload_to_mirror = dev->offload_to_mirror;
		raid_map_helper(map, offload_to_mirror,
				&map_index, &current_group);
		/* set mirror group to use next time */
		offload_to_mirror =
			(offload_to_mirror >=
			le16_to_cpu(map->layout_map_count) - 1)
			? 0 : offload_to_mirror + 1;
		dev->offload_to_mirror = offload_to_mirror;
		/* Avoid direct use of dev->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of dev->layout_map_count - 1.
		 */
		break;
	case HPSA_RAID_5:
	case HPSA_RAID_6:
		if (le16_to_cpu(map->layout_map_count) <= 1)
			break;

		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row =
			le16_to_cpu(map->strip_size) *
			le16_to_cpu(map->data_disks_per_row);
		BUG_ON(r5or6_blocks_per_row == 0);
		stripesize = r5or6_blocks_per_row *
			le16_to_cpu(map->layout_map_count);
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		(void) do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		(void) do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		(void) do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
						first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return IO_ACCEL_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		(void) do_div(tmpdiv, map->strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
				r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
				r5or6_blocks_per_row);

		first_column = r5or6_first_column =
			r5or6_first_row_offset / le16_to_cpu(map->strip_size);
		r5or6_last_column =
			r5or6_last_row_offset / le16_to_cpu(map->strip_size);
#endif
		if (r5or6_first_column != r5or6_last_column)
			return IO_ACCEL_INELIGIBLE;

		/* Request is eligible */
		map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			le16_to_cpu(map->row_cnt);

		map_index = (first_group *
			(le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
		break;
	default:
		return IO_ACCEL_INELIGIBLE;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return IO_ACCEL_INELIGIBLE;

	c->phys_disk = dev->phys_disk[map_index];
	if (!c->phys_disk)
		return IO_ACCEL_INELIGIBLE;

	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = le64_to_cpu(map->disk_starting_blk) +
			first_row * le16_to_cpu(map->strip_size) +
			(first_row_offset - first_column *
			le16_to_cpu(map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
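	/*
	 * For instance (hypothetical geometry): if the volume presents
	 * 4096-byte blocks over disks that use 512-byte sectors,
	 * phys_blk_shift would be 3 and both the starting block and the
	 * count are scaled up by 8 to address the disk in native units.
	 */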
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr,
						dev->phys_disk[map_index]);
}

/*
 * Submit commands down the "normal" RAID stack path.
 * All callers to hpsa_ciss_submit must check lockup_detected
 * beforehand (optionally before, and in any case after, calling
 * cmd_alloc).
 */
static int hpsa_ciss_submit(struct ctlr_info *h,
	struct CommandList *c, struct scsi_cmnd *cmd,
	unsigned char scsi3addr[])
{
	cmd->host_scribble = (unsigned char *) c;
	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;
	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
		break;
	case DMA_FROM_DEVICE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
		break;
	case DMA_NONE:
		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.type_attr_dir =
			TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		hpsa_cmd_resolve_and_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static void hpsa_cmd_init(struct ctlr_info *h, int index,
		struct CommandList *c)
{
	dma_addr_t cmd_dma_handle, err_dma_handle;

	/* Zero out all of commandlist except the last field, refcount */
	memset(c, 0, offsetof(struct CommandList, refcount));
	c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
	cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
	c->err_info = h->errinfo_pool + index;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + index * sizeof(*c->err_info);
	c->cmdindex = index;
	c->busaddr = (u32) cmd_dma_handle;
	c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
	c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
	c->h = h;
	c->scsi_cmd = SCSI_CMD_IDLE;
}

static void hpsa_preinitialize_commands(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nr_cmds; i++) {
		struct CommandList *c = h->cmd_pool + i;

		hpsa_cmd_init(h, i, c);
		atomic_set(&c->refcount, 0);
	}
}

static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
				struct CommandList *c)
{
	dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);

	BUG_ON(c->cmdindex != index);

	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	memset(c->err_info, 0, sizeof(*c->err_info));
	c->busaddr = (u32) cmd_dma_handle;
}

static int hpsa_ioaccel_submit(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		unsigned char *scsi3addr)
{
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	int rc = IO_ACCEL_INELIGIBLE;

	cmd->host_scribble = (unsigned char *) c;

	if (dev->offload_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_raid_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	} else if (dev->hba_ioaccel_enabled) {
		hpsa_cmd_init(h, c->cmdindex, c);
		c->cmd_type = CMD_SCSI;
		c->scsi_cmd = cmd;
		rc = hpsa_scsi_ioaccel_direct_map(h, c);
		if (rc < 0)     /* scsi_dma_map failed. */
			rc = SCSI_MLQUEUE_HOST_BUSY;
	}
	return rc;
}

static void hpsa_command_resubmit_worker(struct work_struct *work)
{
	struct scsi_cmnd *cmd;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *c = container_of(work, struct CommandList, work);

	cmd = c->scsi_cmd;
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		return hpsa_cmd_free_and_done(c->h, c, cmd);
	}
	if (c->reset_pending)
		return hpsa_cmd_resolve_and_free(c->h, c);
	if (c->abort_pending)
		return hpsa_cmd_abort_and_free(c->h, c, cmd);
	if (c->cmd_type == CMD_IOACCEL2) {
		struct ctlr_info *h = c->h;
		struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
		int rc;

		if (c2->error_data.serv_response ==
				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
			rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
			if (rc == 0)
				return;
			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
				/*
				 * If we get here, it means dma mapping failed.
				 * Try again via scsi mid layer, which will
				 * then get SCSI_MLQUEUE_HOST_BUSY.
				 */
				cmd->result = DID_IMM_RETRY << 16;
				return hpsa_cmd_free_and_done(h, c, cmd);
			}
			/* else, fall thru and resubmit down CISS path */
		}
	}
	hpsa_cmd_partial_init(c->h, c->cmdindex, c);
	if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
		/*
		 * If we get here, it means dma mapping failed. Try
		 * again via scsi mid layer, which will then get
		 * SCSI_MLQUEUE_HOST_BUSY.
		 *
		 * hpsa_ciss_submit will have already freed c
		 * if it encountered a dma mapping failure.
		 */
		cmd->result = DID_IMM_RETRY << 16;
		cmd->scsi_done(cmd);
	}
}

/* Runs in host_lock-less mode (struct Scsi_Host->host_lock is not held). */
static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);

	BUG_ON(cmd->request->tag < 0);

	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}

	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_NO_CONNECT << 16;
		cmd->scsi_done(cmd);
		return 0;
	}
	c = cmd_tagged_alloc(h, cmd);

	/*
	 * Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS &&
		h->acciopath_status)) {
		rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
		if (rc == 0)
			return 0;
		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
			hpsa_cmd_resolve_and_free(h, c);
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}
	return hpsa_ciss_submit(h, c, cmd, scsi3addr);
}

static void hpsa_scan_complete(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1;
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known to be locked
	 * up.  If the controller locks up *during* a rescan, that thread is
	 * probably hosed, but at least we can prevent new rescan threads from
	 * piling up on a locked up controller.
	 */
	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (unlikely(lockup_detected(h)))
		return hpsa_scan_complete(h);

	hpsa_update_scsi_devices(h);

	hpsa_scan_complete(h);
}

static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;

	if (!logical_drive)
		return -ENODEV;

	if (qdepth < 1)
		qdepth = 1;
	else if (qdepth > logical_drive->queue_depth)
		qdepth = logical_drive->queue_depth;

	return scsi_change_queue_depth(sdev, qdepth);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_scsi_host_alloc(struct ctlr_info *h)
{
	struct Scsi_Host *sh;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL) {
		dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
		return -ENOMEM;
	}

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
	sh->cmd_per_lun = sh->can_queue;
	sh->sg_tablesize = h->maxsgentries;
	sh->transportt = hpsa_sas_transport_template;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;

	h->scsi_host = sh;
	return 0;
}

static int hpsa_scsi_add_host(struct ctlr_info *h)
{
	int rv;

	rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
	if (rv) {
		dev_err(&h->pdev->dev, "scsi_add_host failed\n");
		return rv;
	}
	scsi_scan_host(h->scsi_host);
	return 0;
}

/*
 * The block layer has already gone to the trouble of picking out a unique,
 * small-integer tag for this request.  We use an offset from that value as
 * an index to select our command block.  (The offset allows us to reserve
 * the low-numbered entries for our own uses.)
 */
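/*
 * For example (hypothetical value): if HPSA_NRESERVED_CMDS were 16,
 * block-layer tag 0 would select command block 16, leaving blocks 0-15
 * free for driver-internal commands.
 */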
static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
{
	int idx = scmd->request->tag;

	if (idx < 0)
		return idx;

	/* Offset to leave space for internal cmds. */
	return idx + HPSA_NRESERVED_CMDS;
}

/*
 * Send a TEST_UNIT_READY command to the specified LUN using the specified
 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
 */
static int hpsa_send_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c, unsigned char lunaddr[],
				int reply_queue)
{
	int rc;

	/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
	(void) fill_cmd(c, TEST_UNIT_READY, h,
			NULL, 0, 0, lunaddr, TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
	if (rc)
		return rc;
	/* no unmap needed here because no data xfer. */

	/* Check if the unit is already ready. */
	if (c->err_info->CommandStatus == CMD_SUCCESS)
		return 0;

	/*
	 * The first command sent after reset will receive "unit attention" to
	 * indicate that the LUN has been reset...this is actually what we're
	 * looking for (but, success is good too).
	 */
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
		c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
		return 0;

	return 1;
}

/*
 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
 * returns zero when the unit is ready, and non-zero when giving up.
 */
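/*
 * The retry loop below backs off exponentially, sleeping 1s, 2s, 4s, ...
 * before each TUR, capping the per-attempt sleep at
 * HPSA_MAX_WAIT_INTERVAL_SECS and giving up after HPSA_TUR_RETRY_LIMIT
 * attempts.
 */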
static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
				struct CommandList *c,
				unsigned char lunaddr[], int reply_queue)
{
	int rc;
	int count = 0;
	int waittime = 1; /* seconds */

	/* Send test unit ready until device ready, or give up. */
	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {

		/*
		 * Wait for a bit.  Do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);

		rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
		if (!rc)
			break;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime *= 2;

		dev_warn(&h->pdev->dev,
			 "waiting %d secs for device to become ready.\n",
			 waittime);
	}

	return rc;
}

static int wait_for_device_to_become_ready(struct ctlr_info *h,
					   unsigned char lunaddr[],
					   int reply_queue)
{
	int first_queue;
	int last_queue;
	int rq;
	int rc = 0;
	struct CommandList *c;

	c = cmd_alloc(h);

	/*
	 * If no specific reply queue was requested, then send the TUR
	 * repeatedly, requesting a reply on each reply queue; otherwise execute
	 * the loop exactly once using only the specified queue.
	 */
	if (reply_queue == DEFAULT_REPLY_QUEUE) {
		first_queue = 0;
		last_queue = h->nreply_queues - 1;
	} else {
		first_queue = reply_queue;
		last_queue = reply_queue;
	}

	for (rq = first_queue; rq <= last_queue; rq++) {
		rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
		if (rc)
			break;
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_free(h, c);
	return rc;
}

/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	u8 reset_type;
	char msg[48];

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;

	if (lockup_detected(h))
		return FAILED;

	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
		return FAILED;
	}

	/* if controller locked up, we can guarantee command won't complete */
	if (lockup_detected(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* this reset request might be the result of a lockup; check */
	if (detect_controller_lockup(h)) {
		snprintf(msg, sizeof(msg),
			 "cmd %d RESET FAILED, new lockup detected",
			 hpsa_get_cmd_index(scsicmd));
		hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
		return FAILED;
	}

	/* Do not attempt on controller */
	if (is_hba_lunid(dev->scsi3addr))
		return SUCCESS;

	if (is_logical_dev_addr_mode(dev->scsi3addr))
		reset_type = HPSA_DEVICE_RESET_MSG;
	else
		reset_type = HPSA_PHYS_TARGET_RESET;

	sprintf(msg, "resetting %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);

	h->reset_in_progress = 1;

	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
			   DEFAULT_REPLY_QUEUE);
	sprintf(msg, "reset %s %s",
		reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
		rc == 0 ? "completed successfully" : "failed");
	hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
	h->reset_in_progress = 0;
	return rc == 0 ? SUCCESS : FAILED;
}

6cba3f19 SC |
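/*
 * Reverse the byte order within each 32-bit half of the 8-byte abort
 * tag.  Controllers that set needs_abort_tags_swizzled expect the tag
 * in the abort CDB with each word byte-swapped relative to how the
 * tag was originally issued.
 */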
5585 | static void swizzle_abort_tag(u8 *tag) |
5586 | { | |
5587 | u8 original_tag[8]; | |
5588 | ||
5589 | memcpy(original_tag, tag, 8); | |
5590 | tag[0] = original_tag[3]; | |
5591 | tag[1] = original_tag[2]; | |
5592 | tag[2] = original_tag[1]; | |
5593 | tag[3] = original_tag[0]; | |
5594 | tag[4] = original_tag[7]; | |
5595 | tag[5] = original_tag[6]; | |
5596 | tag[6] = original_tag[5]; | |
5597 | tag[7] = original_tag[4]; | |
5598 | } | |
5599 | ||
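/*
 * Extract a command's 64-bit tag as two 32-bit halves, for logging.
 * Commands issued down an ioaccel path keep their tag in the
 * corresponding accelerator command pool entry, not in the CISS
 * header; ioaccel2 tags only use the lower 32 bits.
 */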
17eb87d2 | 5600 | static void hpsa_get_tag(struct ctlr_info *h, |
2b08b3e9 | 5601 | struct CommandList *c, __le32 *taglower, __le32 *tagupper) |
17eb87d2 | 5602 | { |
2b08b3e9 | 5603 | u64 tag; |
17eb87d2 ST |
5604 | if (c->cmd_type == CMD_IOACCEL1) { |
5605 | struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) | |
5606 | &h->ioaccel_cmd_pool[c->cmdindex]; | |
2b08b3e9 DB |
5607 | tag = le64_to_cpu(cm1->tag); |
5608 | *tagupper = cpu_to_le32(tag >> 32); | |
5609 | *taglower = cpu_to_le32(tag); | |
54b6e9e9 ST |
5610 | return; |
5611 | } | |
5612 | if (c->cmd_type == CMD_IOACCEL2) { | |
5613 | struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) | |
5614 | &h->ioaccel2_cmd_pool[c->cmdindex]; | |
dd0e19f3 ST |
5615 | /* upper tag not used in ioaccel2 mode */ |
5616 | memset(tagupper, 0, sizeof(*tagupper)); | |
5617 | *taglower = cm2->Tag; | |
54b6e9e9 | 5618 | return; |
17eb87d2 | 5619 | } |
2b08b3e9 DB |
5620 | tag = le64_to_cpu(c->Header.tag); |
5621 | *tagupper = cpu_to_le32(tag >> 32); | |
5622 | *taglower = cpu_to_le32(tag); | |
17eb87d2 ST |
5623 | } |
5624 | ||
75167d2c | 5625 | static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, |
9b5c48c2 | 5626 | struct CommandList *abort, int reply_queue) |
75167d2c SC |
5627 | { |
5628 | int rc = IO_OK; | |
5629 | struct CommandList *c; | |
5630 | struct ErrorInfo *ei; | |
2b08b3e9 | 5631 | __le32 tagupper, taglower; |
75167d2c | 5632 | |
45fcb86e | 5633 | c = cmd_alloc(h); |
75167d2c | 5634 | |
a2dac136 | 5635 | /* fill_cmd can't fail here, no buffer to map */ |
9b5c48c2 | 5636 | (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, |
a2dac136 | 5637 | 0, 0, scsi3addr, TYPE_MSG); |
9b5c48c2 | 5638 | if (h->needs_abort_tags_swizzled) |
6cba3f19 | 5639 | swizzle_abort_tag(&c->Request.CDB[4]); |
25163bd5 | 5640 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
17eb87d2 | 5641 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
25163bd5 | 5642 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", |
17eb87d2 | 5643 | __func__, tagupper, taglower); |
75167d2c SC |
5644 | /* no unmap needed here because no data xfer. */ |
5645 | ||
5646 | ei = c->err_info; | |
5647 | switch (ei->CommandStatus) { | |
5648 | case CMD_SUCCESS: | |
5649 | break; | |
9437ac43 SC |
5650 | case CMD_TMF_STATUS: |
5651 | rc = hpsa_evaluate_tmf_status(h, c); | |
5652 | break; | |
75167d2c SC |
5653 | case CMD_UNABORTABLE: /* Very common, don't make noise. */ |
5654 | rc = -1; | |
5655 | break; | |
5656 | default: | |
5657 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", | |
17eb87d2 | 5658 | __func__, tagupper, taglower); |
d1e8beac | 5659 | hpsa_scsi_interpret_error(h, c); |
75167d2c SC |
5660 | rc = -1; |
5661 | break; | |
5662 | } | |
45fcb86e | 5663 | cmd_free(h, c); |
dd0e19f3 ST |
5664 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", |
5665 | __func__, tagupper, taglower); | |
75167d2c SC |
5666 | return rc; |
5667 | } | |
5668 | ||
8be986cc SC |
5669 | static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, |
5670 | struct CommandList *command_to_abort, int reply_queue) | |
5671 | { | |
5672 | struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | |
5673 | struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; | |
5674 | struct io_accel2_cmd *c2a = | |
5675 | &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; | |
a58e7e53 | 5676 | struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; |
8be986cc SC |
5677 | struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; |
5678 | ||
5679 | /* | |
5680 | * We're overlaying struct hpsa_tmf_struct on top of something which | |
5681 | * was allocated as a struct io_accel2_cmd, so we better be sure it | |
5682 | * actually fits, and doesn't overrun the error info space. | |
5683 | */ | |
5684 | BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > | |
5685 | sizeof(struct io_accel2_cmd)); | |
5686 | BUG_ON(offsetof(struct io_accel2_cmd, error_data) < | |
5687 | offsetof(struct hpsa_tmf_struct, error_len) + | |
5688 | sizeof(ac->error_len)); | |
5689 | ||
5690 | c->cmd_type = IOACCEL2_TMF; | |
a58e7e53 WS |
5691 | c->scsi_cmd = SCSI_CMD_BUSY; |
5692 | ||
8be986cc SC |
5693 | /* Adjust the DMA address to point to the accelerated command buffer */ |
5694 | c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + | |
5695 | (c->cmdindex * sizeof(struct io_accel2_cmd)); | |
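/* Accelerated command buffers must be 128-byte aligned. */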
5696 | BUG_ON(c->busaddr & 0x0000007F); | |
5697 | ||
5698 | memset(ac, 0, sizeof(*c2)); /* yes this is correct */ | |
5699 | ac->iu_type = IOACCEL2_IU_TMF_TYPE; | |
5700 | ac->reply_queue = reply_queue; | |
5701 | ac->tmf = IOACCEL2_TMF_ABORT; | |
5702 | ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); | |
5703 | memset(ac->lun_id, 0, sizeof(ac->lun_id)); | |
5704 | ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); | |
5705 | ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); | |
5706 | ac->error_ptr = cpu_to_le64(c->busaddr + | |
5707 | offsetof(struct io_accel2_cmd, error_data)); | |
5708 | ac->error_len = cpu_to_le32(sizeof(c2->error_data)); | |
5709 | } | |
5710 | ||
54b6e9e9 ST |
5711 | /* ioaccel2 path firmware cannot handle abort task requests. |
5712 | * Change abort requests to physical target reset, and send to the | |
5713 | * address of the physical disk used for the ioaccel 2 command. | |
5714 | * Return 0 on success (IO_OK) | |
5715 | * -1 on failure | |
5716 | */ | |
5717 | ||
5718 | static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, | |
25163bd5 | 5719 | unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) |
54b6e9e9 ST |
5720 | { |
5721 | int rc = IO_OK; | |
5722 | struct scsi_cmnd *scmd; /* scsi command within request being aborted */ | |
5723 | struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ | |
5724 | unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ | |
5725 | unsigned char *psa = &phys_scsi3addr[0]; | |
5726 | ||
5727 | /* Get a pointer to the hpsa logical device. */ | |
7fa3030c | 5728 | scmd = abort->scsi_cmd; |
54b6e9e9 ST |
5729 | dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); |
5730 | if (dev == NULL) { | |
5731 | dev_warn(&h->pdev->dev, | |
5732 | "Cannot abort: no device pointer for command.\n"); | |
5733 | return -1; /* not abortable */ | |
5734 | } | |
5735 | ||
2ba8bfc8 SC |
5736 | if (h->raid_offload_debug > 0) |
5737 | dev_info(&h->pdev->dev, | |
0d96ef5f | 5738 | "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", |
2ba8bfc8 | 5739 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun, |
0d96ef5f | 5740 | "Reset as abort", |
2ba8bfc8 SC |
5741 | scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], |
5742 | scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); | |
5743 | ||
54b6e9e9 ST |
5744 | if (!dev->offload_enabled) { |
5745 | dev_warn(&h->pdev->dev, | |
5746 | "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); | |
5747 | return -1; /* not abortable */ | |
5748 | } | |
5749 | ||
5750 | /* Incoming scsi3addr is logical addr. We need physical disk addr. */ | |
5751 | if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { | |
5752 | dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); | |
5753 | return -1; /* not abortable */ | |
5754 | } | |
5755 | ||
5756 | /* send the reset */ | |
2ba8bfc8 SC |
5757 | if (h->raid_offload_debug > 0) |
5758 | dev_info(&h->pdev->dev, | |
5759 | "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | |
5760 | psa[0], psa[1], psa[2], psa[3], | |
5761 | psa[4], psa[5], psa[6], psa[7]); | |
d604f533 | 5762 | rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); |
54b6e9e9 ST |
5763 | if (rc != 0) { |
5764 | dev_warn(&h->pdev->dev, | |
5765 | "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | |
5766 | psa[0], psa[1], psa[2], psa[3], | |
5767 | psa[4], psa[5], psa[6], psa[7]); | |
5768 | return rc; /* failed to reset */ | |
5769 | } | |
5770 | ||
5771 | /* wait for device to recover */ | |
b69324ff | 5772 | if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { |
54b6e9e9 ST |
5773 | dev_warn(&h->pdev->dev, |
5774 | "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | |
5775 | psa[0], psa[1], psa[2], psa[3], | |
5776 | psa[4], psa[5], psa[6], psa[7]); | |
5777 | return -1; /* failed to recover */ | |
5778 | } | |
5779 | ||
5780 | /* device recovered */ | |
5781 | dev_info(&h->pdev->dev, | |
5782 | "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | |
5783 | psa[0], psa[1], psa[2], psa[3], | |
5784 | psa[4], psa[5], psa[6], psa[7]); | |
5785 | ||
5786 | return rc; /* success */ | |
5787 | } | |
5788 | ||
8be986cc SC |
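/*
 * Issue an abort TMF down the ioaccel2 path and translate the
 * firmware's service response into 0 (success) or -1 (failure).
 */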
5789 | static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, |
5790 | struct CommandList *abort, int reply_queue) | |
5791 | { | |
5792 | int rc = IO_OK; | |
5793 | struct CommandList *c; | |
5794 | __le32 taglower, tagupper; | |
5795 | struct hpsa_scsi_dev_t *dev; | |
5796 | struct io_accel2_cmd *c2; | |
5797 | ||
5798 | dev = abort->scsi_cmd->device->hostdata; | |
5799 | if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) | |
5800 | return -1; | |
5801 | ||
5802 | c = cmd_alloc(h); | |
5803 | setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); | |
5804 | c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; | |
5805 | (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); | |
5806 | hpsa_get_tag(h, abort, &taglower, &tagupper); | |
5807 | dev_dbg(&h->pdev->dev, | |
5808 | "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", | |
5809 | __func__, tagupper, taglower); | |
5810 | /* no unmap needed here because no data xfer. */ | |
5811 | ||
5812 | dev_dbg(&h->pdev->dev, | |
5813 | "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", | |
5814 | __func__, tagupper, taglower, c2->error_data.serv_response); | |
5815 | switch (c2->error_data.serv_response) { | |
5816 | case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: | |
5817 | case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: | |
5818 | rc = 0; | |
5819 | break; | |
5820 | case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: | |
5821 | case IOACCEL2_SERV_RESPONSE_FAILURE: | |
5822 | case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: | |
5823 | rc = -1; | |
5824 | break; | |
5825 | default: | |
5826 | dev_warn(&h->pdev->dev, | |
5827 | "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", | |
5828 | __func__, tagupper, taglower, | |
5829 | c2->error_data.serv_response); | |
5830 | rc = -1; | |
5831 | } | |
5832 | cmd_free(h, c); | |
5833 | dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, | |
5834 | tagupper, taglower); | |
5835 | return rc; | |
5836 | } | |
5837 | ||
6cba3f19 | 5838 | static int hpsa_send_abort_both_ways(struct ctlr_info *h, |
39f3deb2 | 5839 | struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue) |
6cba3f19 | 5840 | { |
8be986cc SC |
5841 | /* |
5842 | * ioaccelerator mode 2 commands should be aborted via the |
54b6e9e9 | 5843 | * accelerated path, since the RAID path is unaware of these commands, |
8be986cc SC |
5844 | * but not all underlying firmware can handle abort TMF. |
5845 | * Change abort to physical device reset when abort TMF is unsupported. | |
54b6e9e9 | 5846 | */ |
8be986cc | 5847 | if (abort->cmd_type == CMD_IOACCEL2) { |
39f3deb2 DB |
5848 | if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) || |
5849 | dev->physical_device) | |
8be986cc SC |
5850 | return hpsa_send_abort_ioaccel2(h, abort, |
5851 | reply_queue); | |
5852 | else | |
39f3deb2 DB |
5853 | return hpsa_send_reset_as_abort_ioaccel2(h, |
5854 | dev->scsi3addr, | |
25163bd5 | 5855 | abort, reply_queue); |
8be986cc | 5856 | } |
39f3deb2 | 5857 | return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue); |
25163bd5 | 5858 | } |
54b6e9e9 | 5859 | |
25163bd5 WS |
5860 | /* Find out which reply queue a command was meant to return on */ |
5861 | static int hpsa_extract_reply_queue(struct ctlr_info *h, | |
5862 | struct CommandList *c) | |
5863 | { | |
5864 | if (c->cmd_type == CMD_IOACCEL2) | |
5865 | return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; | |
5866 | return c->Header.ReplyQueue; | |
6cba3f19 SC |
5867 | } |
5868 | ||
9b5c48c2 SC |
5869 | /* |
5870 | * Limit concurrency of abort commands to prevent | |
5871 | * over-subscription of commands | |
5872 | */ | |
5873 | static inline int wait_for_available_abort_cmd(struct ctlr_info *h) | |
5874 | { | |
5875 | #define ABORT_CMD_WAIT_MSECS 5000 | |
5876 | return !wait_event_timeout(h->abort_cmd_wait_queue, | |
5877 | atomic_dec_if_positive(&h->abort_cmds_available) >= 0, | |
5878 | msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); | |
5879 | } | |
5880 | ||
75167d2c SC |
5881 | /* Send an abort for the specified command. |
5882 | * If the device and controller support it, | |
5883 | * send a task abort request. | |
5884 | */ | |
5885 | static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) | |
5886 | { | |
5887 | ||
a58e7e53 | 5888 | int rc; |
75167d2c SC |
5889 | struct ctlr_info *h; |
5890 | struct hpsa_scsi_dev_t *dev; | |
5891 | struct CommandList *abort; /* pointer to command to be aborted */ | |
75167d2c SC |
5892 | struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ |
5893 | char msg[256]; /* For debug messaging. */ | |
5894 | int ml = 0; | |
2b08b3e9 | 5895 | __le32 tagupper, taglower; |
25163bd5 WS |
5896 | int refcount, reply_queue; |
5897 | ||
5898 | if (sc == NULL) | |
5899 | return FAILED; | |
75167d2c | 5900 | |
9b5c48c2 SC |
5901 | if (sc->device == NULL) |
5902 | return FAILED; | |
5903 | ||
75167d2c SC |
5904 | /* Find the controller of the command to be aborted */ |
5905 | h = sdev_to_hba(sc->device); | |
9b5c48c2 | 5906 | if (h == NULL) |
75167d2c SC |
5907 | return FAILED; |
5908 | ||
25163bd5 WS |
5909 | /* Find the device of the command to be aborted */ |
5910 | dev = sc->device->hostdata; | |
5911 | if (!dev) { | |
5912 | dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", | |
5913 | __func__); | |
e345893b | 5914 | return FAILED; |
25163bd5 WS |
5915 | } |
5916 | ||
5917 | /* If controller locked up, we can guarantee command won't complete */ | |
5918 | if (lockup_detected(h)) { | |
5919 | hpsa_show_dev_msg(KERN_WARNING, h, dev, | |
5920 | "ABORT FAILED, lockup detected"); | |
5921 | return FAILED; | |
5922 | } | |
5923 | ||
5924 | /* This is a good time to check if controller lockup has occurred */ | |
5925 | if (detect_controller_lockup(h)) { | |
5926 | hpsa_show_dev_msg(KERN_WARNING, h, dev, | |
5927 | "ABORT FAILED, new lockup detected"); | |
5928 | return FAILED; | |
5929 | } | |
e345893b | 5930 | |
75167d2c SC |
5931 | /* Check that controller supports some kind of task abort */ |
5932 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && | |
5933 | !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) | |
5934 | return FAILED; | |
5935 | ||
5936 | memset(msg, 0, sizeof(msg)); | |
4b761557 | 5937 | ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", |
75167d2c | 5938 | h->scsi_host->host_no, sc->device->channel, |
0d96ef5f | 5939 | sc->device->id, sc->device->lun, |
4b761557 | 5940 | "Aborting command", sc); |
75167d2c | 5941 | |
75167d2c SC |
5942 | /* Get SCSI command to be aborted */ |
5943 | abort = (struct CommandList *) sc->host_scribble; | |
5944 | if (abort == NULL) { | |
281a7fd0 WS |
5945 | /* This can happen if the command already completed. */ |
5946 | return SUCCESS; | |
5947 | } | |
5948 | refcount = atomic_inc_return(&abort->refcount); | |
5949 | if (refcount == 1) { /* Command is done already. */ | |
5950 | cmd_free(h, abort); | |
5951 | return SUCCESS; | |
75167d2c | 5952 | } |
9b5c48c2 SC |
5953 | |
5954 | /* Don't bother trying the abort if we know it won't work. */ | |
5955 | if (abort->cmd_type != CMD_IOACCEL2 && | |
5956 | abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { | |
5957 | cmd_free(h, abort); | |
5958 | return FAILED; | |
5959 | } | |
5960 | ||
a58e7e53 WS |
5961 | /* |
5962 | * Check that we're aborting the right command. | |
5963 | * It's possible the CommandList already completed and got re-used. | |
5964 | */ | |
5965 | if (abort->scsi_cmd != sc) { | |
5966 | cmd_free(h, abort); | |
5967 | return SUCCESS; | |
5968 | } | |
5969 | ||
5970 | abort->abort_pending = true; | |
17eb87d2 | 5971 | hpsa_get_tag(h, abort, &taglower, &tagupper); |
25163bd5 | 5972 | reply_queue = hpsa_extract_reply_queue(h, abort); |
17eb87d2 | 5973 | ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); |
7fa3030c | 5974 | as = abort->scsi_cmd; |
75167d2c | 5975 | if (as != NULL) |
4b761557 RE |
5976 | ml += sprintf(msg+ml, |
5977 | "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", | |
5978 | as->cmd_len, as->cmnd[0], as->cmnd[1], | |
5979 | as->serial_number); | |
5980 | dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); | |
0d96ef5f | 5981 | hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); |
4b761557 | 5982 | |
75167d2c SC |
5983 | /* |
5984 | * Command is in flight, or possibly already completed | |
5985 | * by the firmware (but not to the scsi mid layer) but we can't | |
5986 | * distinguish which. Send the abort down. | |
5987 | */ | |
9b5c48c2 SC |
5988 | if (wait_for_available_abort_cmd(h)) { |
5989 | dev_warn(&h->pdev->dev, | |
4b761557 RE |
5990 | "%s FAILED, timeout waiting for an abort command to become available.\n", |
5991 | msg); | |
9b5c48c2 SC |
5992 | cmd_free(h, abort); |
5993 | return FAILED; | |
5994 | } | |
39f3deb2 | 5995 | rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue); |
9b5c48c2 SC |
5996 | atomic_inc(&h->abort_cmds_available); |
5997 | wake_up_all(&h->abort_cmd_wait_queue); | |
75167d2c | 5998 | if (rc != 0) { |
4b761557 | 5999 | dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); |
0d96ef5f | 6000 | hpsa_show_dev_msg(KERN_WARNING, h, dev, |
4b761557 | 6001 | "FAILED to abort command"); |
281a7fd0 | 6002 | cmd_free(h, abort); |
75167d2c SC |
6003 | return FAILED; |
6004 | } | |
4b761557 | 6005 | dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); |
d604f533 | 6006 | wait_event(h->event_sync_wait_queue, |
a58e7e53 | 6007 | abort->scsi_cmd != sc || lockup_detected(h)); |
281a7fd0 | 6008 | cmd_free(h, abort); |
a58e7e53 | 6009 | return !lockup_detected(h) ? SUCCESS : FAILED; |
75167d2c SC |
6010 | } |
6011 | ||
73153fe5 WS |
6012 | /* |
6013 | * For operations with an associated SCSI command, a command block is allocated | |
6014 | * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the | |
6015 | * block request tag as an index into a table of entries. cmd_tagged_free() is | |
6016 | * the complement, although cmd_free() may be called instead. | |
6017 | */ | |
6018 | static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, | |
6019 | struct scsi_cmnd *scmd) | |
6020 | { | |
6021 | int idx = hpsa_get_cmd_index(scmd); | |
6022 | struct CommandList *c = h->cmd_pool + idx; | |
6023 | ||
6024 | if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) { | |
6025 | dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n", | |
6026 | idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1); | |
6027 | /* The index value comes from the block layer, so if it's out of | |
6028 | * bounds, it's probably not our bug. | |
6029 | */ | |
6030 | BUG(); | |
6031 | } | |
6032 | ||
6033 | atomic_inc(&c->refcount); | |
6034 | if (unlikely(!hpsa_is_cmd_idle(c))) { | |
6035 | /* | |
6036 | * We expect that the SCSI layer will hand us a unique tag | |
6037 | * value. Thus, there should never be a collision here between | |
6038 | * two requests...because if the selected command isn't idle | |
6039 | * then someone is going to be very disappointed. | |
6040 | */ | |
6041 | dev_err(&h->pdev->dev, | |
6042 | "tag collision (tag=%d) in cmd_tagged_alloc().\n", | |
6043 | idx); | |
6044 | if (c->scsi_cmd != NULL) | |
6045 | scsi_print_command(c->scsi_cmd); | |
6046 | scsi_print_command(scmd); | |
6047 | } | |
6048 | ||
6049 | hpsa_cmd_partial_init(h, idx, c); | |
6050 | return c; | |
6051 | } | |
6052 | ||
6053 | static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) | |
6054 | { | |
6055 | /* | |
6056 | * Release our reference to the block. We don't need to do anything | |
6057 | * else to free it, because it is accessed by index. (There's no point | |
6058 | * in checking the result of the decrement, since we cannot guarantee | |
6059 | * that there isn't a concurrent abort which is also accessing it.) | |
6060 | */ | |
6061 | (void)atomic_dec(&c->refcount); | |
6062 | } | |
6063 | ||
edd16368 SC |
6064 | /* |
6065 | * For operations that cannot sleep, a command block is allocated at init, | |
6066 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track | |
6067 | * which ones are free or in use. Lock must be held when calling this. | |
6068 | * cmd_free() is the complement. | |
bf43caf3 RE |
6069 | * This function never gives up and returns NULL. If it hangs, |
6070 | * another thread must call cmd_free() to free some tags. | |
edd16368 | 6071 | */ |
281a7fd0 | 6072 | |
edd16368 SC |
6073 | static struct CommandList *cmd_alloc(struct ctlr_info *h) |
6074 | { | |
6075 | struct CommandList *c; | |
360c73bd | 6076 | int refcount, i; |
73153fe5 | 6077 | int offset = 0; |
4c413128 | 6078 | |
33811026 RE |
6079 | /* |
6080 | * There is some *extremely* small but non-zero chance that | |
4c413128 SC |
6081 | * multiple threads could get in here, and one thread could |
6082 | * be scanning through the list of bits looking for a free | |
6083 | * one, but the free ones are always behind him, and other | |
6084 | * threads sneak in behind him and eat them before he can | |
6085 | * get to them, so that while there is always a free one, a | |
6086 | * very unlucky thread might be starved anyway, never able to | |
6087 | * beat the other threads. In reality, this happens so | |
6088 | * infrequently as to be indistinguishable from never. | |
73153fe5 WS |
6089 | * |
6090 | * Note that we start allocating commands before the SCSI host structure | |
6091 | * is initialized. Since the search starts at bit zero, this | |
6092 | * all works, since we have at least one command structure available; | |
6093 | * however, it means that the structures with the low indexes have to be | |
6094 | * reserved for driver-initiated requests, while requests from the block | |
6095 | * layer will use the higher indexes. | |
4c413128 | 6096 | */ |
edd16368 | 6097 | |
281a7fd0 | 6098 | for (;;) { |
73153fe5 WS |
6099 | i = find_next_zero_bit(h->cmd_pool_bits, |
6100 | HPSA_NRESERVED_CMDS, | |
6101 | offset); | |
6102 | if (unlikely(i >= HPSA_NRESERVED_CMDS)) { | |
281a7fd0 WS |
6103 | offset = 0; |
6104 | continue; | |
6105 | } | |
6106 | c = h->cmd_pool + i; | |
6107 | refcount = atomic_inc_return(&c->refcount); | |
6108 | if (unlikely(refcount > 1)) { | |
6109 | cmd_free(h, c); /* already in use */ | |
73153fe5 | 6110 | offset = (i + 1) % HPSA_NRESERVED_CMDS; |
281a7fd0 WS |
6111 | continue; |
6112 | } | |
6113 | set_bit(i & (BITS_PER_LONG - 1), | |
6114 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | |
6115 | break; /* it's ours now. */ | |
6116 | } | |
360c73bd | 6117 | hpsa_cmd_partial_init(h, i, c); |
edd16368 SC |
6118 | return c; |
6119 | } | |
6120 | ||
73153fe5 WS |
6121 | /* |
6122 | * This is the complementary operation to cmd_alloc(). Note, however, in some | |
6123 | * corner cases it may also be used to free blocks allocated by | |
6124 | * cmd_tagged_alloc() in which case the ref-count decrement does the trick and | |
6125 | * the clear-bit is harmless. | |
6126 | */ | |
edd16368 SC |
6127 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) |
6128 | { | |
281a7fd0 WS |
6129 | if (atomic_dec_and_test(&c->refcount)) { |
6130 | int i; | |
edd16368 | 6131 | |
281a7fd0 WS |
6132 | i = c - h->cmd_pool; |
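/* Clear bit (i % BITS_PER_LONG) of word (i / BITS_PER_LONG). */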
6133 | clear_bit(i & (BITS_PER_LONG - 1), | |
6134 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | |
6135 | } | |
edd16368 SC |
6136 | } |
6137 | ||
edd16368 SC |
6138 | #ifdef CONFIG_COMPAT |
6139 | ||
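/*
 * Compat shim: copy the 32-bit ioctl layout into a native
 * IOCTL_Command_struct in compat user space, forward it to
 * hpsa_ioctl(), then copy the error info back to the 32-bit caller.
 */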
42a91641 DB |
6140 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, |
6141 | void __user *arg) | |
edd16368 SC |
6142 | { |
6143 | IOCTL32_Command_struct __user *arg32 = | |
6144 | (IOCTL32_Command_struct __user *) arg; | |
6145 | IOCTL_Command_struct arg64; | |
6146 | IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); | |
6147 | int err; | |
6148 | u32 cp; | |
6149 | ||
938abd84 | 6150 | memset(&arg64, 0, sizeof(arg64)); |
edd16368 SC |
6151 | err = 0; |
6152 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
6153 | sizeof(arg64.LUN_info)); | |
6154 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
6155 | sizeof(arg64.Request)); | |
6156 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
6157 | sizeof(arg64.error_info)); | |
6158 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
6159 | err |= get_user(cp, &arg32->buf); | |
6160 | arg64.buf = compat_ptr(cp); | |
6161 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
6162 | ||
6163 | if (err) | |
6164 | return -EFAULT; | |
6165 | ||
42a91641 | 6166 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, p); |
edd16368 SC |
6167 | if (err) |
6168 | return err; | |
6169 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
6170 | sizeof(arg32->error_info)); | |
6171 | if (err) | |
6172 | return -EFAULT; | |
6173 | return err; | |
6174 | } | |
6175 | ||
6176 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |
42a91641 | 6177 | int cmd, void __user *arg) |
edd16368 SC |
6178 | { |
6179 | BIG_IOCTL32_Command_struct __user *arg32 = | |
6180 | (BIG_IOCTL32_Command_struct __user *) arg; | |
6181 | BIG_IOCTL_Command_struct arg64; | |
6182 | BIG_IOCTL_Command_struct __user *p = | |
6183 | compat_alloc_user_space(sizeof(arg64)); | |
6184 | int err; | |
6185 | u32 cp; | |
6186 | ||
938abd84 | 6187 | memset(&arg64, 0, sizeof(arg64)); |
edd16368 SC |
6188 | err = 0; |
6189 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
6190 | sizeof(arg64.LUN_info)); | |
6191 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
6192 | sizeof(arg64.Request)); | |
6193 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
6194 | sizeof(arg64.error_info)); | |
6195 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
6196 | err |= get_user(arg64.malloc_size, &arg32->malloc_size); | |
6197 | err |= get_user(cp, &arg32->buf); | |
6198 | arg64.buf = compat_ptr(cp); | |
6199 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
6200 | ||
6201 | if (err) | |
6202 | return -EFAULT; | |
6203 | ||
42a91641 | 6204 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p); |
edd16368 SC |
6205 | if (err) |
6206 | return err; | |
6207 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
6208 | sizeof(arg32->error_info)); | |
6209 | if (err) | |
6210 | return -EFAULT; | |
6211 | return err; | |
6212 | } | |
71fe75a7 | 6213 | |
42a91641 | 6214 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
71fe75a7 SC |
6215 | { |
6216 | switch (cmd) { | |
6217 | case CCISS_GETPCIINFO: | |
6218 | case CCISS_GETINTINFO: | |
6219 | case CCISS_SETINTINFO: | |
6220 | case CCISS_GETNODENAME: | |
6221 | case CCISS_SETNODENAME: | |
6222 | case CCISS_GETHEARTBEAT: | |
6223 | case CCISS_GETBUSTYPES: | |
6224 | case CCISS_GETFIRMVER: | |
6225 | case CCISS_GETDRIVVER: | |
6226 | case CCISS_REVALIDVOLS: | |
6227 | case CCISS_DEREGDISK: | |
6228 | case CCISS_REGNEWDISK: | |
6229 | case CCISS_REGNEWD: | |
6230 | case CCISS_RESCANDISK: | |
6231 | case CCISS_GETLUNINFO: | |
6232 | return hpsa_ioctl(dev, cmd, arg); | |
6233 | ||
6234 | case CCISS_PASSTHRU32: | |
6235 | return hpsa_ioctl32_passthru(dev, cmd, arg); | |
6236 | case CCISS_BIG_PASSTHRU32: | |
6237 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | |
6238 | ||
6239 | default: | |
6240 | return -ENOIOCTLCMD; | |
6241 | } | |
6242 | } | |
edd16368 SC |
6243 | #endif |
6244 | ||
6245 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) | |
6246 | { | |
6247 | struct hpsa_pci_info pciinfo; | |
6248 | ||
6249 | if (!argp) | |
6250 | return -EINVAL; | |
6251 | pciinfo.domain = pci_domain_nr(h->pdev->bus); | |
6252 | pciinfo.bus = h->pdev->bus->number; | |
6253 | pciinfo.dev_fn = h->pdev->devfn; | |
6254 | pciinfo.board_id = h->board_id; | |
6255 | if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) | |
6256 | return -EFAULT; | |
6257 | return 0; | |
6258 | } | |
6259 | ||
6260 | static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) | |
6261 | { | |
6262 | DriverVer_type DriverVer; | |
6263 | unsigned char vmaj, vmin, vsubmin; | |
6264 | int rc; | |
6265 | ||
6266 | rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", | |
6267 | &vmaj, &vmin, &vsubmin); | |
6268 | if (rc != 3) { | |
6269 | dev_info(&h->pdev->dev, "driver version string '%s' " | |
6270 | "unrecognized.", HPSA_DRIVER_VERSION); | |
6271 | vmaj = 0; | |
6272 | vmin = 0; | |
6273 | vsubmin = 0; | |
6274 | } | |
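/* Pack the version as 0x00MMmmss (major, minor, subminor). */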
6275 | DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; | |
6276 | if (!argp) | |
6277 | return -EINVAL; | |
6278 | if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) | |
6279 | return -EFAULT; | |
6280 | return 0; | |
6281 | } | |
6282 | ||
6283 | static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
6284 | { | |
6285 | IOCTL_Command_struct iocommand; | |
6286 | struct CommandList *c; | |
6287 | char *buff = NULL; | |
50a0decf | 6288 | u64 temp64; |
c1f63c8f | 6289 | int rc = 0; |
edd16368 SC |
6290 | |
6291 | if (!argp) | |
6292 | return -EINVAL; | |
6293 | if (!capable(CAP_SYS_RAWIO)) | |
6294 | return -EPERM; | |
6295 | if (copy_from_user(&iocommand, argp, sizeof(iocommand))) | |
6296 | return -EFAULT; | |
6297 | if ((iocommand.buf_size < 1) && | |
6298 | (iocommand.Request.Type.Direction != XFER_NONE)) { | |
6299 | return -EINVAL; | |
6300 | } | |
6301 | if (iocommand.buf_size > 0) { | |
6302 | buff = kmalloc(iocommand.buf_size, GFP_KERNEL); | |
6303 | if (buff == NULL) | |
2dd02d74 | 6304 | return -ENOMEM; |
9233fb10 | 6305 | if (iocommand.Request.Type.Direction & XFER_WRITE) { |
b03a7771 SC |
6306 | /* Copy the data into the buffer we created */ |
6307 | if (copy_from_user(buff, iocommand.buf, | |
6308 | iocommand.buf_size)) { | |
c1f63c8f SC |
6309 | rc = -EFAULT; |
6310 | goto out_kfree; | |
b03a7771 SC |
6311 | } |
6312 | } else { | |
6313 | memset(buff, 0, iocommand.buf_size); | |
edd16368 | 6314 | } |
b03a7771 | 6315 | } |
45fcb86e | 6316 | c = cmd_alloc(h); |
bf43caf3 | 6317 | |
edd16368 SC |
6318 | /* Fill in the command type */ |
6319 | c->cmd_type = CMD_IOCTL_PEND; | |
a58e7e53 | 6320 | c->scsi_cmd = SCSI_CMD_BUSY; |
edd16368 SC |
6321 | /* Fill in Command Header */ |
6322 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | |
6323 | if (iocommand.buf_size > 0) { /* buffer to fill */ | |
6324 | c->Header.SGList = 1; | |
50a0decf | 6325 | c->Header.SGTotal = cpu_to_le16(1); |
edd16368 SC |
6326 | } else { /* no buffers to fill */ |
6327 | c->Header.SGList = 0; | |
50a0decf | 6328 | c->Header.SGTotal = cpu_to_le16(0); |
edd16368 SC |
6329 | } |
6330 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); | |
edd16368 SC |
6331 | |
6332 | /* Fill in Request block */ | |
6333 | memcpy(&c->Request, &iocommand.Request, | |
6334 | sizeof(c->Request)); | |
6335 | ||
6336 | /* Fill in the scatter gather information */ | |
6337 | if (iocommand.buf_size > 0) { | |
50a0decf | 6338 | temp64 = pci_map_single(h->pdev, buff, |
edd16368 | 6339 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); |
50a0decf SC |
6340 | if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) { |
6341 | c->SG[0].Addr = cpu_to_le64(0); | |
6342 | c->SG[0].Len = cpu_to_le32(0); | |
bcc48ffa SC |
6343 | rc = -ENOMEM; |
6344 | goto out; | |
6345 | } | |
50a0decf SC |
6346 | c->SG[0].Addr = cpu_to_le64(temp64); |
6347 | c->SG[0].Len = cpu_to_le32(iocommand.buf_size); | |
6348 | c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */ | |
edd16368 | 6349 | } |
25163bd5 | 6350 | rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); |
c2dd32e0 SC |
6351 | if (iocommand.buf_size > 0) |
6352 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); | |
edd16368 | 6353 | check_ioctl_unit_attention(h, c); |
25163bd5 WS |
6354 | if (rc) { |
6355 | rc = -EIO; | |
6356 | goto out; | |
6357 | } | |
edd16368 SC |
6358 | |
6359 | /* Copy the error information out */ | |
6360 | memcpy(&iocommand.error_info, c->err_info, | |
6361 | sizeof(iocommand.error_info)); | |
6362 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { | |
c1f63c8f SC |
6363 | rc = -EFAULT; |
6364 | goto out; | |
edd16368 | 6365 | } |
9233fb10 | 6366 | if ((iocommand.Request.Type.Direction & XFER_READ) && |
b03a7771 | 6367 | iocommand.buf_size > 0) { |
edd16368 SC |
6368 | /* Copy the data out of the buffer we created */ |
6369 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { | |
c1f63c8f SC |
6370 | rc = -EFAULT; |
6371 | goto out; | |
edd16368 SC |
6372 | } |
6373 | } | |
c1f63c8f | 6374 | out: |
45fcb86e | 6375 | cmd_free(h, c); |
c1f63c8f SC |
6376 | out_kfree: |
6377 | kfree(buff); | |
6378 | return rc; | |
edd16368 SC |
6379 | } |
6380 | ||
6381 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
6382 | { | |
6383 | BIG_IOCTL_Command_struct *ioc; | |
6384 | struct CommandList *c; | |
6385 | unsigned char **buff = NULL; | |
6386 | int *buff_size = NULL; | |
50a0decf | 6387 | u64 temp64; |
edd16368 SC |
6388 | BYTE sg_used = 0; |
6389 | int status = 0; | |
01a02ffc SC |
6390 | u32 left; |
6391 | u32 sz; | |
edd16368 SC |
6392 | BYTE __user *data_ptr; |
6393 | ||
6394 | if (!argp) | |
6395 | return -EINVAL; | |
6396 | if (!capable(CAP_SYS_RAWIO)) | |
6397 | return -EPERM; | |
6398 | ioc = (BIG_IOCTL_Command_struct *) | |
6399 | kmalloc(sizeof(*ioc), GFP_KERNEL); | |
6400 | if (!ioc) { | |
6401 | status = -ENOMEM; | |
6402 | goto cleanup1; | |
6403 | } | |
6404 | if (copy_from_user(ioc, argp, sizeof(*ioc))) { | |
6405 | status = -EFAULT; | |
6406 | goto cleanup1; | |
6407 | } | |
6408 | if ((ioc->buf_size < 1) && | |
6409 | (ioc->Request.Type.Direction != XFER_NONE)) { | |
6410 | status = -EINVAL; | |
6411 | goto cleanup1; | |
6412 | } | |
6413 | /* Check kmalloc limits using all SGs */ | |
6414 | if (ioc->malloc_size > MAX_KMALLOC_SIZE) { | |
6415 | status = -EINVAL; | |
6416 | goto cleanup1; | |
6417 | } | |
d66ae08b | 6418 | if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) { |
edd16368 SC |
6419 | status = -EINVAL; |
6420 | goto cleanup1; | |
6421 | } | |
d66ae08b | 6422 | buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL); |
edd16368 SC |
6423 | if (!buff) { |
6424 | status = -ENOMEM; | |
6425 | goto cleanup1; | |
6426 | } | |
d66ae08b | 6427 | buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL); |
edd16368 SC |
6428 | if (!buff_size) { |
6429 | status = -ENOMEM; | |
6430 | goto cleanup1; | |
6431 | } | |
6432 | left = ioc->buf_size; | |
6433 | data_ptr = ioc->buf; | |
6434 | while (left) { | |
6435 | sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; | |
6436 | buff_size[sg_used] = sz; | |
6437 | buff[sg_used] = kmalloc(sz, GFP_KERNEL); | |
6438 | if (buff[sg_used] == NULL) { | |
6439 | status = -ENOMEM; | |
6440 | goto cleanup1; | |
6441 | } | |
9233fb10 | 6442 | if (ioc->Request.Type.Direction & XFER_WRITE) { |
edd16368 | 6443 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { |
0758f4f7 | 6444 | status = -EFAULT; |
edd16368 SC |
6445 | goto cleanup1; |
6446 | } | |
6447 | } else | |
6448 | memset(buff[sg_used], 0, sz); | |
6449 | left -= sz; | |
6450 | data_ptr += sz; | |
6451 | sg_used++; | |
6452 | } | |
45fcb86e | 6453 | c = cmd_alloc(h); |
bf43caf3 | 6454 | |
edd16368 | 6455 | c->cmd_type = CMD_IOCTL_PEND; |
a58e7e53 | 6456 | c->scsi_cmd = SCSI_CMD_BUSY; |
edd16368 | 6457 | c->Header.ReplyQueue = 0; |
50a0decf SC |
6458 | c->Header.SGList = (u8) sg_used; |
6459 | c->Header.SGTotal = cpu_to_le16(sg_used); | |
edd16368 | 6460 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
edd16368 SC |
6461 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); |
6462 | if (ioc->buf_size > 0) { | |
6463 | int i; | |
6464 | for (i = 0; i < sg_used; i++) { | |
50a0decf | 6465 | temp64 = pci_map_single(h->pdev, buff[i], |
edd16368 | 6466 | buff_size[i], PCI_DMA_BIDIRECTIONAL); |
50a0decf SC |
6467 | if (dma_mapping_error(&h->pdev->dev, |
6468 | (dma_addr_t) temp64)) { | |
6469 | c->SG[i].Addr = cpu_to_le64(0); | |
6470 | c->SG[i].Len = cpu_to_le32(0); | |
bcc48ffa SC |
6471 | hpsa_pci_unmap(h->pdev, c, i, |
6472 | PCI_DMA_BIDIRECTIONAL); | |
6473 | status = -ENOMEM; | |
e2d4a1f6 | 6474 | goto cleanup0; |
bcc48ffa | 6475 | } |
50a0decf SC |
6476 | c->SG[i].Addr = cpu_to_le64(temp64); |
6477 | c->SG[i].Len = cpu_to_le32(buff_size[i]); | |
6478 | c->SG[i].Ext = cpu_to_le32(0); | |
edd16368 | 6479 | } |
50a0decf | 6480 | c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST); |
edd16368 | 6481 | } |
25163bd5 | 6482 | status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT); |
b03a7771 SC |
6483 | if (sg_used) |
6484 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); | |
edd16368 | 6485 | check_ioctl_unit_attention(h, c); |
25163bd5 WS |
6486 | if (status) { |
6487 | status = -EIO; | |
6488 | goto cleanup0; | |
6489 | } | |
6490 | ||
edd16368 SC |
6491 | /* Copy the error information out */ |
6492 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); | |
6493 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { | |
edd16368 | 6494 | status = -EFAULT; |
e2d4a1f6 | 6495 | goto cleanup0; |
edd16368 | 6496 | } |
9233fb10 | 6497 | if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) { |
2b08b3e9 DB |
6498 | int i; |
6499 | ||
edd16368 SC |
6500 | /* Copy the data out of the buffer we created */ |
6501 | BYTE __user *ptr = ioc->buf; | |
6502 | for (i = 0; i < sg_used; i++) { | |
6503 | if (copy_to_user(ptr, buff[i], buff_size[i])) { | |
edd16368 | 6504 | status = -EFAULT; |
e2d4a1f6 | 6505 | goto cleanup0; |
edd16368 SC |
6506 | } |
6507 | ptr += buff_size[i]; | |
6508 | } | |
6509 | } | |
edd16368 | 6510 | status = 0; |
e2d4a1f6 | 6511 | cleanup0: |
45fcb86e | 6512 | cmd_free(h, c); |
edd16368 SC |
6513 | cleanup1: |
6514 | if (buff) { | |
2b08b3e9 DB |
6515 | int i; |
6516 | ||
edd16368 SC |
6517 | for (i = 0; i < sg_used; i++) |
6518 | kfree(buff[i]); | |
6519 | kfree(buff); | |
6520 | } | |
6521 | kfree(buff_size); | |
6522 | kfree(ioc); | |
6523 | return status; | |
6524 | } | |
6525 | ||
6526 | static void check_ioctl_unit_attention(struct ctlr_info *h, | |
6527 | struct CommandList *c) | |
6528 | { | |
6529 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | |
6530 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) | |
6531 | (void) check_for_unit_attention(h, c); | |
6532 | } | |
0390f0c0 | 6533 | |
edd16368 SC |
6534 | /* |
6535 | * ioctl | |
6536 | */ | |
42a91641 | 6537 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg) |
edd16368 SC |
6538 | { |
6539 | struct ctlr_info *h; | |
6540 | void __user *argp = (void __user *)arg; | |
0390f0c0 | 6541 | int rc; |
edd16368 SC |
6542 | |
6543 | h = sdev_to_hba(dev); | |
6544 | ||
6545 | switch (cmd) { | |
6546 | case CCISS_DEREGDISK: | |
6547 | case CCISS_REGNEWDISK: | |
6548 | case CCISS_REGNEWD: | |
a08a8471 | 6549 | hpsa_scan_start(h->scsi_host); |
edd16368 SC |
6550 | return 0; |
6551 | case CCISS_GETPCIINFO: | |
6552 | return hpsa_getpciinfo_ioctl(h, argp); | |
6553 | case CCISS_GETDRIVVER: | |
6554 | return hpsa_getdrivver_ioctl(h, argp); | |
6555 | case CCISS_PASSTHRU: | |
34f0c627 | 6556 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
0390f0c0 SC |
6557 | return -EAGAIN; |
6558 | rc = hpsa_passthru_ioctl(h, argp); | |
34f0c627 | 6559 | atomic_inc(&h->passthru_cmds_avail); |
0390f0c0 | 6560 | return rc; |
edd16368 | 6561 | case CCISS_BIG_PASSTHRU: |
34f0c627 | 6562 | if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0) |
0390f0c0 SC |
6563 | return -EAGAIN; |
6564 | rc = hpsa_big_passthru_ioctl(h, argp); | |
34f0c627 | 6565 | atomic_inc(&h->passthru_cmds_avail); |
0390f0c0 | 6566 | return rc; |
edd16368 SC |
6567 | default: |
6568 | return -ENOTTY; | |
6569 | } | |
6570 | } | |
6571 | ||
bf43caf3 | 6572 | static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr, |
6f039790 | 6573 | u8 reset_type) |
64670ac8 SC |
6574 | { |
6575 | struct CommandList *c; | |
6576 | ||
6577 | c = cmd_alloc(h); | |
bf43caf3 | 6578 | |
a2dac136 SC |
6579 | /* fill_cmd can't fail here, no data buffer to map */ |
6580 | (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, | |
64670ac8 SC |
6581 | RAID_CTLR_LUNID, TYPE_MSG); |
6582 | c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ | |
6583 | c->waiting = NULL; | |
6584 | enqueue_cmd_and_start_io(h, c); | |
6585 | /* Don't wait for completion, the reset won't complete. Don't free | |
6586 | * the command either. This is the last command we will send before | |
6587 | * re-initializing everything, so it doesn't matter and won't leak. | |
6588 | */ | |
bf43caf3 | 6589 | return; |
64670ac8 SC |
6590 | } |
6591 | ||
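/*
 * Build the command in @c.  TYPE_CMD fills in a normal CISS CDB for
 * the given opcode; TYPE_MSG fills in a reset or abort message.  Any
 * data buffer is DMA-mapped last, in the direction encoded in
 * type_attr_dir.  Returns 0 on success, -1 if the mapping fails.
 */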
a2dac136 | 6592 | static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
b7bb24eb | 6593 | void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, |
edd16368 SC |
6594 | int cmd_type) |
6595 | { | |
6596 | int pci_dir = XFER_NONE; | |
9b5c48c2 | 6597 | u64 tag; /* for commands to be aborted */ |
edd16368 SC |
6598 | |
6599 | c->cmd_type = CMD_IOCTL_PEND; | |
a58e7e53 | 6600 | c->scsi_cmd = SCSI_CMD_BUSY; |
edd16368 SC |
6601 | c->Header.ReplyQueue = 0; |
6602 | if (buff != NULL && size > 0) { | |
6603 | c->Header.SGList = 1; | |
50a0decf | 6604 | c->Header.SGTotal = cpu_to_le16(1); |
edd16368 SC |
6605 | } else { |
6606 | c->Header.SGList = 0; | |
50a0decf | 6607 | c->Header.SGTotal = cpu_to_le16(0); |
edd16368 | 6608 | } |
edd16368 SC |
6609 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); |
6610 | ||
edd16368 SC |
6611 | if (cmd_type == TYPE_CMD) { |
6612 | switch (cmd) { | |
6613 | case HPSA_INQUIRY: | |
6614 | /* are we trying to read a vital product page */ | |
b7bb24eb | 6615 | if (page_code & VPD_PAGE) { |
edd16368 | 6616 | c->Request.CDB[1] = 0x01; |
b7bb24eb | 6617 | c->Request.CDB[2] = (page_code & 0xff); |
edd16368 SC |
6618 | } |
6619 | c->Request.CDBLen = 6; | |
a505b86f SC |
6620 | c->Request.type_attr_dir = |
6621 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
edd16368 SC |
6622 | c->Request.Timeout = 0; |
6623 | c->Request.CDB[0] = HPSA_INQUIRY; | |
6624 | c->Request.CDB[4] = size & 0xFF; | |
6625 | break; | |
6626 | case HPSA_REPORT_LOG: | |
6627 | case HPSA_REPORT_PHYS: | |
6628 | /* Talking to the controller, so it's a physical command: | |
6629 | mode = 00, target = 0. Nothing to write. | |
6630 | */ | |
6631 | c->Request.CDBLen = 12; | |
a505b86f SC |
6632 | c->Request.type_attr_dir = |
6633 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
edd16368 SC |
6634 | c->Request.Timeout = 0; |
6635 | c->Request.CDB[0] = cmd; | |
6636 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ | |
6637 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6638 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6639 | c->Request.CDB[9] = size & 0xFF; | |
6640 | break; | |
c2adae44 ST |
6641 | case BMIC_SENSE_DIAG_OPTIONS: |
6642 | c->Request.CDBLen = 16; | |
6643 | c->Request.type_attr_dir = | |
6644 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
6645 | c->Request.Timeout = 0; | |
6646 | /* Spec says this should be BMIC_WRITE */ | |
6647 | c->Request.CDB[0] = BMIC_READ; | |
6648 | c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS; | |
6649 | break; | |
6650 | case BMIC_SET_DIAG_OPTIONS: | |
6651 | c->Request.CDBLen = 16; | |
6652 | c->Request.type_attr_dir = | |
6653 | TYPE_ATTR_DIR(cmd_type, | |
6654 | ATTR_SIMPLE, XFER_WRITE); | |
6655 | c->Request.Timeout = 0; | |
6656 | c->Request.CDB[0] = BMIC_WRITE; | |
6657 | c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS; | |
6658 | break; | |
edd16368 SC |
6659 | case HPSA_CACHE_FLUSH: |
6660 | c->Request.CDBLen = 12; | |
a505b86f SC |
6661 | c->Request.type_attr_dir = |
6662 | TYPE_ATTR_DIR(cmd_type, | |
6663 | ATTR_SIMPLE, XFER_WRITE); | |
edd16368 SC |
6664 | c->Request.Timeout = 0; |
6665 | c->Request.CDB[0] = BMIC_WRITE; | |
6666 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; | |
bb158eab SC |
6667 | c->Request.CDB[7] = (size >> 8) & 0xFF; |
6668 | c->Request.CDB[8] = size & 0xFF; | |
edd16368 SC |
6669 | break; |
6670 | case TEST_UNIT_READY: | |
6671 | c->Request.CDBLen = 6; | |
a505b86f SC |
6672 | c->Request.type_attr_dir = |
6673 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); | |
edd16368 SC |
6674 | c->Request.Timeout = 0; |
6675 | break; | |
283b4a9b SC |
6676 | case HPSA_GET_RAID_MAP: |
6677 | c->Request.CDBLen = 12; | |
a505b86f SC |
6678 | c->Request.type_attr_dir = |
6679 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
283b4a9b SC |
6680 | c->Request.Timeout = 0; |
6681 | c->Request.CDB[0] = HPSA_CISS_READ; | |
6682 | c->Request.CDB[1] = cmd; | |
6683 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ | |
6684 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6685 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6686 | c->Request.CDB[9] = size & 0xFF; | |
6687 | break; | |
316b221a SC |
6688 | case BMIC_SENSE_CONTROLLER_PARAMETERS: |
6689 | c->Request.CDBLen = 10; | |
a505b86f SC |
6690 | c->Request.type_attr_dir = |
6691 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
316b221a SC |
6692 | c->Request.Timeout = 0; |
6693 | c->Request.CDB[0] = BMIC_READ; | |
6694 | c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; | |
6695 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6696 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6697 | break; | |
03383736 DB |
6698 | case BMIC_IDENTIFY_PHYSICAL_DEVICE: |
6699 | c->Request.CDBLen = 10; | |
6700 | c->Request.type_attr_dir = | |
6701 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
6702 | c->Request.Timeout = 0; | |
6703 | c->Request.CDB[0] = BMIC_READ; | |
6704 | c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE; | |
6705 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6706 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6707 | break; | |
d04e62b9 KB |
6708 | case BMIC_SENSE_SUBSYSTEM_INFORMATION: |
6709 | c->Request.CDBLen = 10; | |
6710 | c->Request.type_attr_dir = | |
6711 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
6712 | c->Request.Timeout = 0; | |
6713 | c->Request.CDB[0] = BMIC_READ; | |
6714 | c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION; | |
6715 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6716 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6717 | break; | |
cca8f13b DB |
6718 | case BMIC_SENSE_STORAGE_BOX_PARAMS: |
6719 | c->Request.CDBLen = 10; | |
6720 | c->Request.type_attr_dir = | |
6721 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
6722 | c->Request.Timeout = 0; | |
6723 | c->Request.CDB[0] = BMIC_READ; | |
6724 | c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS; | |
6725 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6726 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6727 | break; | |
66749d0d ST |
6728 | case BMIC_IDENTIFY_CONTROLLER: |
6729 | c->Request.CDBLen = 10; | |
6730 | c->Request.type_attr_dir = | |
6731 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ); | |
6732 | c->Request.Timeout = 0; | |
6733 | c->Request.CDB[0] = BMIC_READ; | |
6734 | c->Request.CDB[1] = 0; | |
6735 | c->Request.CDB[2] = 0; | |
6736 | c->Request.CDB[3] = 0; | |
6737 | c->Request.CDB[4] = 0; | |
6738 | c->Request.CDB[5] = 0; | |
6739 | c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER; | |
6740 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
6741 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
6742 | c->Request.CDB[9] = 0; | |
6743 | break; | |
edd16368 SC |
6744 | default: |
6745 | dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); | |
6746 | BUG(); | |
a2dac136 | 6747 | return -1; |
edd16368 SC |
6748 | } |
6749 | } else if (cmd_type == TYPE_MSG) { | |
6750 | switch (cmd) { | |
6751 | ||
0b9b7b6e ST |
6752 | case HPSA_PHYS_TARGET_RESET: |
6753 | c->Request.CDBLen = 16; | |
6754 | c->Request.type_attr_dir = | |
6755 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); | |
6756 | c->Request.Timeout = 0; /* Don't time out */ | |
6757 | memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); | |
6758 | c->Request.CDB[0] = HPSA_RESET; | |
6759 | c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE; | |
6760 | /* Physical target reset needs no control bytes 4-7 */ | |
6761 | c->Request.CDB[4] = 0x00; | |
6762 | c->Request.CDB[5] = 0x00; | |
6763 | c->Request.CDB[6] = 0x00; | |
6764 | c->Request.CDB[7] = 0x00; | |
6765 | break; | |
edd16368 SC |
6766 | case HPSA_DEVICE_RESET_MSG: |
6767 | c->Request.CDBLen = 16; | |
a505b86f SC |
6768 | c->Request.type_attr_dir = |
6769 | TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE); | |
edd16368 | 6770 | c->Request.Timeout = 0; /* Don't time out */ |
64670ac8 SC |
6771 | memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); |
6772 | c->Request.CDB[0] = cmd; | |
21e89afd | 6773 | c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; |
edd16368 SC |
6774 | /* If bytes 4-7 are zero, it means reset the */ |
6775 | /* LunID device */ | |
6776 | c->Request.CDB[4] = 0x00; | |
6777 | c->Request.CDB[5] = 0x00; | |
6778 | c->Request.CDB[6] = 0x00; | |
6779 | c->Request.CDB[7] = 0x00; | |
75167d2c SC |
6780 | break; |
6781 | case HPSA_ABORT_MSG: | |
9b5c48c2 | 6782 | memcpy(&tag, buff, sizeof(tag)); |
2b08b3e9 | 6783 | dev_dbg(&h->pdev->dev, |
9b5c48c2 SC |
6784 | "Abort Tag:0x%016llx using rqst Tag:0x%016llx", |
6785 | tag, c->Header.tag); | |
75167d2c | 6786 | c->Request.CDBLen = 16; |
a505b86f SC |
6787 | c->Request.type_attr_dir = |
6788 | TYPE_ATTR_DIR(cmd_type, | |
6789 | ATTR_SIMPLE, XFER_WRITE); | |
75167d2c SC |
6790 | c->Request.Timeout = 0; /* Don't time out */ |
6791 | c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; | |
6792 | c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; | |
6793 | c->Request.CDB[2] = 0x00; /* reserved */ | |
6794 | c->Request.CDB[3] = 0x00; /* reserved */ | |
6795 | /* Tag to abort goes in CDB[4]-CDB[11] */ | |
9b5c48c2 | 6796 | memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); |
75167d2c SC |
6797 | c->Request.CDB[12] = 0x00; /* reserved */ |
6798 | c->Request.CDB[13] = 0x00; /* reserved */ | |
6799 | c->Request.CDB[14] = 0x00; /* reserved */ | |
6800 | c->Request.CDB[15] = 0x00; /* reserved */ | |
edd16368 | 6801 | break; |
edd16368 SC |
6802 | default: |
6803 | dev_warn(&h->pdev->dev, "unknown message type %d\n", | |
6804 | cmd); | |
6805 | BUG(); | |
6806 | } | |
6807 | } else { | |
6808 | dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); | |
6809 | BUG(); | |
6810 | } | |
6811 | ||
a505b86f | 6812 | switch (GET_DIR(c->Request.type_attr_dir)) { |
edd16368 SC |
6813 | case XFER_READ: |
6814 | pci_dir = PCI_DMA_FROMDEVICE; | |
6815 | break; | |
6816 | case XFER_WRITE: | |
6817 | pci_dir = PCI_DMA_TODEVICE; | |
6818 | break; | |
6819 | case XFER_NONE: | |
6820 | pci_dir = PCI_DMA_NONE; | |
6821 | break; | |
6822 | default: | |
6823 | pci_dir = PCI_DMA_BIDIRECTIONAL; | |
6824 | } | |
a2dac136 SC |
6825 | if (hpsa_map_one(h->pdev, c, buff, size, pci_dir)) |
6826 | return -1; | |
6827 | return 0; | |
edd16368 SC |
6828 | } |
6829 | ||
6830 | /* | |
6831 | * Map (physical) PCI mem into (virtual) kernel space | |
6832 | */ | |
6833 | static void __iomem *remap_pci_mem(ulong base, ulong size) | |
6834 | { | |
6835 | ulong page_base = ((ulong) base) & PAGE_MASK; | |
6836 | ulong page_offs = ((ulong) base) - page_base; | |
088ba34c SC |
6837 | void __iomem *page_remapped = ioremap_nocache(page_base, |
6838 | page_offs + size); | |
edd16368 SC |
6839 | |
6840 | return page_remapped ? (page_remapped + page_offs) : NULL; | |
6841 | } | |
6842 | ||
254f796b | 6843 | static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) |
edd16368 | 6844 | { |
254f796b | 6845 | return h->access.command_completed(h, q); |
edd16368 SC |
6846 | } |
6847 | ||
900c5440 | 6848 | static inline bool interrupt_pending(struct ctlr_info *h) |
edd16368 SC |
6849 | { |
6850 | return h->access.intr_pending(h); | |
6851 | } | |
6852 | ||
6853 | static inline long interrupt_not_for_us(struct ctlr_info *h) | |
6854 | { | |
10f66018 SC |
6855 | return (h->access.intr_pending(h) == 0) || |
6856 | (h->interrupts_enabled == 0); | |
edd16368 SC |
6857 | } |
6858 | ||
01a02ffc SC |
6859 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
6860 | u32 raw_tag) | |
edd16368 SC |
6861 | { |
6862 | if (unlikely(tag_index >= h->nr_cmds)) { | |
6863 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | |
6864 | return 1; | |
6865 | } | |
6866 | return 0; | |
6867 | } | |
6868 | ||
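/*
 * Route a completed command to the right completion path: SCSI and
 * ioaccel commands go through complete_scsi_command(); ioctl and TMF
 * commands wake their waiter.
 */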
5a3d16f5 | 6869 | static inline void finish_cmd(struct CommandList *c) |
edd16368 | 6870 | { |
e85c5974 | 6871 | dial_up_lockup_detection_on_fw_flash_complete(c->h, c); |
c349775e ST |
6872 | if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI |
6873 | || c->cmd_type == CMD_IOACCEL2)) | |
1fb011fb | 6874 | complete_scsi_command(c); |
8be986cc | 6875 | else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF) |
edd16368 | 6876 | complete(c->waiting); |
a104c99f SC |
6877 | } |
6878 | ||
303932fd | 6879 | /* process completion of an indexed ("direct lookup") command */ |
1d94f94d | 6880 | static inline void process_indexed_cmd(struct ctlr_info *h, |
303932fd DB |
6881 | u32 raw_tag) |
6882 | { | |
6883 | u32 tag_index; | |
6884 | struct CommandList *c; | |
6885 | ||
f2405db8 | 6886 | tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT; |
1d94f94d SC |
6887 | if (!bad_tag(h, tag_index, raw_tag)) { |
6888 | c = h->cmd_pool + tag_index; | |
6889 | finish_cmd(c); | |
6890 | } | |
303932fd DB |
6891 | } |
6892 | ||
64670ac8 SC |
6893 | /* Some controllers, like p400, will give us one interrupt |
6894 | * after a soft reset, even if we turned interrupts off. | |
6895 | * Only need to check for this in the hpsa_xxx_discard_completions | |
6896 | * functions. | |
6897 | */ | |
6898 | static int ignore_bogus_interrupt(struct ctlr_info *h) | |
6899 | { | |
6900 | if (likely(!reset_devices)) | |
6901 | return 0; | |
6902 | ||
6903 | if (likely(h->interrupts_enabled)) | |
6904 | return 0; | |
6905 | ||
6906 | dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " | |
6907 | "(known firmware bug). Ignoring.\n"); | |
6908 | ||
6909 | return 1; | |
6910 | } | |
6911 | ||
254f796b MG |
6912 | /* |
6913 | * Convert &h->q[x] (passed to interrupt handlers) back to h. | |
6914 | * Relies on (h-q[x] == x) being true for x such that | |
6915 | * 0 <= x < MAX_REPLY_QUEUES. | |
6916 | */ | |
6917 | static struct ctlr_info *queue_to_hba(u8 *queue) | |
64670ac8 | 6918 | { |
254f796b MG |
6919 | return container_of((queue - *queue), struct ctlr_info, q[0]); |
6920 | } | |
6921 | ||
6922 | static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue) | |
6923 | { | |
6924 | struct ctlr_info *h = queue_to_hba(queue); | |
6925 | u8 q = *(u8 *) queue; | |
64670ac8 SC |
6926 | u32 raw_tag; |
6927 | ||
6928 | if (ignore_bogus_interrupt(h)) | |
6929 | return IRQ_NONE; | |
6930 | ||
6931 | if (interrupt_not_for_us(h)) | |
6932 | return IRQ_NONE; | |
a0c12413 | 6933 | h->last_intr_timestamp = get_jiffies_64(); |
64670ac8 | 6934 | while (interrupt_pending(h)) { |
254f796b | 6935 | raw_tag = get_next_completion(h, q); |
64670ac8 | 6936 | while (raw_tag != FIFO_EMPTY) |
254f796b | 6937 | raw_tag = next_command(h, q); |
64670ac8 | 6938 | } |
64670ac8 SC |
6939 | return IRQ_HANDLED; |
6940 | } | |
6941 | ||
254f796b | 6942 | static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue) |
64670ac8 | 6943 | { |
254f796b | 6944 | struct ctlr_info *h = queue_to_hba(queue); |
64670ac8 | 6945 | u32 raw_tag; |
254f796b | 6946 | u8 q = *(u8 *) queue; |
64670ac8 SC |
6947 | |
6948 | if (ignore_bogus_interrupt(h)) | |
6949 | return IRQ_NONE; | |
6950 | ||
a0c12413 | 6951 | h->last_intr_timestamp = get_jiffies_64(); |
254f796b | 6952 | raw_tag = get_next_completion(h, q); |
64670ac8 | 6953 | while (raw_tag != FIFO_EMPTY) |
254f796b | 6954 | raw_tag = next_command(h, q); |
64670ac8 SC |
6955 | return IRQ_HANDLED; |
6956 | } | |
6957 | ||
254f796b | 6958 | static irqreturn_t do_hpsa_intr_intx(int irq, void *queue) |
edd16368 | 6959 | { |
254f796b | 6960 | struct ctlr_info *h = queue_to_hba((u8 *) queue); |
303932fd | 6961 | u32 raw_tag; |
254f796b | 6962 | u8 q = *(u8 *) queue; |
edd16368 SC |
6963 | |
6964 | if (interrupt_not_for_us(h)) | |
6965 | return IRQ_NONE; | |
a0c12413 | 6966 | h->last_intr_timestamp = get_jiffies_64(); |
10f66018 | 6967 | while (interrupt_pending(h)) { |
254f796b | 6968 | raw_tag = get_next_completion(h, q); |
10f66018 | 6969 | while (raw_tag != FIFO_EMPTY) { |
f2405db8 | 6970 | process_indexed_cmd(h, raw_tag); |
254f796b | 6971 | raw_tag = next_command(h, q); |
10f66018 SC |
6972 | } |
6973 | } | |
10f66018 SC |
6974 | return IRQ_HANDLED; |
6975 | } | |
6976 | ||
254f796b | 6977 | static irqreturn_t do_hpsa_intr_msi(int irq, void *queue) |
10f66018 | 6978 | { |
254f796b | 6979 | struct ctlr_info *h = queue_to_hba(queue); |
10f66018 | 6980 | u32 raw_tag; |
254f796b | 6981 | u8 q = *(u8 *) queue; |
10f66018 | 6982 | |
a0c12413 | 6983 | h->last_intr_timestamp = get_jiffies_64(); |
254f796b | 6984 | raw_tag = get_next_completion(h, q); |
303932fd | 6985 | while (raw_tag != FIFO_EMPTY) { |
f2405db8 | 6986 | process_indexed_cmd(h, raw_tag); |
254f796b | 6987 | raw_tag = next_command(h, q); |
edd16368 | 6988 | } |
edd16368 SC |
6989 | return IRQ_HANDLED; |
6990 | } | |
6991 | ||
a9a3a273 SC |
6992 | /* Send a message CDB to the firmware. Careful, this only works |
6993 | * in simple mode, not performant mode due to the tag lookup. | |
6994 | * We only ever use this immediately after a controller reset. | |
6995 | */ | |
6f039790 GKH |
6996 | static int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
6997 | unsigned char type) | |
edd16368 SC |
6998 | { |
6999 | struct Command { | |
7000 | struct CommandListHeader CommandHeader; | |
7001 | struct RequestBlock Request; | |
7002 | struct ErrDescriptor ErrorDescriptor; | |
7003 | }; | |
7004 | struct Command *cmd; | |
7005 | static const size_t cmd_sz = sizeof(*cmd) + | |
7006 | sizeof(struct ErrorInfo); /* room advertised by ErrorDescriptor.Len below */
7007 | dma_addr_t paddr64; | |
2b08b3e9 DB |
7008 | __le32 paddr32; |
7009 | u32 tag; | |
edd16368 SC |
7010 | void __iomem *vaddr; |
7011 | int i, err; | |
7012 | ||
7013 | vaddr = pci_ioremap_bar(pdev, 0); | |
7014 | if (vaddr == NULL) | |
7015 | return -ENOMEM; | |
7016 | ||
7017 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the | |
7018 | * CCISS commands, so they must be allocated from the lower 4GiB of | |
7019 | * memory. | |
7020 | */ | |
7021 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
7022 | if (err) { | |
7023 | iounmap(vaddr); | |
1eaec8f3 | 7024 | return err; |
edd16368 SC |
7025 | } |
7026 | ||
7027 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); | |
7028 | if (cmd == NULL) { | |
7029 | iounmap(vaddr); | |
7030 | return -ENOMEM; | |
7031 | } | |
7032 | ||
7033 | /* This must fit, because of the 32-bit consistent DMA mask. Also, | |
7034 | * although there's no guarantee, we assume that the address is at | |
7035 | * least 4-byte aligned (most likely, it's page-aligned). | |
7036 | */ | |
2b08b3e9 | 7037 | paddr32 = cpu_to_le32(paddr64); |
edd16368 SC |
7038 | |
7039 | cmd->CommandHeader.ReplyQueue = 0; | |
7040 | cmd->CommandHeader.SGList = 0; | |
50a0decf | 7041 | cmd->CommandHeader.SGTotal = cpu_to_le16(0); |
2b08b3e9 | 7042 | cmd->CommandHeader.tag = cpu_to_le64(paddr64); |
edd16368 SC |
7043 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); |
7044 | ||
7045 | cmd->Request.CDBLen = 16; | |
a505b86f SC |
7046 | cmd->Request.type_attr_dir = |
7047 | TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE); | |
edd16368 SC |
7048 | cmd->Request.Timeout = 0; /* Don't time out */ |
7049 | cmd->Request.CDB[0] = opcode; | |
7050 | cmd->Request.CDB[1] = type; | |
7051 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ | |
50a0decf | 7052 | cmd->ErrorDescriptor.Addr = |
2b08b3e9 | 7053 | cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd))); |
50a0decf | 7054 | cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo)); |
edd16368 | 7055 | |
2b08b3e9 | 7056 | writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET); |
edd16368 SC |
7057 | |
7058 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | |
7059 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | |
2b08b3e9 | 7060 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64) |
edd16368 SC |
7061 | break; |
7062 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | |
7063 | } | |
7064 | ||
7065 | iounmap(vaddr); | |
7066 | ||
7067 | /* we leak the DMA buffer here ... no choice since the controller could | |
7068 | * still complete the command. | |
7069 | */ | |
7070 | if (i == HPSA_MSG_SEND_RETRY_LIMIT) { | |
7071 | dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", | |
7072 | opcode, type); | |
7073 | return -ETIMEDOUT; | |
7074 | } | |
7075 | ||
7076 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); | |
7077 | ||
7078 | if (tag & HPSA_ERROR_BIT) { | |
7079 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", | |
7080 | opcode, type); | |
7081 | return -EIO; | |
7082 | } | |
7083 | ||
7084 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", | |
7085 | opcode, type); | |
7086 | return 0; | |
7087 | } | |
7088 | ||
edd16368 SC |
7089 | #define hpsa_noop(p) hpsa_message(p, 3, 0) |
7090 | ||
1df8552a | 7091 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
42a91641 | 7092 | void __iomem *vaddr, u32 use_doorbell) |
1df8552a | 7093 | { |
1df8552a SC |
7094 | |
7095 | if (use_doorbell) { | |
7096 | /* For everything after the P600, the PCI power state method | |
7097 | * of resetting the controller doesn't work, so we have this | |
7098 | * other way using the doorbell register. | |
7099 | */ | |
7100 | dev_info(&pdev->dev, "using doorbell to reset controller\n"); | |
cf0b08d0 | 7101 | writel(use_doorbell, vaddr + SA5_DOORBELL); |
85009239 | 7102 | |
00701a96 | 7103 | /* PMC hardware guys tell us we need a 10 second delay after |
85009239 SC |
7104 | * doorbell reset and before any attempt to talk to the board |
7105 | * at all to ensure that this actually works and doesn't fall | |
7106 | * over in some weird corner cases. | |
7107 | */ | |
00701a96 | 7108 | msleep(10000); |
1df8552a SC |
7109 | } else { /* Try to do it the PCI power state way */ |
7110 | ||
7111 | /* Quoting from the Open CISS Specification: "The Power | |
7112 | * Management Control/Status Register (CSR) controls the power | |
7113 | * state of the device. The normal operating state is D0, | |
7114 | * CSR=00h. The software off state is D3, CSR=03h. To reset | |
7115 | * the controller, place the interface device in D3 then to D0, | |
7116 | * this causes a secondary PCI reset which will reset the | |
7117 | * controller." */ | |
2662cab8 DB |
7118 | |
7119 | int rc = 0; | |
7120 | ||
1df8552a | 7121 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); |
2662cab8 | 7122 | |
1df8552a | 7123 | /* enter the D3hot power management state */ |
2662cab8 DB |
7124 | rc = pci_set_power_state(pdev, PCI_D3hot); |
7125 | if (rc) | |
7126 | return rc; | |
1df8552a SC |
7127 | |
7128 | msleep(500); | |
7129 | ||
7130 | /* enter the D0 power management state */ | |
2662cab8 DB |
7131 | rc = pci_set_power_state(pdev, PCI_D0); |
7132 | if (rc) | |
7133 | return rc; | |
c4853efe MM |
7134 | |
7135 | /* | |
7136 | * The P600 requires a small delay when changing states. | |
7137 | * Otherwise we may think the board did not reset and we bail. | |
7138 | * This is for kdump only and is particular to the P600.
7139 | */ | |
7140 | msleep(500); | |
1df8552a SC |
7141 | } |
7142 | return 0; | |
7143 | } | |
7144 | ||
6f039790 | 7145 | static void init_driver_version(char *driver_version, int len) |
580ada3c SC |
7146 | { |
7147 | memset(driver_version, 0, len); | |
f79cfec6 | 7148 | strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1); |
580ada3c SC |
7149 | } |
7150 | ||
6f039790 | 7151 | static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable) |
580ada3c SC |
7152 | { |
7153 | char *driver_version; | |
7154 | int i, size = sizeof(cfgtable->driver_version); | |
7155 | ||
7156 | driver_version = kmalloc(size, GFP_KERNEL); | |
7157 | if (!driver_version) | |
7158 | return -ENOMEM; | |
7159 | ||
7160 | init_driver_version(driver_version, size); | |
7161 | for (i = 0; i < size; i++) | |
7162 | writeb(driver_version[i], &cfgtable->driver_version[i]); | |
7163 | kfree(driver_version); | |
7164 | return 0; | |
7165 | } | |
7166 | ||
6f039790 GKH |
7167 | static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable, |
7168 | unsigned char *driver_ver) | |
580ada3c SC |
7169 | { |
7170 | int i; | |
7171 | ||
7172 | for (i = 0; i < sizeof(cfgtable->driver_version); i++) | |
7173 | driver_ver[i] = readb(&cfgtable->driver_version[i]); | |
7174 | } | |
7175 | ||
6f039790 | 7176 | static int controller_reset_failed(struct CfgTable __iomem *cfgtable) |
580ada3c SC |
7177 | { |
7178 | ||
7179 | char *driver_ver, *old_driver_ver; | |
7180 | int rc, size = sizeof(cfgtable->driver_version); | |
7181 | ||
7182 | old_driver_ver = kmalloc(2 * size, GFP_KERNEL); | |
7183 | if (!old_driver_ver) | |
7184 | return -ENOMEM; | |
7185 | driver_ver = old_driver_ver + size; | |
7186 | ||
7187 | /* After a reset, the 32 bytes of "driver version" in the cfgtable | |
7188 | * should have been changed, otherwise we know the reset failed. | |
7189 | */ | |
7190 | init_driver_version(old_driver_ver, size); | |
7191 | read_driver_ver_from_cfgtable(cfgtable, driver_ver); | |
7192 | rc = !memcmp(driver_ver, old_driver_ver, size); | |
7193 | kfree(old_driver_ver); | |
7194 | return rc; | |
7195 | } | |
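Taken together, init_driver_version(), write_driver_ver_to_cfgtable() and
controller_reset_failed() implement a write-then-compare handshake across the
reset. A hedged sketch of the flow (error handling elided; the reset_failed
local is illustrative):

	/* Before the reset: stamp our version string into the cfgtable. */
	write_driver_ver_to_cfgtable(cfgtable);

	/* ... hard reset via doorbell or PCI power states ... */

	/* Afterwards the firmware should have wiped driver_version; if
	 * the bytes still match a freshly initialized copy, the board
	 * never actually reset and the caller falls back to soft reset. */
	reset_failed = controller_reset_failed(cfgtable);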
edd16368 | 7196 | /* This does a hard reset of the controller using PCI power management |
1df8552a | 7197 | * states or the doorbell register. | |
edd16368 | 7198 | */ |
6b6c1cd7 | 7199 | static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id) |
edd16368 | 7200 | { |
1df8552a SC |
7201 | u64 cfg_offset; |
7202 | u32 cfg_base_addr; | |
7203 | u64 cfg_base_addr_index; | |
7204 | void __iomem *vaddr; | |
7205 | unsigned long paddr; | |
580ada3c | 7206 | u32 misc_fw_support; |
270d05de | 7207 | int rc; |
1df8552a | 7208 | struct CfgTable __iomem *cfgtable; |
cf0b08d0 | 7209 | u32 use_doorbell; |
270d05de | 7210 | u16 command_register; |
edd16368 | 7211 | |
1df8552a SC |
7212 | /* For controllers as old as the P600, this is very nearly |
7213 | * the same thing as | |
edd16368 SC |
7214 | * |
7215 | * pci_save_state(pci_dev); | |
7216 | * pci_set_power_state(pci_dev, PCI_D3hot); | |
7217 | * pci_set_power_state(pci_dev, PCI_D0); | |
7218 | * pci_restore_state(pci_dev); | |
7219 | * | |
1df8552a SC |
7220 | * For controllers newer than the P600, the pci power state |
7221 | * method of resetting doesn't work so we have another way | |
7222 | * using the doorbell register. | |
edd16368 | 7223 | */ |
18867659 | 7224 | |
60f923b9 RE |
7225 | if (!ctlr_is_resettable(board_id)) { |
7226 | dev_warn(&pdev->dev, "Controller not resettable\n"); | |
25c1e56a SC |
7227 | return -ENODEV; |
7228 | } | |
46380786 SC |
7229 | |
7230 | /* if controller is soft- but not hard resettable... */ | |
7231 | if (!ctlr_is_hard_resettable(board_id)) | |
7232 | return -ENOTSUPP; /* try soft reset later. */ | |
18867659 | 7233 | |
270d05de SC |
7234 | /* Save the PCI command register */ |
7235 | pci_read_config_word(pdev, 4, &command_register); | |
270d05de | 7236 | pci_save_state(pdev); |
edd16368 | 7237 | |
1df8552a SC |
7238 | /* find the first memory BAR, so we can find the cfg table */ |
7239 | rc = hpsa_pci_find_memory_BAR(pdev, &paddr); | |
7240 | if (rc) | |
7241 | return rc; | |
7242 | vaddr = remap_pci_mem(paddr, 0x250); | |
7243 | if (!vaddr) | |
7244 | return -ENOMEM; | |
edd16368 | 7245 | |
1df8552a SC |
7246 | /* find cfgtable in order to check if reset via doorbell is supported */ |
7247 | rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, | |
7248 | &cfg_base_addr_index, &cfg_offset); | |
7249 | if (rc) | |
7250 | goto unmap_vaddr; | |
7251 | cfgtable = remap_pci_mem(pci_resource_start(pdev, | |
7252 | cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); | |
7253 | if (!cfgtable) { | |
7254 | rc = -ENOMEM; | |
7255 | goto unmap_vaddr; | |
7256 | } | |
580ada3c SC |
7257 | rc = write_driver_ver_to_cfgtable(cfgtable); |
7258 | if (rc) | |
03741d95 | 7259 | goto unmap_cfgtable; |
edd16368 | 7260 | |
cf0b08d0 SC |
7261 | /* If reset via doorbell register is supported, use that. |
7262 | * There are two such methods. Favor the newest method. | |
7263 | */ | |
1df8552a | 7264 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
cf0b08d0 SC |
7265 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; |
7266 | if (use_doorbell) { | |
7267 | use_doorbell = DOORBELL_CTLR_RESET2; | |
7268 | } else { | |
7269 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | |
7270 | if (use_doorbell) { | |
050f7147 SC |
7271 | dev_warn(&pdev->dev, |
7272 | "Soft reset not supported. Firmware update is required.\n"); | |
64670ac8 | 7273 | rc = -ENOTSUPP; /* try soft reset */ |
cf0b08d0 SC |
7274 | goto unmap_cfgtable; |
7275 | } | |
7276 | } | |
edd16368 | 7277 | |
1df8552a SC |
7278 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
7279 | if (rc) | |
7280 | goto unmap_cfgtable; | |
edd16368 | 7281 | |
270d05de | 7282 | pci_restore_state(pdev); |
270d05de | 7283 | pci_write_config_word(pdev, 4, command_register); |
edd16368 | 7284 | |
1df8552a SC |
7285 | /* Some devices (notably the HP Smart Array 5i Controller) |
7286 | need a little pause here */ | |
7287 | msleep(HPSA_POST_RESET_PAUSE_MSECS); | |
7288 | ||
fe5389c8 SC |
7289 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); |
7290 | if (rc) { | |
7291 | dev_warn(&pdev->dev, | |
050f7147 | 7292 | "Failed waiting for board to become ready after hard reset\n"); |
fe5389c8 SC |
7293 | goto unmap_cfgtable; |
7294 | } | |
fe5389c8 | 7295 | |
580ada3c SC |
7296 | rc = controller_reset_failed(cfgtable);
7297 | if (rc < 0) | |
7298 | goto unmap_cfgtable; | |
7299 | if (rc) { | |
64670ac8 SC |
7300 | dev_warn(&pdev->dev, "Unable to successfully reset " |
7301 | "controller. Will try soft reset.\n"); | |
7302 | rc = -ENOTSUPP; | |
580ada3c | 7303 | } else { |
64670ac8 | 7304 | dev_info(&pdev->dev, "board ready after hard reset.\n"); |
1df8552a SC |
7305 | } |
7306 | ||
7307 | unmap_cfgtable: | |
7308 | iounmap(cfgtable); | |
7309 | ||
7310 | unmap_vaddr: | |
7311 | iounmap(vaddr); | |
7312 | return rc; | |
edd16368 SC |
7313 | } |
7314 | ||
7315 | /* | |
7316 | * We cannot read the structure directly, for portability we must use | |
7317 | * the io functions. | |
7318 | * This is for debug only. | |
7319 | */ | |
42a91641 | 7320 | static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb) |
edd16368 | 7321 | { |
58f8665c | 7322 | #ifdef HPSA_DEBUG |
edd16368 SC |
7323 | int i; |
7324 | char temp_name[17]; | |
7325 | ||
7326 | dev_info(dev, "Controller Configuration information\n"); | |
7327 | dev_info(dev, "------------------------------------\n"); | |
7328 | for (i = 0; i < 4; i++) | |
7329 | temp_name[i] = readb(&(tb->Signature[i])); | |
7330 | temp_name[4] = '\0'; | |
7331 | dev_info(dev, " Signature = %s\n", temp_name); | |
7332 | dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); | |
7333 | dev_info(dev, " Transport methods supported = 0x%x\n", | |
7334 | readl(&(tb->TransportSupport))); | |
7335 | dev_info(dev, " Transport methods active = 0x%x\n", | |
7336 | readl(&(tb->TransportActive))); | |
7337 | dev_info(dev, " Requested transport Method = 0x%x\n", | |
7338 | readl(&(tb->HostWrite.TransportRequest))); | |
7339 | dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", | |
7340 | readl(&(tb->HostWrite.CoalIntDelay))); | |
7341 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", | |
7342 | readl(&(tb->HostWrite.CoalIntCount))); | |
69d6e33d | 7343 | dev_info(dev, " Max outstanding commands = %d\n", |
edd16368 SC |
7344 | readl(&(tb->CmdsOutMax))); |
7345 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | |
7346 | for (i = 0; i < 16; i++) | |
7347 | temp_name[i] = readb(&(tb->ServerName[i])); | |
7348 | temp_name[16] = '\0'; | |
7349 | dev_info(dev, " Server Name = %s\n", temp_name); | |
7350 | dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", | |
7351 | readl(&(tb->HeartBeat))); | |
edd16368 | 7352 | #endif /* HPSA_DEBUG */ |
58f8665c | 7353 | } |
edd16368 SC |
7354 | |
7355 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |
7356 | { | |
7357 | int i, offset, mem_type, bar_type; | |
7358 | ||
7359 | if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ | |
7360 | return 0; | |
7361 | offset = 0; | |
7362 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
7363 | bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; | |
7364 | if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) | |
7365 | offset += 4; | |
7366 | else { | |
7367 | mem_type = pci_resource_flags(pdev, i) & | |
7368 | PCI_BASE_ADDRESS_MEM_TYPE_MASK; | |
7369 | switch (mem_type) { | |
7370 | case PCI_BASE_ADDRESS_MEM_TYPE_32: | |
7371 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: | |
7372 | offset += 4; /* 32 bit */ | |
7373 | break; | |
7374 | case PCI_BASE_ADDRESS_MEM_TYPE_64: | |
7375 | offset += 8; | |
7376 | break; | |
7377 | default: /* reserved in PCI 2.2 */ | |
7378 | dev_warn(&pdev->dev, | |
7379 | "base address is invalid\n"); | |
7380 | return -1; | |
7381 | break; | |
7382 | } | |
7383 | } | |
7384 | if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) | |
7385 | return i + 1; | |
7386 | } | |
7387 | return -1; | |
7388 | } | |
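A worked pass through the loop may help; the BAR layout below is
hypothetical:

	/* Say the cfgtable reports cfg_base_addr == 0x14 (the second BAR
	 * register) and resource 0 is a plain 32-bit memory BAR.  The
	 * i == 0 pass adds 4 to offset, the test
	 * 4 == 0x14 - PCI_BASE_ADDRESS_0 succeeds, and the function
	 * returns 0 + 1 == 1: the caller should map the table out of
	 * pci_resource_start(pdev, 1). */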
7389 | ||
cc64c817 RE |
7390 | static void hpsa_disable_interrupt_mode(struct ctlr_info *h) |
7391 | { | |
7392 | if (h->msix_vector) { | |
7393 | if (h->pdev->msix_enabled) | |
7394 | pci_disable_msix(h->pdev); | |
105a3dbc | 7395 | h->msix_vector = 0; |
cc64c817 RE |
7396 | } else if (h->msi_vector) { |
7397 | if (h->pdev->msi_enabled) | |
7398 | pci_disable_msi(h->pdev); | |
105a3dbc | 7399 | h->msi_vector = 0; |
cc64c817 RE |
7400 | } |
7401 | } | |
7402 | ||
edd16368 | 7403 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
050f7147 | 7404 | * controllers that are capable. If not, we use legacy INTx mode. |
edd16368 | 7405 | */ |
6f039790 | 7406 | static void hpsa_interrupt_mode(struct ctlr_info *h) |
edd16368 SC |
7407 | { |
7408 | #ifdef CONFIG_PCI_MSI | |
254f796b MG |
7409 | int err, i; |
7410 | struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES]; | |
7411 | ||
7412 | for (i = 0; i < MAX_REPLY_QUEUES; i++) { | |
7413 | hpsa_msix_entries[i].vector = 0; | |
7414 | hpsa_msix_entries[i].entry = i; | |
7415 | } | |
edd16368 SC |
7416 | |
7417 | /* Some boards advertise MSI but don't really support it */ | |
6b3f4c52 SC |
7418 | if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || |
7419 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) | |
edd16368 | 7420 | goto default_int_mode; |
55c06c71 | 7421 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
050f7147 | 7422 | dev_info(&h->pdev->dev, "MSI-X capable controller\n"); |
eee0f03a | 7423 | h->msix_vector = MAX_REPLY_QUEUES; |
f89439bc SC |
7424 | if (h->msix_vector > num_online_cpus()) |
7425 | h->msix_vector = num_online_cpus(); | |
18fce3c4 AG |
7426 | err = pci_enable_msix_range(h->pdev, hpsa_msix_entries, |
7427 | 1, h->msix_vector); | |
7428 | if (err < 0) { | |
7429 | dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); | |
7430 | h->msix_vector = 0; | |
7431 | goto single_msi_mode; | |
7432 | } else if (err < h->msix_vector) { | |
55c06c71 | 7433 | dev_warn(&h->pdev->dev, "only %d MSI-X vectors " |
edd16368 | 7434 | "available\n", err); |
edd16368 | 7435 | } |
18fce3c4 AG |
7436 | h->msix_vector = err; |
7437 | for (i = 0; i < h->msix_vector; i++) | |
7438 | h->intr[i] = hpsa_msix_entries[i].vector; | |
7439 | return; | |
edd16368 | 7440 | } |
18fce3c4 | 7441 | single_msi_mode: |
55c06c71 | 7442 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
050f7147 | 7443 | dev_info(&h->pdev->dev, "MSI capable controller\n"); |
55c06c71 | 7444 | if (!pci_enable_msi(h->pdev)) |
edd16368 SC |
7445 | h->msi_vector = 1; |
7446 | else | |
55c06c71 | 7447 | dev_warn(&h->pdev->dev, "MSI init failed\n"); |
edd16368 SC |
7448 | } |
7449 | default_int_mode: | |
7450 | #endif /* CONFIG_PCI_MSI */ | |
7451 | /* if we get here we're going to use the default interrupt mode */ | |
a9a3a273 | 7452 | h->intr[h->intr_mode] = h->pdev->irq; |
edd16368 SC |
7453 | } |
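A sizing sketch of the fallback ladder above; the CPU count is hypothetical
and MAX_REPLY_QUEUES is whatever hpsa.h defines:

	/* On a 4-CPU machine h->msix_vector starts at MAX_REPLY_QUEUES,
	 * is clamped to num_online_cpus() == 4, and
	 * pci_enable_msix_range(h->pdev, entries, 1, 4) may grant 1-4
	 * vectors; whatever it grants becomes the reply-queue count.
	 * If it fails outright the driver falls back to a single MSI
	 * vector, and failing that to the legacy INTx line left in
	 * h->pdev->irq. */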
7454 | ||
6f039790 | 7455 | static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) |
e5c880d1 SC |
7456 | { |
7457 | int i; | |
7458 | u32 subsystem_vendor_id, subsystem_device_id; | |
7459 | ||
7460 | subsystem_vendor_id = pdev->subsystem_vendor; | |
7461 | subsystem_device_id = pdev->subsystem_device; | |
7462 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | | |
7463 | subsystem_vendor_id; | |
7464 | ||
7465 | for (i = 0; i < ARRAY_SIZE(products); i++) | |
7466 | if (*board_id == products[i].board_id) | |
7467 | return i; | |
7468 | ||
6798cc0a SC |
7469 | if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && |
7470 | subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || | |
7471 | !hpsa_allow_any) { | |
e5c880d1 SC |
7472 | dev_warn(&pdev->dev, "unrecognized board ID: " |
7473 | "0x%08x, ignoring.\n", *board_id); | |
7474 | return -ENODEV; | |
7475 | } | |
7476 | return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ | |
7477 | } | |
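A worked example of the composition; the IDs are an assumption about one
entry in the products[] table (believed to be a Smart Array P420):

	/* subsystem vendor 0x103C (HP), subsystem device 0x3351: */
	u32 board_id = ((0x3351u << 16) & 0xffff0000) | 0x103C;
	/* board_id == 0x3351103C, matched against products[i].board_id */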
7478 | ||
6f039790 GKH |
7479 | static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
7480 | unsigned long *memory_bar) | |
3a7774ce SC |
7481 | { |
7482 | int i; | |
7483 | ||
7484 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) | |
12d2cd47 | 7485 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { |
3a7774ce | 7486 | /* addressing mode bits already removed */ |
12d2cd47 SC |
7487 | *memory_bar = pci_resource_start(pdev, i); |
7488 | dev_dbg(&pdev->dev, "memory BAR = %lx\n", | |
3a7774ce SC |
7489 | *memory_bar); |
7490 | return 0; | |
7491 | } | |
12d2cd47 | 7492 | dev_warn(&pdev->dev, "no memory BAR found\n"); |
3a7774ce SC |
7493 | return -ENODEV; |
7494 | } | |
7495 | ||
6f039790 GKH |
7496 | static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, |
7497 | int wait_for_ready) | |
2c4c8c8b | 7498 | { |
fe5389c8 | 7499 | int i, iterations; |
2c4c8c8b | 7500 | u32 scratchpad; |
fe5389c8 SC |
7501 | if (wait_for_ready) |
7502 | iterations = HPSA_BOARD_READY_ITERATIONS; | |
7503 | else | |
7504 | iterations = HPSA_BOARD_NOT_READY_ITERATIONS; | |
2c4c8c8b | 7505 | |
fe5389c8 SC |
7506 | for (i = 0; i < iterations; i++) { |
7507 | scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); | |
7508 | if (wait_for_ready) { | |
7509 | if (scratchpad == HPSA_FIRMWARE_READY) | |
7510 | return 0; | |
7511 | } else { | |
7512 | if (scratchpad != HPSA_FIRMWARE_READY) | |
7513 | return 0; | |
7514 | } | |
2c4c8c8b SC |
7515 | msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); |
7516 | } | |
fe5389c8 | 7517 | dev_warn(&pdev->dev, "board not ready, timed out.\n"); |
2c4c8c8b SC |
7518 | return -ENODEV; |
7519 | } | |
7520 | ||
6f039790 GKH |
7521 | static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, |
7522 | u32 *cfg_base_addr, u64 *cfg_base_addr_index, | |
7523 | u64 *cfg_offset) | |
a51fd47f SC |
7524 | { |
7525 | *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); | |
7526 | *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); | |
7527 | *cfg_base_addr &= (u32) 0x0000ffff; | |
7528 | *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); | |
7529 | if (*cfg_base_addr_index == -1) { | |
7530 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); | |
7531 | return -ENODEV; | |
7532 | } | |
7533 | return 0; | |
7534 | } | |
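A hedged sketch of how the two register reads compose into the final
mapping; the register value is illustrative:

	/* If SA5_CTCFG_OFFSET reads back 0x0014, the low 16 bits select
	 * BAR register 0x14; find_PCI_BAR_index() converts that to a
	 * resource index, and the config table then lives at
	 *
	 *	pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset
	 *
	 * which is exactly how hpsa_find_cfgtables() maps it below. */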
7535 | ||
195f2c65 RE |
7536 | static void hpsa_free_cfgtables(struct ctlr_info *h) |
7537 | { | |
105a3dbc | 7538 | if (h->transtable) { |
195f2c65 | 7539 | iounmap(h->transtable); |
105a3dbc RE |
7540 | h->transtable = NULL; |
7541 | } | |
7542 | if (h->cfgtable) { | |
195f2c65 | 7543 | iounmap(h->cfgtable); |
105a3dbc RE |
7544 | h->cfgtable = NULL; |
7545 | } | |
195f2c65 RE |
7546 | } |
7547 | ||
7548 | /* Find and map CISS config table and transfer table | |
7549 | * several items must be unmapped (freed) later
7550 | */
6f039790 | 7551 | static int hpsa_find_cfgtables(struct ctlr_info *h) |
edd16368 | 7552 | { |
01a02ffc SC |
7553 | u64 cfg_offset; |
7554 | u32 cfg_base_addr; | |
7555 | u64 cfg_base_addr_index; | |
303932fd | 7556 | u32 trans_offset; |
a51fd47f | 7557 | int rc; |
77c4495c | 7558 | |
a51fd47f SC |
7559 | rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, |
7560 | &cfg_base_addr_index, &cfg_offset); | |
7561 | if (rc) | |
7562 | return rc; | |
77c4495c | 7563 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
a51fd47f | 7564 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
cd3c81c4 RE |
7565 | if (!h->cfgtable) { |
7566 | dev_err(&h->pdev->dev, "Failed mapping cfgtable\n"); | |
77c4495c | 7567 | return -ENOMEM; |
cd3c81c4 | 7568 | } |
580ada3c SC |
7569 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
7570 | if (rc) | |
7571 | return rc; | |
77c4495c | 7572 | /* Find performant mode table. */ |
a51fd47f | 7573 | trans_offset = readl(&h->cfgtable->TransMethodOffset); |
77c4495c SC |
7574 | h->transtable = remap_pci_mem(pci_resource_start(h->pdev, |
7575 | cfg_base_addr_index)+cfg_offset+trans_offset, | |
7576 | sizeof(*h->transtable)); | |
195f2c65 RE |
7577 | if (!h->transtable) { |
7578 | dev_err(&h->pdev->dev, "Failed mapping transfer table\n"); | |
7579 | hpsa_free_cfgtables(h); | |
77c4495c | 7580 | return -ENOMEM; |
195f2c65 | 7581 | } |
77c4495c SC |
7582 | return 0; |
7583 | } | |
7584 | ||
6f039790 | 7585 | static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) |
cba3d38b | 7586 | { |
41ce4c35 SC |
7587 | #define MIN_MAX_COMMANDS 16 |
7588 | BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS); | |
7589 | ||
7590 | h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands); | |
72ceeaec SC |
7591 | |
7592 | /* Limit commands in memory limited kdump scenario. */ | |
7593 | if (reset_devices && h->max_commands > 32) | |
7594 | h->max_commands = 32; | |
7595 | ||
41ce4c35 SC |
7596 | if (h->max_commands < MIN_MAX_COMMANDS) { |
7597 | dev_warn(&h->pdev->dev, | |
7598 | "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n", | |
7599 | h->max_commands, | |
7600 | MIN_MAX_COMMANDS); | |
7601 | h->max_commands = MIN_MAX_COMMANDS; | |
cba3d38b SC |
7602 | } |
7603 | } | |
7604 | ||
c7ee65b3 WS |
7605 | /* If the controller reports that the total max sg entries is greater than 512, |
7606 | * then we know that chained SG blocks work. (Original smart arrays did not | |
7607 | * support chained SG blocks and would return zero for max sg entries.) | |
7608 | */ | |
7609 | static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h) | |
7610 | { | |
7611 | return h->maxsgentries > 512; | |
7612 | } | |
7613 | ||
b93d7536 SC |
7614 | /* Interrogate the hardware for some limits: |
7615 | * max commands, max SG elements without chaining, and with chaining, | |
7616 | * SG chain block size, etc. | |
7617 | */ | |
6f039790 | 7618 | static void hpsa_find_board_params(struct ctlr_info *h) |
b93d7536 | 7619 | { |
cba3d38b | 7620 | hpsa_get_max_perf_mode_cmds(h); |
45fcb86e | 7621 | h->nr_cmds = h->max_commands; |
b93d7536 | 7622 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); |
283b4a9b | 7623 | h->fw_support = readl(&(h->cfgtable->misc_fw_support)); |
c7ee65b3 WS |
7624 | if (hpsa_supports_chained_sg_blocks(h)) { |
7625 | /* Limit in-command s/g elements to 32 to save dma'able memory. */ | |
b93d7536 | 7626 | h->max_cmd_sg_entries = 32; |
1a63ea6f | 7627 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries; |
b93d7536 SC |
7628 | h->maxsgentries--; /* save one for chain pointer */ |
7629 | } else { | |
c7ee65b3 WS |
7630 | /* |
7631 | * Original smart arrays supported at most 31 s/g entries | |
7632 | * embedded inline in the command (trying to use more | |
7633 | * would lock up the controller) | |
7634 | */ | |
7635 | h->max_cmd_sg_entries = 31; | |
1a63ea6f | 7636 | h->maxsgentries = 31; /* default to traditional values */ |
c7ee65b3 | 7637 | h->chainsize = 0; |
b93d7536 | 7638 | } |
75167d2c SC |
7639 | |
7640 | /* Find out what task management functions are supported and cache */ | |
7641 | h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); | |
0e7a7fce ST |
7642 | if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) |
7643 | dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); | |
7644 | if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) | |
7645 | dev_warn(&h->pdev->dev, "Logical aborts not supported\n"); | |
8be986cc SC |
7646 | if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)) |
7647 | dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n"); | |
b93d7536 SC |
7648 | } |
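A worked example of the scatter-gather arithmetic above; the 2048 figure is
hypothetical:

	/* A controller reporting MaxScatterGatherElements == 2048
	 * supports chaining (2048 > 512), so the driver ends up with:
	 *	h->max_cmd_sg_entries = 32;	  embedded in the command
	 *	h->chainsize = 2048 - 32 = 2016;  room in the chain block
	 *	h->maxsgentries = 2047;		  one entry is spent on
	 *					  the chain pointer
	 * An original Smart Array reporting 0 takes the else branch and
	 * is pinned at 31 inline entries with no chaining. */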
7649 | ||
76c46e49 SC |
7650 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
7651 | { | |
0fc9fd40 | 7652 | if (!check_signature(h->cfgtable->Signature, "CISS", 4)) { |
050f7147 | 7653 | dev_err(&h->pdev->dev, "not a valid CISS config table\n"); |
76c46e49 SC |
7654 | return false; |
7655 | } | |
7656 | return true; | |
7657 | } | |
7658 | ||
97a5e98c | 7659 | static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) |
f7c39101 | 7660 | { |
97a5e98c | 7661 | u32 driver_support; |
f7c39101 | 7662 | |
97a5e98c | 7663 | driver_support = readl(&(h->cfgtable->driver_support)); |
0b9e7b74 AB |
7664 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
7665 | #ifdef CONFIG_X86 | |
97a5e98c | 7666 | driver_support |= ENABLE_SCSI_PREFETCH; |
f7c39101 | 7667 | #endif |
28e13446 SC |
7668 | driver_support |= ENABLE_UNIT_ATTN; |
7669 | writel(driver_support, &(h->cfgtable->driver_support)); | |
f7c39101 SC |
7670 | } |
7671 | ||
3d0eab67 SC |
7672 | /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result |
7673 | * in a prefetch beyond physical memory. | |
7674 | */ | |
7675 | static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) | |
7676 | { | |
7677 | u32 dma_prefetch; | |
7678 | ||
7679 | if (h->board_id != 0x3225103C) | |
7680 | return; | |
7681 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); | |
7682 | dma_prefetch |= 0x8000; | |
7683 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | |
7684 | } | |
7685 | ||
c706a795 | 7686 | static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) |
76438d08 SC |
7687 | { |
7688 | int i; | |
7689 | u32 doorbell_value; | |
7690 | unsigned long flags; | |
7691 | /* wait until the clear_event_notify bit 6 is cleared by controller. */ | |
007e7aa9 | 7692 | for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) { |
76438d08 SC |
7693 | spin_lock_irqsave(&h->lock, flags); |
7694 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | |
7695 | spin_unlock_irqrestore(&h->lock, flags); | |
7696 | if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) | |
c706a795 | 7697 | goto done; |
76438d08 | 7698 | /* delay and try again */ |
007e7aa9 | 7699 | msleep(CLEAR_EVENT_WAIT_INTERVAL); |
76438d08 | 7700 | } |
c706a795 RE |
7701 | return -ENODEV; |
7702 | done: | |
7703 | return 0; | |
76438d08 SC |
7704 | } |
7705 | ||
c706a795 | 7706 | static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
eb6b2ae9 SC |
7707 | { |
7708 | int i; | |
6eaf46fd SC |
7709 | u32 doorbell_value; |
7710 | unsigned long flags; | |
eb6b2ae9 SC |
7711 | |
7712 | /* under certain very rare conditions, this can take a while.
7713 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | |
7714 | * as we enter this code.) | |
7715 | */ | |
007e7aa9 | 7716 | for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) { |
25163bd5 WS |
7717 | if (h->remove_in_progress) |
7718 | goto done; | |
6eaf46fd SC |
7719 | spin_lock_irqsave(&h->lock, flags); |
7720 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | |
7721 | spin_unlock_irqrestore(&h->lock, flags); | |
382be668 | 7722 | if (!(doorbell_value & CFGTBL_ChangeReq)) |
c706a795 | 7723 | goto done; |
eb6b2ae9 | 7724 | /* delay and try again */ |
007e7aa9 | 7725 | msleep(MODE_CHANGE_WAIT_INTERVAL); |
eb6b2ae9 | 7726 | } |
c706a795 RE |
7727 | return -ENODEV; |
7728 | done: | |
7729 | return 0; | |
3f4336f3 SC |
7730 | } |
7731 | ||
c706a795 | 7732 | /* return -ENODEV or other reason on error, 0 on success */ |
6f039790 | 7733 | static int hpsa_enter_simple_mode(struct ctlr_info *h) |
3f4336f3 SC |
7734 | { |
7735 | u32 trans_support; | |
7736 | ||
7737 | trans_support = readl(&(h->cfgtable->TransportSupport)); | |
7738 | if (!(trans_support & SIMPLE_MODE)) | |
7739 | return -ENOTSUPP; | |
7740 | ||
7741 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); | |
283b4a9b | 7742 | |
3f4336f3 SC |
7743 | /* Update the field, and then ring the doorbell */ |
7744 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); | |
b9af4937 | 7745 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); |
3f4336f3 | 7746 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
c706a795 RE |
7747 | if (hpsa_wait_for_mode_change_ack(h)) |
7748 | goto error; | |
eb6b2ae9 | 7749 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
283b4a9b SC |
7750 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) |
7751 | goto error; | |
960a30e7 | 7752 | h->transMethod = CFGTBL_Trans_Simple; |
eb6b2ae9 | 7753 | return 0; |
283b4a9b | 7754 | error: |
050f7147 | 7755 | dev_err(&h->pdev->dev, "failed to enter simple mode\n"); |
283b4a9b | 7756 | return -ENODEV; |
eb6b2ae9 SC |
7757 | } |
7758 | ||
195f2c65 RE |
7759 | /* free items allocated or mapped by hpsa_pci_init */ |
7760 | static void hpsa_free_pci_init(struct ctlr_info *h) | |
7761 | { | |
7762 | hpsa_free_cfgtables(h); /* pci_init 4 */ | |
7763 | iounmap(h->vaddr); /* pci_init 3 */ | |
105a3dbc | 7764 | h->vaddr = NULL; |
195f2c65 | 7765 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ |
943a7021 RE |
7766 | /* |
7767 | * call pci_disable_device before pci_release_regions per | |
7768 | * Documentation/PCI/pci.txt | |
7769 | */ | |
195f2c65 | 7770 | pci_disable_device(h->pdev); /* pci_init 1 */ |
943a7021 | 7771 | pci_release_regions(h->pdev); /* pci_init 2 */ |
195f2c65 RE |
7772 | } |
7773 | ||
7774 | /* several items must be freed later */ | |
6f039790 | 7775 | static int hpsa_pci_init(struct ctlr_info *h) |
77c4495c | 7776 | { |
eb6b2ae9 | 7777 | int prod_index, err; |
edd16368 | 7778 | |
e5c880d1 SC |
7779 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); |
7780 | if (prod_index < 0) | |
60f923b9 | 7781 | return prod_index; |
e5c880d1 SC |
7782 | h->product_name = products[prod_index].product_name; |
7783 | h->access = *(products[prod_index].access); | |
edd16368 | 7784 | |
9b5c48c2 SC |
7785 | h->needs_abort_tags_swizzled = |
7786 | ctlr_needs_abort_tags_swizzled(h->board_id); | |
7787 | ||
e5a44df8 MG |
7788 | pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | |
7789 | PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); | |
7790 | ||
55c06c71 | 7791 | err = pci_enable_device(h->pdev); |
edd16368 | 7792 | if (err) { |
195f2c65 | 7793 | dev_err(&h->pdev->dev, "failed to enable PCI device\n"); |
943a7021 | 7794 | pci_disable_device(h->pdev); |
edd16368 SC |
7795 | return err; |
7796 | } | |
7797 | ||
f79cfec6 | 7798 | err = pci_request_regions(h->pdev, HPSA); |
edd16368 | 7799 | if (err) { |
55c06c71 | 7800 | dev_err(&h->pdev->dev, |
195f2c65 | 7801 | "failed to obtain PCI resources\n"); |
943a7021 RE |
7802 | pci_disable_device(h->pdev); |
7803 | return err; | |
edd16368 | 7804 | } |
4fa604e1 RE |
7805 | |
7806 | pci_set_master(h->pdev); | |
7807 | ||
6b3f4c52 | 7808 | hpsa_interrupt_mode(h); |
12d2cd47 | 7809 | err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); |
3a7774ce | 7810 | if (err) |
195f2c65 | 7811 | goto clean2; /* intmode+region, pci */ |
edd16368 | 7812 | h->vaddr = remap_pci_mem(h->paddr, 0x250); |
204892e9 | 7813 | if (!h->vaddr) { |
195f2c65 | 7814 | dev_err(&h->pdev->dev, "failed to remap PCI mem\n"); |
204892e9 | 7815 | err = -ENOMEM; |
195f2c65 | 7816 | goto clean2; /* intmode+region, pci */ |
204892e9 | 7817 | } |
fe5389c8 | 7818 | err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); |
2c4c8c8b | 7819 | if (err) |
195f2c65 | 7820 | goto clean3; /* vaddr, intmode+region, pci */ |
77c4495c SC |
7821 | err = hpsa_find_cfgtables(h); |
7822 | if (err) | |
195f2c65 | 7823 | goto clean3; /* vaddr, intmode+region, pci */ |
b93d7536 | 7824 | hpsa_find_board_params(h); |
edd16368 | 7825 | |
76c46e49 | 7826 | if (!hpsa_CISS_signature_present(h)) { |
edd16368 | 7827 | err = -ENODEV; |
195f2c65 | 7828 | goto clean4; /* cfgtables, vaddr, intmode+region, pci */ |
edd16368 | 7829 | } |
97a5e98c | 7830 | hpsa_set_driver_support_bits(h); |
3d0eab67 | 7831 | hpsa_p600_dma_prefetch_quirk(h); |
eb6b2ae9 SC |
7832 | err = hpsa_enter_simple_mode(h); |
7833 | if (err) | |
195f2c65 | 7834 | goto clean4; /* cfgtables, vaddr, intmode+region, pci */ |
edd16368 SC |
7835 | return 0; |
7836 | ||
195f2c65 RE |
7837 | clean4: /* cfgtables, vaddr, intmode+region, pci */ |
7838 | hpsa_free_cfgtables(h); | |
7839 | clean3: /* vaddr, intmode+region, pci */ | |
7840 | iounmap(h->vaddr); | |
105a3dbc | 7841 | h->vaddr = NULL; |
195f2c65 RE |
7842 | clean2: /* intmode+region, pci */ |
7843 | hpsa_disable_interrupt_mode(h); | |
943a7021 RE |
7844 | /* |
7845 | * call pci_disable_device before pci_release_regions per | |
7846 | * Documentation/PCI/pci.txt | |
7847 | */ | |
195f2c65 | 7848 | pci_disable_device(h->pdev); |
943a7021 | 7849 | pci_release_regions(h->pdev); |
edd16368 SC |
7850 | return err; |
7851 | } | |
7852 | ||
6f039790 | 7853 | static void hpsa_hba_inquiry(struct ctlr_info *h) |
339b2b14 SC |
7854 | { |
7855 | int rc; | |
7856 | ||
7857 | #define HBA_INQUIRY_BYTE_COUNT 64 | |
7858 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); | |
7859 | if (!h->hba_inquiry_data) | |
7860 | return; | |
7861 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, | |
7862 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); | |
7863 | if (rc != 0) { | |
7864 | kfree(h->hba_inquiry_data); | |
7865 | h->hba_inquiry_data = NULL; | |
7866 | } | |
7867 | } | |
7868 | ||
6b6c1cd7 | 7869 | static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id) |
4c2a8c40 | 7870 | { |
1df8552a | 7871 | int rc, i; |
3b747298 | 7872 | void __iomem *vaddr; |
4c2a8c40 SC |
7873 | |
7874 | if (!reset_devices) | |
7875 | return 0; | |
7876 | ||
132aa220 TH |
7877 | /* kdump kernel is loading, we don't know what state the
7878 | * pci interface is in. The dev->enable_cnt equals zero,
7879 | * so we call enable+disable, wait a while, and switch it on.
7880 | */ | |
7881 | rc = pci_enable_device(pdev); | |
7882 | if (rc) { | |
7883 | dev_warn(&pdev->dev, "Failed to enable PCI device\n"); | |
7884 | return -ENODEV; | |
7885 | } | |
7886 | pci_disable_device(pdev); | |
7887 | msleep(260); /* a randomly chosen number */ | |
7888 | rc = pci_enable_device(pdev); | |
7889 | if (rc) { | |
7890 | dev_warn(&pdev->dev, "failed to enable device.\n"); | |
7891 | return -ENODEV; | |
7892 | } | |
4fa604e1 | 7893 | |
859c75ab | 7894 | pci_set_master(pdev); |
4fa604e1 | 7895 | |
3b747298 TH |
7896 | vaddr = pci_ioremap_bar(pdev, 0); |
7897 | if (vaddr == NULL) { | |
7898 | rc = -ENOMEM; | |
7899 | goto out_disable; | |
7900 | } | |
7901 | writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET); | |
7902 | iounmap(vaddr); | |
7903 | ||
1df8552a | 7904 | /* Reset the controller with a PCI power-cycle or via doorbell */ |
6b6c1cd7 | 7905 | rc = hpsa_kdump_hard_reset_controller(pdev, board_id); |
4c2a8c40 | 7906 | |
1df8552a SC |
7907 | /* -ENOTSUPP here means we cannot reset the controller |
7908 | * but it's already (and still) up and running in | |
18867659 SC |
7909 | * "performant mode". Or, it might be 640x, which can't reset |
7910 | * due to concerns about shared bbwc between 6402/6404 pair. | |
1df8552a | 7911 | */ |
adf1b3a3 | 7912 | if (rc) |
132aa220 | 7913 | goto out_disable; |
4c2a8c40 SC |
7914 | |
7915 | /* Now try to get the controller to respond to a no-op */ | |
1ba66c9c | 7916 | dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n"); |
4c2a8c40 SC |
7917 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { |
7918 | if (hpsa_noop(pdev) == 0) | |
7919 | break; | |
7920 | else | |
7921 | dev_warn(&pdev->dev, "no-op failed%s\n", | |
7922 | (i < 11 ? "; re-trying" : "")); | |
7923 | } | |
132aa220 TH |
7924 | |
7925 | out_disable: | |
7926 | ||
7927 | pci_disable_device(pdev); | |
7928 | return rc; | |
4c2a8c40 SC |
7929 | } |
7930 | ||
1fb7c98a RE |
7931 | static void hpsa_free_cmd_pool(struct ctlr_info *h) |
7932 | { | |
7933 | kfree(h->cmd_pool_bits); | |
105a3dbc RE |
7934 | h->cmd_pool_bits = NULL; |
7935 | if (h->cmd_pool) { | |
1fb7c98a RE |
7936 | pci_free_consistent(h->pdev, |
7937 | h->nr_cmds * sizeof(struct CommandList), | |
7938 | h->cmd_pool, | |
7939 | h->cmd_pool_dhandle); | |
105a3dbc RE |
7940 | h->cmd_pool = NULL; |
7941 | h->cmd_pool_dhandle = 0; | |
7942 | } | |
7943 | if (h->errinfo_pool) { | |
1fb7c98a RE |
7944 | pci_free_consistent(h->pdev, |
7945 | h->nr_cmds * sizeof(struct ErrorInfo), | |
7946 | h->errinfo_pool, | |
7947 | h->errinfo_pool_dhandle); | |
105a3dbc RE |
7948 | h->errinfo_pool = NULL; |
7949 | h->errinfo_pool_dhandle = 0; | |
7950 | } | |
1fb7c98a RE |
7951 | } |
7952 | ||
d37ffbe4 | 7953 | static int hpsa_alloc_cmd_pool(struct ctlr_info *h) |
2e9d1b36 SC |
7954 | { |
7955 | h->cmd_pool_bits = kzalloc( | |
7956 | DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * | |
7957 | sizeof(unsigned long), GFP_KERNEL); | |
7958 | h->cmd_pool = pci_alloc_consistent(h->pdev, | |
7959 | h->nr_cmds * sizeof(*h->cmd_pool), | |
7960 | &(h->cmd_pool_dhandle)); | |
7961 | h->errinfo_pool = pci_alloc_consistent(h->pdev, | |
7962 | h->nr_cmds * sizeof(*h->errinfo_pool), | |
7963 | &(h->errinfo_pool_dhandle)); | |
7964 | if ((h->cmd_pool_bits == NULL) | |
7965 | || (h->cmd_pool == NULL) | |
7966 | || (h->errinfo_pool == NULL)) { | |
7967 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); | |
2c143342 | 7968 | goto clean_up; |
2e9d1b36 | 7969 | } |
360c73bd | 7970 | hpsa_preinitialize_commands(h); |
2e9d1b36 | 7971 | return 0; |
2c143342 RE |
7972 | clean_up: |
7973 | hpsa_free_cmd_pool(h); | |
7974 | return -ENOMEM; | |
2e9d1b36 SC |
7975 | } |
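The bitmap sizing deserves a worked example; the command count is
hypothetical:

	/* With h->nr_cmds == 1024 on a 64-bit kernel (BITS_PER_LONG ==
	 * 64), DIV_ROUND_UP(1024, 64) == 16 unsigned longs (128 bytes)
	 * track one allocation bit per command, while the two DMA pools
	 * hold 1024 struct CommandList and 1024 struct ErrorInfo
	 * entries respectively. */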
7976 | ||
41b3cf08 SC |
7977 | static void hpsa_irq_affinity_hints(struct ctlr_info *h) |
7978 | { | |
ec429952 | 7979 | int i, cpu; |
41b3cf08 SC |
7980 | |
7981 | cpu = cpumask_first(cpu_online_mask); | |
7982 | for (i = 0; i < h->msix_vector; i++) { | |
ec429952 | 7983 | irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); |
41b3cf08 SC |
7984 | cpu = cpumask_next(cpu, cpu_online_mask); |
7985 | } | |
7986 | } | |
7987 | ||
ec501a18 RE |
7988 | /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */ |
7989 | static void hpsa_free_irqs(struct ctlr_info *h) | |
7990 | { | |
7991 | int i; | |
7992 | ||
7993 | if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) { | |
7994 | /* Single reply queue, only one irq to free */ | |
7995 | i = h->intr_mode; | |
7996 | irq_set_affinity_hint(h->intr[i], NULL); | |
7997 | free_irq(h->intr[i], &h->q[i]); | |
105a3dbc | 7998 | h->q[i] = 0; |
ec501a18 RE |
7999 | return; |
8000 | } | |
8001 | ||
8002 | for (i = 0; i < h->msix_vector; i++) { | |
8003 | irq_set_affinity_hint(h->intr[i], NULL); | |
8004 | free_irq(h->intr[i], &h->q[i]); | |
105a3dbc | 8005 | h->q[i] = 0; |
ec501a18 | 8006 | } |
a4e17fc1 RE |
8007 | for (; i < MAX_REPLY_QUEUES; i++) |
8008 | h->q[i] = 0; | |
ec501a18 RE |
8009 | } |
8010 | ||
9ee61794 RE |
8011 | /* returns 0 on success; cleans up and returns -Enn on error */ |
8012 | static int hpsa_request_irqs(struct ctlr_info *h, | |
0ae01a32 SC |
8013 | irqreturn_t (*msixhandler)(int, void *), |
8014 | irqreturn_t (*intxhandler)(int, void *)) | |
8015 | { | |
254f796b | 8016 | int rc, i; |
0ae01a32 | 8017 | |
254f796b MG |
8018 | /* |
8019 | * initialize h->q[x] = x so that interrupt handlers know which | |
8020 | * queue to process. | |
8021 | */ | |
8022 | for (i = 0; i < MAX_REPLY_QUEUES; i++) | |
8023 | h->q[i] = (u8) i; | |
8024 | ||
eee0f03a | 8025 | if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) { |
254f796b | 8026 | /* If performant mode and MSI-X, use multiple reply queues */ |
a4e17fc1 | 8027 | for (i = 0; i < h->msix_vector; i++) { |
8b47004a | 8028 | sprintf(h->intrname[i], "%s-msix%d", h->devname, i); |
254f796b | 8029 | rc = request_irq(h->intr[i], msixhandler, |
8b47004a | 8030 | 0, h->intrname[i], |
254f796b | 8031 | &h->q[i]); |
a4e17fc1 RE |
8032 | if (rc) { |
8033 | int j; | |
8034 | ||
8035 | dev_err(&h->pdev->dev, | |
8036 | "failed to get irq %d for %s\n", | |
8037 | h->intr[i], h->devname); | |
8038 | for (j = 0; j < i; j++) { | |
8039 | free_irq(h->intr[j], &h->q[j]); | |
8040 | h->q[j] = 0; | |
8041 | } | |
8042 | for (; j < MAX_REPLY_QUEUES; j++) | |
8043 | h->q[j] = 0; | |
8044 | return rc; | |
8045 | } | |
8046 | } | |
41b3cf08 | 8047 | hpsa_irq_affinity_hints(h); |
254f796b MG |
8048 | } else { |
8049 | /* Use single reply pool */ | |
eee0f03a | 8050 | if (h->msix_vector > 0 || h->msi_vector) { |
8b47004a RE |
8051 | if (h->msix_vector) |
8052 | sprintf(h->intrname[h->intr_mode], | |
8053 | "%s-msix", h->devname); | |
8054 | else | |
8055 | sprintf(h->intrname[h->intr_mode], | |
8056 | "%s-msi", h->devname); | |
254f796b | 8057 | rc = request_irq(h->intr[h->intr_mode], |
8b47004a RE |
8058 | msixhandler, 0, |
8059 | h->intrname[h->intr_mode], | |
254f796b MG |
8060 | &h->q[h->intr_mode]); |
8061 | } else { | |
8b47004a RE |
8062 | sprintf(h->intrname[h->intr_mode], |
8063 | "%s-intx", h->devname); | |
254f796b | 8064 | rc = request_irq(h->intr[h->intr_mode], |
8b47004a RE |
8065 | intxhandler, IRQF_SHARED, |
8066 | h->intrname[h->intr_mode], | |
254f796b MG |
8067 | &h->q[h->intr_mode]); |
8068 | } | |
105a3dbc | 8069 | irq_set_affinity_hint(h->intr[h->intr_mode], NULL); |
254f796b | 8070 | } |
0ae01a32 | 8071 | if (rc) { |
195f2c65 | 8072 | dev_err(&h->pdev->dev, "failed to get irq %d for %s\n", |
0ae01a32 | 8073 | h->intr[h->intr_mode], h->devname); |
195f2c65 | 8074 | hpsa_free_irqs(h); |
0ae01a32 SC |
8075 | return -ENODEV; |
8076 | } | |
8077 | return 0; | |
8078 | } | |
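A hedged usage sketch of how the probe path is expected to wire in the
interrupt handlers defined earlier in this file; the unwind label is
hypothetical:

	rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
	if (rc)
		goto clean_irq;	/* hypothetical unwind label */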
8079 | ||
6f039790 | 8080 | static int hpsa_kdump_soft_reset(struct ctlr_info *h) |
64670ac8 | 8081 | { |
39c53f55 | 8082 | int rc; |
bf43caf3 | 8083 | hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER); |
64670ac8 SC |
8084 | |
8085 | dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); | |
39c53f55 RE |
8086 | rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY); |
8087 | if (rc) { | |
64670ac8 | 8088 | dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); |
39c53f55 | 8089 | return rc; |
64670ac8 SC |
8090 | } |
8091 | ||
8092 | dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); | |
39c53f55 RE |
8093 | rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); |
8094 | if (rc) { | |
64670ac8 SC |
8095 | dev_warn(&h->pdev->dev, "Board failed to become ready " |
8096 | "after soft reset.\n"); | |
39c53f55 | 8097 | return rc; |
64670ac8 SC |
8098 | } |
8099 | ||
8100 | return 0; | |
8101 | } | |
8102 | ||
072b0518 SC |
8103 | static void hpsa_free_reply_queues(struct ctlr_info *h) |
8104 | { | |
8105 | int i; | |
8106 | ||
8107 | for (i = 0; i < h->nreply_queues; i++) { | |
8108 | if (!h->reply_queue[i].head) | |
8109 | continue; | |
1fb7c98a RE |
8110 | pci_free_consistent(h->pdev, |
8111 | h->reply_queue_size, | |
8112 | h->reply_queue[i].head, | |
8113 | h->reply_queue[i].busaddr); | |
072b0518 SC |
8114 | h->reply_queue[i].head = NULL; |
8115 | h->reply_queue[i].busaddr = 0; | |
8116 | } | |
105a3dbc | 8117 | h->reply_queue_size = 0; |
072b0518 SC |
8118 | } |
8119 | ||
0097f0f4 SC |
8120 | static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) |
8121 | { | |
105a3dbc RE |
8122 | hpsa_free_performant_mode(h); /* init_one 7 */ |
8123 | hpsa_free_sg_chain_blocks(h); /* init_one 6 */ | |
8124 | hpsa_free_cmd_pool(h); /* init_one 5 */ | |
8125 | hpsa_free_irqs(h); /* init_one 4 */ | |
2946e82b RE |
8126 | scsi_host_put(h->scsi_host); /* init_one 3 */ |
8127 | h->scsi_host = NULL; /* init_one 3 */ | |
8128 | hpsa_free_pci_init(h); /* init_one 2_5 */ | |
9ecd953a RE |
8129 | free_percpu(h->lockup_detected); /* init_one 2 */ |
8130 | h->lockup_detected = NULL; /* init_one 2 */ | |
8131 | if (h->resubmit_wq) { | |
8132 | destroy_workqueue(h->resubmit_wq); /* init_one 1 */ | |
8133 | h->resubmit_wq = NULL; | |
8134 | } | |
8135 | if (h->rescan_ctlr_wq) { | |
8136 | destroy_workqueue(h->rescan_ctlr_wq); | |
8137 | h->rescan_ctlr_wq = NULL; | |
8138 | } | |
105a3dbc | 8139 | kfree(h); /* init_one 1 */ |
64670ac8 SC |
8140 | } |
8141 | ||
a0c12413 | 8142 | /* Called when controller lockup detected. */ |
f2405db8 | 8143 | static void fail_all_outstanding_cmds(struct ctlr_info *h) |
a0c12413 | 8144 | { |
281a7fd0 WS |
8145 | int i, refcount; |
8146 | struct CommandList *c; | |
25163bd5 | 8147 | int failcount = 0; |
a0c12413 | 8148 | |
080ef1cc | 8149 | flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */ |
f2405db8 | 8150 | for (i = 0; i < h->nr_cmds; i++) { |
f2405db8 | 8151 | c = h->cmd_pool + i; |
281a7fd0 WS |
8152 | refcount = atomic_inc_return(&c->refcount); |
8153 | if (refcount > 1) { | |
25163bd5 | 8154 | c->err_info->CommandStatus = CMD_CTLR_LOCKUP; |
281a7fd0 | 8155 | finish_cmd(c); |
433b5f4d | 8156 | atomic_dec(&h->commands_outstanding); |
25163bd5 | 8157 | failcount++; |
281a7fd0 WS |
8158 | } |
8159 | cmd_free(h, c); | |
a0c12413 | 8160 | } |
25163bd5 WS |
8161 | dev_warn(&h->pdev->dev, |
8162 | "failed %d commands in fail_all\n", failcount); | |
a0c12413 SC |
8163 | } |
8164 | ||
094963da SC |
8165 | static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) |
8166 | { | |
c8ed0010 | 8167 | int cpu; |
094963da | 8168 | |
c8ed0010 | 8169 | for_each_online_cpu(cpu) { |
094963da SC |
8170 | u32 *lockup_detected; |
8171 | lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); | |
8172 | *lockup_detected = value; | |
094963da SC |
8173 | } |
8174 | wmb(); /* be sure the per-cpu variables are out to memory */ | |
8175 | } | |
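A hedged sketch of the matching reader side: each CPU consults only its own
copy, so the hot path never bounces a shared cache line. The function name is
illustrative:

	static u32 demo_lockup_detected(struct ctlr_info *h)
	{
		u32 rc;

		rc = *per_cpu_ptr(h->lockup_detected, get_cpu());
		put_cpu();
		return rc;
	}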
8176 | ||
a0c12413 SC |
8177 | static void controller_lockup_detected(struct ctlr_info *h) |
8178 | { | |
8179 | unsigned long flags; | |
094963da | 8180 | u32 lockup_detected; |
a0c12413 | 8181 | |
a0c12413 SC |
8182 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
8183 | spin_lock_irqsave(&h->lock, flags); | |
094963da SC |
8184 | lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); |
8185 | if (!lockup_detected) { | |
8186 | /* no heartbeat, but controller gave us a zero. */ | |
8187 | dev_warn(&h->pdev->dev, | |
25163bd5 WS |
8188 | "lockup detected after %d but scratchpad register is zero\n", |
8189 | h->heartbeat_sample_interval / HZ); | |
094963da SC |
8190 | lockup_detected = 0xffffffff; |
8191 | } | |
8192 | set_lockup_detected_for_all_cpus(h, lockup_detected); | |
a0c12413 | 8193 | spin_unlock_irqrestore(&h->lock, flags); |
25163bd5 WS |
8194 | dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n", |
8195 | lockup_detected, h->heartbeat_sample_interval / HZ); | |
a0c12413 | 8196 | pci_disable_device(h->pdev); |
f2405db8 | 8197 | fail_all_outstanding_cmds(h); |
a0c12413 SC |
8198 | } |
8199 | ||
25163bd5 | 8200 | static int detect_controller_lockup(struct ctlr_info *h) |
a0c12413 SC |
8201 | { |
8202 | u64 now; | |
8203 | u32 heartbeat; | |
8204 | unsigned long flags; | |
8205 | ||
a0c12413 SC |
8206 | now = get_jiffies_64(); |
8207 | /* If we've received an interrupt recently, we're ok. */ | |
8208 | if (time_after64(h->last_intr_timestamp + | |
e85c5974 | 8209 | (h->heartbeat_sample_interval), now)) |
25163bd5 | 8210 | return false; |
a0c12413 SC |
8211 | |
8212 | /* | |
8213 | * If we've already checked the heartbeat recently, we're ok. | |
8214 | * This could happen if someone sends us a signal. We | |
8215 | * otherwise don't care about signals in this thread. | |
8216 | */ | |
8217 | if (time_after64(h->last_heartbeat_timestamp + | |
e85c5974 | 8218 | (h->heartbeat_sample_interval), now)) |
25163bd5 | 8219 | return false; |
a0c12413 SC |
8220 | |
8221 | /* If heartbeat has not changed since we last looked, we're not ok. */ | |
8222 | spin_lock_irqsave(&h->lock, flags); | |
8223 | heartbeat = readl(&h->cfgtable->HeartBeat); | |
8224 | spin_unlock_irqrestore(&h->lock, flags); | |
8225 | if (h->last_heartbeat == heartbeat) { | |
8226 | controller_lockup_detected(h); | |
25163bd5 | 8227 | return true; |
a0c12413 SC |
8228 | } |
8229 | ||
8230 | /* We're ok. */ | |
8231 | h->last_heartbeat = heartbeat; | |
8232 | h->last_heartbeat_timestamp = now; | |
25163bd5 | 8233 | return false; |
a0c12413 SC |
8234 | } |
8235 | ||
9846590e | 8236 | static void hpsa_ack_ctlr_events(struct ctlr_info *h) |
76438d08 SC |
8237 | { |
8238 | int i; | |
8239 | char *event_type; | |
8240 | ||
e4aa3e6a SC |
8241 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
8242 | return; | |
8243 | ||
76438d08 | 8244 | /* Ask the controller to clear the events we're handling. */ |
1f7cee8c SC |
8245 | if ((h->transMethod & (CFGTBL_Trans_io_accel1 |
8246 | | CFGTBL_Trans_io_accel2)) && | |
76438d08 SC |
8247 | (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || |
8248 | h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { | |
8249 | ||
8250 | if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) | |
8251 | event_type = "state change"; | |
8252 | if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) | |
8253 | event_type = "configuration change"; | |
8254 | /* Stop sending new RAID offload reqs via the IO accelerator */ | |
8255 | scsi_block_requests(h->scsi_host); | |
8256 | for (i = 0; i < h->ndevices; i++) | |
8257 | h->dev[i]->offload_enabled = 0; | |
23100dd9 | 8258 | hpsa_drain_accel_commands(h); |
76438d08 SC |
8259 | /* Set 'accelerator path config change' bit */ |
8260 | dev_warn(&h->pdev->dev, | |
8261 | "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", | |
8262 | h->events, event_type); | |
8263 | writel(h->events, &(h->cfgtable->clear_event_notify)); | |
8264 | /* Set the "clear event notify field update" bit 6 */ | |
8265 | writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); | |
8266 | /* Wait until ctlr clears 'clear event notify field', bit 6 */ | |
8267 | hpsa_wait_for_clear_event_notify_ack(h); | |
8268 | scsi_unblock_requests(h->scsi_host); | |
8269 | } else { | |
8270 | /* Acknowledge controller notification events. */ | |
8271 | writel(h->events, &(h->cfgtable->clear_event_notify)); | |
8272 | writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); | |
8273 | hpsa_wait_for_clear_event_notify_ack(h); | |
8274 | #if 0 | |
8275 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | |
8276 | hpsa_wait_for_mode_change_ack(h); | |
8277 | #endif | |
8278 | } | |
9846590e | 8279 | return; |
76438d08 SC |
8280 | } |
8281 | ||
8282 | /* Check a register on the controller to see if there are configuration | |
8283 | * changes (added/changed/removed logical drives, etc.) which mean that | |
e863d68e ST |
8284 | * we should rescan the controller for devices. |
8285 | * Also check flag for driver-initiated rescan. | |
76438d08 | 8286 | */ |
9846590e | 8287 | static int hpsa_ctlr_needs_rescan(struct ctlr_info *h) |
76438d08 | 8288 | { |
853633e8 DB |
8289 | if (h->drv_req_rescan) { |
8290 | h->drv_req_rescan = 0; | |
8291 | return 1; | |
8292 | } | |
8293 | ||
76438d08 | 8294 | if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) |
9846590e | 8295 | return 0; |
76438d08 SC |
8296 | |
8297 | h->events = readl(&(h->cfgtable->event_notify)); | |
9846590e SC |
8298 | return h->events & RESCAN_REQUIRED_EVENT_BITS; |
8299 | } | |
76438d08 | 8300 | |
9846590e SC |
8301 | /* |
8302 | * Check if any of the offline devices have become ready | |
8303 | */ | |
8304 | static int hpsa_offline_devices_ready(struct ctlr_info *h) | |
8305 | { | |
8306 | unsigned long flags; | |
8307 | struct offline_device_entry *d; | |
8308 | struct list_head *this, *tmp; | |
8309 | ||
8310 | spin_lock_irqsave(&h->offline_device_lock, flags); | |
8311 | list_for_each_safe(this, tmp, &h->offline_device_list) { | |
8312 | d = list_entry(this, struct offline_device_entry, | |
8313 | offline_list); | |
8314 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
d1fea47c SC |
8315 | if (!hpsa_volume_offline(h, d->scsi3addr)) { |
8316 | spin_lock_irqsave(&h->offline_device_lock, flags); | |
8317 | list_del(&d->offline_list); | |
8318 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
9846590e | 8319 | return 1; |
d1fea47c | 8320 | } |
9846590e SC |
8321 | spin_lock_irqsave(&h->offline_device_lock, flags); |
8322 | } | |
8323 | spin_unlock_irqrestore(&h->offline_device_lock, flags); | |
8324 | return 0; | |
76438d08 SC |
8325 | } |
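/* Note the lock choreography above: offline_device_lock is dropped
 * around hpsa_volume_offline(), which sends commands to the controller
 * and may sleep.  list_for_each_safe() keeps the traversal valid across
 * the re-acquire, and the entry is unlinked back under the lock before
 * we return.
 */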
8326 | ||
34592254 ST |
8327 | static int hpsa_luns_changed(struct ctlr_info *h) |
8328 | { | |
8329 | int rc = 1; /* assume there are changes */ | |
8330 | struct ReportLUNdata *logdev = NULL; | |
8331 | ||
8332 | /* if we can't find out if lun data has changed, | |
8333 | * assume that it has. | |
8334 | */ | |
8335 | ||
8336 | if (!h->lastlogicals) | |
8337 | goto out; | |
8338 | ||
8339 | logdev = kzalloc(sizeof(*logdev), GFP_KERNEL); | |
8340 | if (!logdev) { | |
8341 | dev_warn(&h->pdev->dev, | |
8342 | "Out of memory, can't track lun changes.\n"); | |
8343 | goto out; | |
8344 | } | |
8345 | if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) { | |
8346 | dev_warn(&h->pdev->dev, | |
8347 | "report luns failed, can't track lun changes.\n"); | |
8348 | goto out; | |
8349 | } | |
8350 | if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) { | |
8351 | dev_info(&h->pdev->dev, | |
8352 | "Lun changes detected.\n"); | |
8353 | memcpy(h->lastlogicals, logdev, sizeof(*logdev)); | |
8354 | goto out; | |
8355 | } else | |
8356 | rc = 0; /* no changes detected. */ | |
8357 | out: | |
8358 | kfree(logdev); | |
8359 | return rc; | |
8360 | } | |
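/* Change detection here is a plain snapshot compare: h->lastlogicals
 * caches the previous logical-LUN report, and one memcmp() against a
 * freshly fetched report is enough to notice added or removed logical
 * drives without decoding individual LUN entries.
 */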
8361 | ||
6636e7f4 | 8362 | static void hpsa_rescan_ctlr_worker(struct work_struct *work) |
a0c12413 SC |
8363 | { |
8364 | unsigned long flags; | |
8a98db73 | 8365 | struct ctlr_info *h = container_of(to_delayed_work(work), |
6636e7f4 DB |
8366 | struct ctlr_info, rescan_ctlr_work); |
8367 | ||
8368 | ||
8369 | if (h->remove_in_progress) | |
8a98db73 | 8370 | return; |
9846590e SC |
8371 | |
8372 | if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { | |
8373 | scsi_host_get(h->scsi_host); | |
9846590e SC |
8374 | hpsa_ack_ctlr_events(h); |
8375 | hpsa_scan_start(h->scsi_host); | |
8376 | scsi_host_put(h->scsi_host); | |
34592254 | 8377 | } else if (h->discovery_polling) { |
c2adae44 | 8378 | hpsa_disable_rld_caching(h); |
34592254 ST |
8379 | if (hpsa_luns_changed(h)) { |
8380 | struct Scsi_Host *sh = NULL; | |
8381 | ||
8382 | dev_info(&h->pdev->dev, | |
8383 | "driver discovery polling rescan.\n"); | |
8384 | sh = scsi_host_get(h->scsi_host); | |
8385 | if (sh != NULL) { | |
8386 | hpsa_scan_start(sh); | |
8387 | scsi_host_put(sh); | |
8388 | } | |
8389 | } | |
9846590e | 8390 | } |
8a98db73 | 8391 | spin_lock_irqsave(&h->lock, flags); |
6636e7f4 DB |
8392 | if (!h->remove_in_progress) |
8393 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, | |
8394 | h->heartbeat_sample_interval); | |
8395 | spin_unlock_irqrestore(&h->lock, flags); | |
8396 | } | |
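/* Both this worker and hpsa_monitor_ctlr_worker() below re-arm
 * themselves: one pass, then, under h->lock, re-queue at
 * heartbeat_sample_interval unless remove_in_progress is set.  Doing the
 * check-and-queue under the lock keeps a re-arm from racing with the
 * cancel_delayed_work_sync() calls in hpsa_remove_one().
 */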
8397 | ||
8398 | static void hpsa_monitor_ctlr_worker(struct work_struct *work) | |
8399 | { | |
8400 | unsigned long flags; | |
8401 | struct ctlr_info *h = container_of(to_delayed_work(work), | |
8402 | struct ctlr_info, monitor_ctlr_work); | |
8403 | ||
8404 | detect_controller_lockup(h); | |
8405 | if (lockup_detected(h)) | |
a0c12413 | 8406 | return; |
6636e7f4 DB |
8407 | |
8408 | spin_lock_irqsave(&h->lock, flags); | |
8409 | if (!h->remove_in_progress) | |
8410 | schedule_delayed_work(&h->monitor_ctlr_work, | |
8a98db73 SC |
8411 | h->heartbeat_sample_interval); |
8412 | spin_unlock_irqrestore(&h->lock, flags); | |
a0c12413 SC |
8413 | } |
8414 | ||
6636e7f4 DB |
8415 | static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, |
8416 | char *name) | |
8417 | { | |
8418 | struct workqueue_struct *wq = NULL; | |
6636e7f4 | 8419 | |
397ea9cb | 8420 | wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); |
6636e7f4 DB |
8421 | if (!wq) |
8422 | dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); | |
8423 | ||
8424 | return wq; | |
8425 | } | |
8426 | ||
6f039790 | 8427 | static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
edd16368 | 8428 | { |
4c2a8c40 | 8429 | int dac, rc; |
edd16368 | 8430 | struct ctlr_info *h; |
64670ac8 SC |
8431 | int try_soft_reset = 0; |
8432 | unsigned long flags; | |
6b6c1cd7 | 8433 | u32 board_id; |
edd16368 SC |
8434 | |
8435 | if (number_of_controllers == 0) | |
8436 | printk(KERN_INFO DRIVER_NAME "\n"); | |
edd16368 | 8437 | |
6b6c1cd7 TH |
8438 | rc = hpsa_lookup_board_id(pdev, &board_id); |
8439 | if (rc < 0) { | |
8440 | dev_warn(&pdev->dev, "Board ID not found\n"); | |
8441 | return rc; | |
8442 | } | |
8443 | ||
8444 | rc = hpsa_init_reset_devices(pdev, board_id); | |
64670ac8 SC |
8445 | if (rc) { |
8446 | if (rc != -ENOTSUPP) | |
8447 | return rc; | |
8448 | /* If the reset fails in a particular way (it has no way to do | |
8449 | * a proper hard reset, so returns -ENOTSUPP) we can try to do | |
8450 | * a soft reset once we get the controller configured up to the | |
8451 | * point that it can accept a command. | |
8452 | */ | |
8453 | try_soft_reset = 1; | |
8454 | rc = 0; | |
8455 | } | |
8456 | ||
8457 | reinit_after_soft_reset: | |
edd16368 | 8458 | |
303932fd DB |
8459 | /* Command structures must be aligned on a 32-byte boundary because |
8460 | * the 5 lower bits of the address are used by the hardware and by
8461 | * the driver. See comments in hpsa.h for more info. | |
8462 | */ | |
303932fd | 8463 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); |
edd16368 | 8464 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
105a3dbc RE |
8465 | if (!h) { |
8466 | dev_err(&pdev->dev, "Failed to allocate controller head\n"); | |
ecd9aad4 | 8467 | return -ENOMEM; |
105a3dbc | 8468 | } |
edd16368 | 8469 | |
55c06c71 | 8470 | h->pdev = pdev; |
105a3dbc | 8471 | |
a9a3a273 | 8472 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; |
9846590e | 8473 | INIT_LIST_HEAD(&h->offline_device_list); |
6eaf46fd | 8474 | spin_lock_init(&h->lock); |
9846590e | 8475 | spin_lock_init(&h->offline_device_lock); |
6eaf46fd | 8476 | spin_lock_init(&h->scan_lock); |
34f0c627 | 8477 | atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); |
9b5c48c2 | 8478 | atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); |
094963da SC |
8479 | |
8480 | /* Allocate and clear per-cpu variable lockup_detected */ | |
8481 | h->lockup_detected = alloc_percpu(u32); | |
2a5ac326 | 8482 | if (!h->lockup_detected) { |
105a3dbc | 8483 | dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); |
2a5ac326 | 8484 | rc = -ENOMEM; |
2efa5929 | 8485 | goto clean1; /* aer/h */ |
2a5ac326 | 8486 | } |
094963da SC |
8487 | set_lockup_detected_for_all_cpus(h, 0); |
8488 | ||
55c06c71 | 8489 | rc = hpsa_pci_init(h); |
105a3dbc | 8490 | if (rc) |
2946e82b RE |
8491 | goto clean2; /* lu, aer/h */ |
8492 | ||
8493 | /* relies on h-> settings made by hpsa_pci_init, including | |
8494 | * interrupt_mode h->intr */ | |
8495 | rc = hpsa_scsi_host_alloc(h); | |
8496 | if (rc) | |
8497 | goto clean2_5; /* pci, lu, aer/h */ | |
edd16368 | 8498 | |
2946e82b | 8499 | sprintf(h->devname, HPSA "%d", h->scsi_host->host_no); |
edd16368 SC |
8500 | h->ctlr = number_of_controllers; |
8501 | number_of_controllers++; | |
edd16368 SC |
8502 | |
8503 | /* configure PCI DMA stuff */ | |
ecd9aad4 SC |
8504 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
8505 | if (rc == 0) { | |
edd16368 | 8506 | dac = 1; |
ecd9aad4 SC |
8507 | } else { |
8508 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
8509 | if (rc == 0) { | |
8510 | dac = 0; | |
8511 | } else { | |
8512 | dev_err(&pdev->dev, "no suitable DMA available\n"); | |
2946e82b | 8513 | goto clean3; /* shost, pci, lu, aer/h */ |
ecd9aad4 | 8514 | } |
edd16368 SC |
8515 | } |
8516 | ||
8517 | /* make sure the board interrupts are off */ | |
8518 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
10f66018 | 8519 | |
105a3dbc RE |
8520 | rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx); |
8521 | if (rc) | |
2946e82b | 8522 | goto clean3; /* shost, pci, lu, aer/h */ |
d37ffbe4 | 8523 | rc = hpsa_alloc_cmd_pool(h); |
8947fd10 | 8524 | if (rc) |
2946e82b | 8525 | goto clean4; /* irq, shost, pci, lu, aer/h */ |
105a3dbc RE |
8526 | rc = hpsa_alloc_sg_chain_blocks(h); |
8527 | if (rc) | |
2946e82b | 8528 | goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ |
a08a8471 | 8529 | init_waitqueue_head(&h->scan_wait_queue); |
9b5c48c2 | 8530 | init_waitqueue_head(&h->abort_cmd_wait_queue); |
d604f533 WS |
8531 | init_waitqueue_head(&h->event_sync_wait_queue); |
8532 | mutex_init(&h->reset_mutex); | |
a08a8471 | 8533 | h->scan_finished = 1; /* no scan currently in progress */ |
edd16368 SC |
8534 | |
8535 | pci_set_drvdata(pdev, h); | |
9a41338e | 8536 | h->ndevices = 0; |
2946e82b | 8537 | |
9a41338e | 8538 | spin_lock_init(&h->devlock); |
105a3dbc RE |
8539 | rc = hpsa_put_ctlr_into_performant_mode(h); |
8540 | if (rc) | |
2946e82b RE |
8541 | goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */ |
8542 | ||
8543 | /* hook into SCSI subsystem */ | |
8544 | rc = hpsa_scsi_add_host(h); | |
8545 | if (rc) | |
8546 | goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ | |
2efa5929 RE |
8547 | |
8548 | /* create the resubmit workqueue */ | |
8549 | h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan"); | |
8550 | if (!h->rescan_ctlr_wq) { | |
8551 | rc = -ENOMEM; | |
8552 | goto clean7; | |
8553 | } | |
8554 | ||
8555 | h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit"); | |
8556 | if (!h->resubmit_wq) { | |
8557 | rc = -ENOMEM; | |
8558 | goto clean7; /* aer/h */ | |
8559 | } | |
64670ac8 | 8560 | |
105a3dbc RE |
8561 | /* |
8562 | * At this point, the controller is ready to take commands. | |
64670ac8 SC |
8563 | * Now, if reset_devices and the hard reset didn't work, try |
8564 | * the soft reset and see if that works. | |
8565 | */ | |
8566 | if (try_soft_reset) { | |
8567 | ||
8568 | /* This is kind of gross. We may or may not get a completion | |
8569 | * from the soft reset command, and if we do, then the value | |
8570 | * from the fifo may or may not be valid. So, we wait 10 secs | |
8571 | * after the reset throwing away any completions we get during | |
8572 | * that time. Unregister the interrupt handler and register | |
8573 | * fake ones to scoop up any residual completions. | |
8574 | */ | |
8575 | spin_lock_irqsave(&h->lock, flags); | |
8576 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
8577 | spin_unlock_irqrestore(&h->lock, flags); | |
ec501a18 | 8578 | hpsa_free_irqs(h); |
9ee61794 | 8579 | rc = hpsa_request_irqs(h, hpsa_msix_discard_completions, |
64670ac8 SC |
8580 | hpsa_intx_discard_completions); |
8581 | if (rc) { | |
9ee61794 RE |
8582 | dev_warn(&h->pdev->dev, |
8583 | "Failed to request_irq after soft reset.\n"); | |
d498757c | 8584 | /* |
b2ef480c RE |
8585 | * cannot goto clean7 or free_irqs will be called |
8586 | * again. Instead, do its work | |
8587 | */ | |
8588 | hpsa_free_performant_mode(h); /* clean7 */ | |
8589 | hpsa_free_sg_chain_blocks(h); /* clean6 */ | |
8590 | hpsa_free_cmd_pool(h); /* clean5 */ | |
8591 | /* | |
8592 | * skip hpsa_free_irqs(h) clean4 since that | |
8593 | * was just called before request_irqs failed | |
d498757c RE |
8594 | */ |
8595 | goto clean3; | |
64670ac8 SC |
8596 | } |
8597 | ||
8598 | rc = hpsa_kdump_soft_reset(h); | |
8599 | if (rc) | |
8600 | /* Neither hard nor soft reset worked, we're hosed. */ | |
7ef7323f | 8601 | goto clean7; |
64670ac8 SC |
8602 | |
8603 | dev_info(&h->pdev->dev, "Board READY.\n"); | |
8604 | dev_info(&h->pdev->dev, | |
8605 | "Waiting for stale completions to drain.\n"); | |
8606 | h->access.set_intr_mask(h, HPSA_INTR_ON); | |
8607 | msleep(10000); | |
8608 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
8609 | ||
8610 | rc = controller_reset_failed(h->cfgtable); | |
8611 | if (rc) | |
8612 | dev_info(&h->pdev->dev, | |
8613 | "Soft reset appears to have failed.\n"); | |
8614 | ||
8615 | /* since the controller's reset, we have to go back and re-init | |
8616 | * everything. Easiest to just forget what we've done and do it | |
8617 | * all over again. | |
8618 | */ | |
8619 | hpsa_undo_allocations_after_kdump_soft_reset(h); | |
8620 | try_soft_reset = 0; | |
8621 | if (rc) | |
b2ef480c | 8622 | /* don't goto clean, we already unallocated */ |
64670ac8 SC |
8623 | return -ENODEV; |
8624 | ||
8625 | goto reinit_after_soft_reset; | |
8626 | } | |
edd16368 | 8627 | |
105a3dbc RE |
8628 | /* Enable Accelerated IO path at driver layer */ |
8629 | h->acciopath_status = 1; | |
34592254 ST |
8630 | /* Disable discovery polling.*/ |
8631 | h->discovery_polling = 0; | |
da0697bd | 8632 | |
e863d68e | 8633 | |
edd16368 SC |
8634 | /* Turn the interrupts on so we can service requests */ |
8635 | h->access.set_intr_mask(h, HPSA_INTR_ON); | |
8636 | ||
339b2b14 | 8637 | hpsa_hba_inquiry(h); |
8a98db73 | 8638 | |
34592254 ST |
8639 | h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL); |
8640 | if (!h->lastlogicals) | |
8641 | dev_info(&h->pdev->dev, | |
8642 | "Can't track change to report lun data\n"); | |
8643 | ||
8a98db73 SC |
8644 | /* Monitor the controller for firmware lockups */ |
8645 | h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; | |
8646 | INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); | |
8647 | schedule_delayed_work(&h->monitor_ctlr_work, | |
8648 | h->heartbeat_sample_interval); | |
6636e7f4 DB |
8649 | INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); |
8650 | queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, | |
8651 | h->heartbeat_sample_interval); | |
88bf6d62 | 8652 | return 0; |
edd16368 | 8653 | |
2946e82b | 8654 | clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ |
105a3dbc RE |
8655 | hpsa_free_performant_mode(h); |
8656 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
8657 | clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ | |
33a2ffce | 8658 | hpsa_free_sg_chain_blocks(h); |
2946e82b | 8659 | clean5: /* cmd, irq, shost, pci, lu, aer/h */ |
2e9d1b36 | 8660 | hpsa_free_cmd_pool(h); |
2946e82b | 8661 | clean4: /* irq, shost, pci, lu, aer/h */ |
ec501a18 | 8662 | hpsa_free_irqs(h); |
2946e82b RE |
8663 | clean3: /* shost, pci, lu, aer/h */ |
8664 | scsi_host_put(h->scsi_host); | |
8665 | h->scsi_host = NULL; | |
8666 | clean2_5: /* pci, lu, aer/h */ | |
195f2c65 | 8667 | hpsa_free_pci_init(h); |
2946e82b | 8668 | clean2: /* lu, aer/h */ |
105a3dbc RE |
8669 | if (h->lockup_detected) { |
8670 | free_percpu(h->lockup_detected); | |
8671 | h->lockup_detected = NULL; | |
8672 | } | |
8673 | clean1: /* wq/aer/h */ | |
8674 | if (h->resubmit_wq) { | |
080ef1cc | 8675 | destroy_workqueue(h->resubmit_wq); |
105a3dbc RE |
8676 | h->resubmit_wq = NULL; |
8677 | } | |
8678 | if (h->rescan_ctlr_wq) { | |
6636e7f4 | 8679 | destroy_workqueue(h->rescan_ctlr_wq); |
105a3dbc RE |
8680 | h->rescan_ctlr_wq = NULL; |
8681 | } | |
edd16368 | 8682 | kfree(h); |
ecd9aad4 | 8683 | return rc; |
edd16368 SC |
8684 | } |
8685 | ||
8686 | static void hpsa_flush_cache(struct ctlr_info *h) | |
8687 | { | |
8688 | char *flush_buf; | |
8689 | struct CommandList *c; | |
25163bd5 | 8690 | int rc; |
702890e3 | 8691 | |
094963da | 8692 | if (unlikely(lockup_detected(h))) |
702890e3 | 8693 | return; |
edd16368 SC |
8694 | flush_buf = kzalloc(4, GFP_KERNEL); |
8695 | if (!flush_buf) | |
8696 | return; | |
8697 | ||
45fcb86e | 8698 | c = cmd_alloc(h); |
bf43caf3 | 8699 | |
a2dac136 SC |
8700 | if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, |
8701 | RAID_CTLR_LUNID, TYPE_CMD)) { | |
8702 | goto out; | |
8703 | } | |
25163bd5 WS |
8704 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, |
8705 | PCI_DMA_TODEVICE, NO_TIMEOUT); | |
8706 | if (rc) | |
8707 | goto out; | |
8708 | if (c->err_info->CommandStatus == 0)
8709 | goto done;
8710 | out:
8711 | dev_warn(&h->pdev->dev,
8712 | "error flushing cache on controller\n");
8713 | done:
8714 | cmd_free(h, c);
8715 | kfree(flush_buf);
8714 | } | |
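/* The flush is an ordinary internal CISS command addressed to the
 * controller LUN (RAID_CTLR_LUNID): fill_cmd() builds the
 * HPSA_CACHE_FLUSH request around the 4-byte scratch buffer, and the
 * synchronous helper submits it with retries like any other internal
 * command.
 */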
8715 | ||
c2adae44 ST |
8716 | /* Make controller gather fresh report lun data each time we |
8717 | * send down a report luns request | |
8718 | */ | |
8719 | static void hpsa_disable_rld_caching(struct ctlr_info *h) | |
8720 | { | |
8721 | u32 *options; | |
8722 | struct CommandList *c; | |
8723 | int rc; | |
8724 | ||
8725 | /* Don't bother trying to set diag options if locked up */ | |
8726 | if (unlikely(lockup_detected(h))) /* read the per-cpu flag, not the percpu pointer */
8727 | return; | |
8728 | ||
8729 | options = kzalloc(sizeof(*options), GFP_KERNEL); | |
8730 | if (!options) { | |
8731 | dev_err(&h->pdev->dev, | |
8732 | "Error: failed to disable rld caching, during alloc.\n"); | |
8733 | return; | |
8734 | } | |
8735 | ||
8736 | c = cmd_alloc(h); | |
8737 | ||
8738 | /* first, get the current diag options settings */ | |
8739 | if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, | |
8740 | RAID_CTLR_LUNID, TYPE_CMD)) | |
8741 | goto errout; | |
8742 | ||
8743 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | |
8744 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
8745 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | |
8746 | goto errout; | |
8747 | ||
8748 | /* Now, set the bit for disabling the RLD caching */ | |
8749 | *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING; | |
8750 | ||
8751 | if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0, | |
8752 | RAID_CTLR_LUNID, TYPE_CMD)) | |
8753 | goto errout; | |
8754 | ||
8755 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | |
8756 | PCI_DMA_TODEVICE, NO_TIMEOUT); | |
8757 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | |
8758 | goto errout; | |
8759 | ||
8760 | /* Now verify that it got set: */ | |
8761 | if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0, | |
8762 | RAID_CTLR_LUNID, TYPE_CMD)) | |
8763 | goto errout; | |
8764 | ||
8765 | rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, | |
8766 | PCI_DMA_FROMDEVICE, NO_TIMEOUT); | |
8767 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | |
8768 | goto errout; | |
8769 | ||
d8a080c3 | 8770 | if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) |
c2adae44 ST |
8771 | goto out; |
8772 | ||
8773 | errout: | |
8774 | dev_err(&h->pdev->dev, | |
8775 | "Error: failed to disable report lun data caching.\n"); | |
8776 | out: | |
8777 | cmd_free(h, c); | |
8778 | kfree(options); | |
8779 | } | |
8780 | ||
edd16368 SC |
8781 | static void hpsa_shutdown(struct pci_dev *pdev) |
8782 | { | |
8783 | struct ctlr_info *h; | |
8784 | ||
8785 | h = pci_get_drvdata(pdev); | |
8786 | /* Turn board interrupts off and send the flush cache command;
8787 | * the flush writes all data in the battery-backed cache out
8788 | * to the disks before the controller loses power.
8789 | */ | |
8790 | hpsa_flush_cache(h); | |
8791 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
105a3dbc | 8792 | hpsa_free_irqs(h); /* init_one 4 */ |
cc64c817 | 8793 | hpsa_disable_interrupt_mode(h); /* pci_init 2 */ |
edd16368 SC |
8794 | } |
8795 | ||
6f039790 | 8796 | static void hpsa_free_device_info(struct ctlr_info *h) |
55e14e76 SC |
8797 | { |
8798 | int i; | |
8799 | ||
105a3dbc | 8800 | for (i = 0; i < h->ndevices; i++) { |
55e14e76 | 8801 | kfree(h->dev[i]); |
105a3dbc RE |
8802 | h->dev[i] = NULL; |
8803 | } | |
55e14e76 SC |
8804 | } |
8805 | ||
6f039790 | 8806 | static void hpsa_remove_one(struct pci_dev *pdev) |
edd16368 SC |
8807 | { |
8808 | struct ctlr_info *h; | |
8a98db73 | 8809 | unsigned long flags; |
edd16368 SC |
8810 | |
8811 | if (pci_get_drvdata(pdev) == NULL) { | |
a0c12413 | 8812 | dev_err(&pdev->dev, "unable to remove device\n"); |
edd16368 SC |
8813 | return; |
8814 | } | |
8815 | h = pci_get_drvdata(pdev); | |
8a98db73 SC |
8816 | |
8817 | /* Get rid of any controller monitoring work items */ | |
8818 | spin_lock_irqsave(&h->lock, flags); | |
8819 | h->remove_in_progress = 1; | |
8a98db73 | 8820 | spin_unlock_irqrestore(&h->lock, flags); |
6636e7f4 DB |
8821 | cancel_delayed_work_sync(&h->monitor_ctlr_work); |
8822 | cancel_delayed_work_sync(&h->rescan_ctlr_work); | |
8823 | destroy_workqueue(h->rescan_ctlr_wq); | |
8824 | destroy_workqueue(h->resubmit_wq); | |
cc64c817 | 8825 | |
2d041306 DB |
8826 | /* |
8827 | * Call before disabling interrupts. | |
8828 | * scsi_remove_host can trigger I/O operations especially | |
8829 | * when multipath is enabled. There can be SYNCHRONIZE CACHE | |
8830 | * operations which cannot complete and will hang the system. | |
8831 | */ | |
8832 | if (h->scsi_host) | |
8833 | scsi_remove_host(h->scsi_host); /* init_one 8 */ | |
105a3dbc | 8834 | /* includes hpsa_free_irqs - init_one 4 */ |
195f2c65 | 8835 | /* includes hpsa_disable_interrupt_mode - pci_init 2 */ |
edd16368 | 8836 | hpsa_shutdown(pdev); |
cc64c817 | 8837 | |
105a3dbc RE |
8838 | hpsa_free_device_info(h); /* scan */ |
8839 | ||
2946e82b RE |
8840 | kfree(h->hba_inquiry_data); /* init_one 10 */ |
8841 | h->hba_inquiry_data = NULL; /* init_one 10 */ | |
2946e82b | 8842 | hpsa_free_ioaccel2_sg_chain_blocks(h); |
105a3dbc RE |
8843 | hpsa_free_performant_mode(h); /* init_one 7 */ |
8844 | hpsa_free_sg_chain_blocks(h); /* init_one 6 */ | |
8845 | hpsa_free_cmd_pool(h); /* init_one 5 */ | |
34592254 | 8846 | kfree(h->lastlogicals); |
105a3dbc RE |
8847 | |
8848 | /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */ | |
195f2c65 | 8849 | |
2946e82b RE |
8850 | scsi_host_put(h->scsi_host); /* init_one 3 */ |
8851 | h->scsi_host = NULL; /* init_one 3 */ | |
8852 | ||
195f2c65 | 8853 | /* includes hpsa_disable_interrupt_mode - pci_init 2 */ |
2946e82b | 8854 | hpsa_free_pci_init(h); /* init_one 2.5 */ |
195f2c65 | 8855 | |
105a3dbc RE |
8856 | free_percpu(h->lockup_detected); /* init_one 2 */ |
8857 | h->lockup_detected = NULL; /* init_one 2 */ | |
8858 | /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */ | |
d04e62b9 KB |
8859 | |
8860 | hpsa_delete_sas_host(h); | |
8861 | ||
105a3dbc | 8862 | kfree(h); /* init_one 1 */ |
edd16368 SC |
8863 | } |
8864 | ||
8865 | static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, | |
8866 | __attribute__((unused)) pm_message_t state) | |
8867 | { | |
8868 | return -ENOSYS; | |
8869 | } | |
8870 | ||
8871 | static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) | |
8872 | { | |
8873 | return -ENOSYS; | |
8874 | } | |
8875 | ||
8876 | static struct pci_driver hpsa_pci_driver = { | |
f79cfec6 | 8877 | .name = HPSA, |
edd16368 | 8878 | .probe = hpsa_init_one, |
6f039790 | 8879 | .remove = hpsa_remove_one, |
edd16368 SC |
8880 | .id_table = hpsa_pci_device_id, /* id_table */ |
8881 | .shutdown = hpsa_shutdown, | |
8882 | .suspend = hpsa_suspend, | |
8883 | .resume = hpsa_resume, | |
8884 | }; | |
8885 | ||
303932fd DB |
8886 | /* Fill in bucket_map[], given nsgs (the max number of |
8887 | * scatter gather elements supported) and bucket[], | |
8888 | * which is an array of 8 integers. The bucket[] array | |
8889 | * contains 8 different DMA transfer sizes (in 16 | |
8890 | * byte increments) which the controller uses to fetch | |
8891 | * commands. This function fills in bucket_map[], which | |
8892 | * maps a given number of scatter gather elements to one of | |
8893 | * the 8 DMA transfer sizes. The point of it is to allow the | |
8894 | * controller to only do as much DMA as needed to fetch the | |
8895 | * command, with the DMA transfer size encoded in the lower | |
8896 | * bits of the command address. | |
8897 | */ | |
8898 | static void calc_bucket_map(int bucket[], int num_buckets, | |
2b08b3e9 | 8899 | int nsgs, int min_blocks, u32 *bucket_map) |
303932fd DB |
8900 | { |
8901 | int i, j, b, size; | |
8902 | ||
303932fd DB |
8903 | /* Note, bucket_map must have nsgs+1 entries. */ |
8904 | for (i = 0; i <= nsgs; i++) { | |
8905 | /* Compute size of a command with i SG entries */ | |
e1f7de0c | 8906 | size = i + min_blocks; |
303932fd DB |
8907 | b = num_buckets; /* Assume the biggest bucket */ |
8908 | /* Find the bucket that is just big enough */ | |
e1f7de0c | 8909 | for (j = 0; j < num_buckets; j++) { |
303932fd DB |
8910 | if (bucket[j] >= size) { |
8911 | b = j; | |
8912 | break; | |
8913 | } | |
8914 | } | |
8915 | /* for a command with i SG entries, use bucket b. */ | |
8916 | bucket_map[i] = b; | |
8917 | } | |
8918 | } | |
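/* Worked example, using hypothetical values: with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, 35} and min_blocks = 4, a command
 * with i = 3 SG entries needs size = 3 + 4 = 7 sixteen-byte blocks.  The
 * first bucket >= 7 is bucket[2] = 8, so bucket_map[3] = 2 and the
 * controller will fetch 8 * 16 = 128 bytes for that command.
 */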
8919 | ||
105a3dbc RE |
8920 | /* |
8921 | * return -ENODEV on err, 0 on success (or no action) | |
8922 | * allocates numerous items that must be freed later | |
8923 | */ | |
c706a795 | 8924 | static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support) |
303932fd | 8925 | { |
6c311b57 SC |
8926 | int i; |
8927 | unsigned long register_value; | |
e1f7de0c MG |
8928 | unsigned long transMethod = CFGTBL_Trans_Performant | |
8929 | (trans_support & CFGTBL_Trans_use_short_tags) | | |
b9af4937 SC |
8930 | CFGTBL_Trans_enable_directed_msix | |
8931 | (trans_support & (CFGTBL_Trans_io_accel1 | | |
8932 | CFGTBL_Trans_io_accel2)); | |
e1f7de0c | 8933 | struct access_method access = SA5_performant_access; |
def342bd SC |
8934 | |
8935 | /* This is a bit complicated. There are 8 registers on | |
8936 | * the controller, which we write to tell it the 8 different
8937 | * sizes of commands that may occur. It's a way of
8938 | * reducing the DMA done to fetch each command. Encoded into | |
8939 | * each command's tag are 3 bits which communicate to the controller | |
8940 | * which of the eight sizes that command fits within. The size of | |
8941 | * each command depends on how many scatter gather entries there are. | |
8942 | * Each SG entry requires 16 bytes. The eight registers are programmed | |
8943 | * with the number of 16-byte blocks a command of that size requires. | |
8944 | * The smallest command possible requires 5 such 16 byte blocks. | |
d66ae08b | 8945 | * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
def342bd SC |
8946 | * blocks. Note, this only extends to the SG entries contained |
8947 | * within the command block, and does not extend to chained blocks | |
8948 | * of SG elements. bft[] contains the eight values we write to | |
8949 | * the registers. They are not evenly distributed, but have more | |
8950 | * sizes for small commands, and fewer sizes for larger commands. | |
8951 | */ | |
d66ae08b | 8952 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; |
b9af4937 SC |
8953 | #define MIN_IOACCEL2_BFT_ENTRY 5 |
8954 | #define HPSA_IOACCEL2_HEADER_SZ 4 | |
8955 | int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, | |
8956 | 13, 14, 15, 16, 17, 18, 19, | |
8957 | HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; | |
8958 | BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); | |
8959 | BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); | |
8960 | BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > | |
8961 | 16 * MIN_IOACCEL2_BFT_ENTRY); | |
8962 | BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16); | |
d66ae08b | 8963 | BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4); |
303932fd DB |
8964 | /* 5 = 1 s/g entry or 4k |
8965 | * 6 = 2 s/g entry or 8k | |
8966 | * 8 = 4 s/g entry or 16k | |
8967 | * 10 = 6 s/g entry or 24k | |
8968 | */ | |
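/* Example decode, per the table above: a tag whose size bits select
 * BlockFetch[2] (value 8) tells the controller to DMA 8 * 16 = 128 bytes
 * of the command, enough for the fixed header plus up to 4 embedded SG
 * entries ("8 = 4 s/g entry or 16k").
 */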
303932fd | 8969 | |
b3a52e79 SC |
8970 | /* If the controller supports either ioaccel method then |
8971 | * we can also use the RAID stack submit path that does not | |
8972 | * perform the superfluous readl() after each command submission. | |
8973 | */ | |
8974 | if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) | |
8975 | access = SA5_performant_access_no_read; | |
8976 | ||
303932fd | 8977 | /* Controller spec: zero out this buffer. */ |
072b0518 SC |
8978 | for (i = 0; i < h->nreply_queues; i++) |
8979 | memset(h->reply_queue[i].head, 0, h->reply_queue_size); | |
303932fd | 8980 | |
d66ae08b SC |
8981 | bft[7] = SG_ENTRIES_IN_CMD + 4; |
8982 | calc_bucket_map(bft, ARRAY_SIZE(bft), | |
e1f7de0c | 8983 | SG_ENTRIES_IN_CMD, 4, h->blockFetchTable); |
303932fd DB |
8984 | for (i = 0; i < 8; i++) |
8985 | writel(bft[i], &h->transtable->BlockFetch[i]); | |
8986 | ||
8987 | /* size of controller ring buffer */ | |
8988 | writel(h->max_commands, &h->transtable->RepQSize); | |
254f796b | 8989 | writel(h->nreply_queues, &h->transtable->RepQCount); |
303932fd DB |
8990 | writel(0, &h->transtable->RepQCtrAddrLow32); |
8991 | writel(0, &h->transtable->RepQCtrAddrHigh32); | |
254f796b MG |
8992 | |
8993 | for (i = 0; i < h->nreply_queues; i++) { | |
8994 | writel(0, &h->transtable->RepQAddr[i].upper); | |
072b0518 | 8995 | writel(h->reply_queue[i].busaddr, |
254f796b MG |
8996 | &h->transtable->RepQAddr[i].lower); |
8997 | } | |
8998 | ||
b9af4937 | 8999 | writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); |
e1f7de0c MG |
9000 | writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); |
9001 | /* | |
9002 | * enable outbound interrupt coalescing in accelerator mode.
9003 | */ | |
9004 | if (trans_support & CFGTBL_Trans_io_accel1) { | |
9005 | access = SA5_ioaccel_mode1_access; | |
9006 | writel(10, &h->cfgtable->HostWrite.CoalIntDelay); | |
9007 | writel(4, &h->cfgtable->HostWrite.CoalIntCount); | |
c349775e ST |
9008 | } else { |
9009 | if (trans_support & CFGTBL_Trans_io_accel2) { | |
9010 | access = SA5_ioaccel_mode2_access; | |
9011 | writel(10, &h->cfgtable->HostWrite.CoalIntDelay); | |
9012 | writel(4, &h->cfgtable->HostWrite.CoalIntCount); | |
9013 | } | |
e1f7de0c | 9014 | } |
303932fd | 9015 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
c706a795 RE |
9016 | if (hpsa_wait_for_mode_change_ack(h)) { |
9017 | dev_err(&h->pdev->dev, | |
9018 | "performant mode problem - doorbell timeout\n"); | |
9019 | return -ENODEV; | |
9020 | } | |
303932fd DB |
9021 | register_value = readl(&(h->cfgtable->TransportActive)); |
9022 | if (!(register_value & CFGTBL_Trans_Performant)) { | |
050f7147 SC |
9023 | dev_err(&h->pdev->dev, |
9024 | "performant mode problem - transport not active\n"); | |
c706a795 | 9025 | return -ENODEV; |
303932fd | 9026 | } |
960a30e7 | 9027 | /* Change the access methods to the performant access methods */ |
e1f7de0c MG |
9028 | h->access = access; |
9029 | h->transMethod = transMethod; | |
9030 | ||
b9af4937 SC |
9031 | if (!((trans_support & CFGTBL_Trans_io_accel1) || |
9032 | (trans_support & CFGTBL_Trans_io_accel2))) | |
c706a795 | 9033 | return 0; |
e1f7de0c | 9034 | |
b9af4937 SC |
9035 | if (trans_support & CFGTBL_Trans_io_accel1) { |
9036 | /* Set up I/O accelerator mode */ | |
9037 | for (i = 0; i < h->nreply_queues; i++) { | |
9038 | writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); | |
9039 | h->reply_queue[i].current_entry = | |
9040 | readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); | |
9041 | } | |
9042 | bft[7] = h->ioaccel_maxsg + 8; | |
9043 | calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, | |
9044 | h->ioaccel1_blockFetchTable); | |
e1f7de0c | 9045 | |
b9af4937 | 9046 | /* initialize all reply queue entries to unused */ |
072b0518 SC |
9047 | for (i = 0; i < h->nreply_queues; i++) |
9048 | memset(h->reply_queue[i].head, | |
9049 | (u8) IOACCEL_MODE1_REPLY_UNUSED, | |
9050 | h->reply_queue_size); | |
e1f7de0c | 9051 | |
b9af4937 SC |
9052 | /* set all the constant fields in the accelerator command |
9053 | * frames once at init time to save CPU cycles later. | |
9054 | */ | |
9055 | for (i = 0; i < h->nr_cmds; i++) { | |
9056 | struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; | |
9057 | ||
9058 | cp->function = IOACCEL1_FUNCTION_SCSIIO; | |
9059 | cp->err_info = (u32) (h->errinfo_pool_dhandle + | |
9060 | (i * sizeof(struct ErrorInfo))); | |
9061 | cp->err_info_len = sizeof(struct ErrorInfo); | |
9062 | cp->sgl_offset = IOACCEL1_SGLOFFSET; | |
2b08b3e9 DB |
9063 | cp->host_context_flags = |
9064 | cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT); | |
b9af4937 SC |
9065 | cp->timeout_sec = 0; |
9066 | cp->ReplyQueue = 0; | |
50a0decf | 9067 | cp->tag = |
f2405db8 | 9068 | cpu_to_le64((i << DIRECT_LOOKUP_SHIFT)); |
50a0decf SC |
9069 | cp->host_addr = |
9070 | cpu_to_le64(h->ioaccel_cmd_pool_dhandle + | |
b9af4937 | 9071 | (i * sizeof(struct io_accel1_cmd))); |
b9af4937 SC |
9072 | } |
9073 | } else if (trans_support & CFGTBL_Trans_io_accel2) { | |
9074 | u64 cfg_offset, cfg_base_addr_index; | |
9075 | u32 bft2_offset, cfg_base_addr; | |
9076 | int rc; | |
9077 | ||
9078 | rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, | |
9079 | &cfg_base_addr_index, &cfg_offset); | |
9080 | BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); | |
9081 | bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; | |
9082 | calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, | |
9083 | 4, h->ioaccel2_blockFetchTable); | |
9084 | bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); | |
9085 | BUILD_BUG_ON(offsetof(struct CfgTable, | |
9086 | io_accel_request_size_offset) != 0xb8); | |
9087 | h->ioaccel2_bft2_regs = | |
9088 | remap_pci_mem(pci_resource_start(h->pdev, | |
9089 | cfg_base_addr_index) + | |
9090 | cfg_offset + bft2_offset, | |
9091 | ARRAY_SIZE(bft2) * | |
9092 | sizeof(*h->ioaccel2_bft2_regs)); | |
9093 | for (i = 0; i < ARRAY_SIZE(bft2); i++) | |
9094 | writel(bft2[i], &h->ioaccel2_bft2_regs[i]); | |
e1f7de0c | 9095 | } |
b9af4937 | 9096 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
c706a795 RE |
9097 | if (hpsa_wait_for_mode_change_ack(h)) { |
9098 | dev_err(&h->pdev->dev, | |
9099 | "performant mode problem - enabling ioaccel mode\n"); | |
9100 | return -ENODEV; | |
9101 | } | |
9102 | return 0; | |
e1f7de0c MG |
9103 | } |
9104 | ||
1fb7c98a RE |
9105 | /* Free ioaccel1 mode command blocks and block fetch table */ |
9106 | static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h) | |
9107 | { | |
105a3dbc | 9108 | if (h->ioaccel_cmd_pool) { |
1fb7c98a RE |
9109 | pci_free_consistent(h->pdev, |
9110 | h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), | |
9111 | h->ioaccel_cmd_pool, | |
9112 | h->ioaccel_cmd_pool_dhandle); | |
105a3dbc RE |
9113 | h->ioaccel_cmd_pool = NULL; |
9114 | h->ioaccel_cmd_pool_dhandle = 0; | |
9115 | } | |
1fb7c98a | 9116 | kfree(h->ioaccel1_blockFetchTable); |
105a3dbc | 9117 | h->ioaccel1_blockFetchTable = NULL; |
1fb7c98a RE |
9118 | } |
9119 | ||
d37ffbe4 RE |
9120 | /* Allocate ioaccel1 mode command blocks and block fetch table */ |
9121 | static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h) | |
e1f7de0c | 9122 | { |
283b4a9b SC |
9123 | h->ioaccel_maxsg = |
9124 | readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); | |
9125 | if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) | |
9126 | h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; | |
9127 | ||
e1f7de0c MG |
9128 | /* Command structures must be aligned on a 128-byte boundary |
9129 | * because the 7 lower bits of the address are used by the | |
9130 | * hardware. | |
9131 | */ | |
e1f7de0c MG |
9132 | BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % |
9133 | IOACCEL1_COMMANDLIST_ALIGNMENT); | |
9134 | h->ioaccel_cmd_pool = | |
9135 | pci_alloc_consistent(h->pdev, | |
9136 | h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), | |
9137 | &(h->ioaccel_cmd_pool_dhandle)); | |
9138 | ||
9139 | h->ioaccel1_blockFetchTable = | |
283b4a9b | 9140 | kmalloc(((h->ioaccel_maxsg + 1) * |
e1f7de0c MG |
9141 | sizeof(u32)), GFP_KERNEL); |
9142 | ||
9143 | if ((h->ioaccel_cmd_pool == NULL) || | |
9144 | (h->ioaccel1_blockFetchTable == NULL)) | |
9145 | goto clean_up; | |
9146 | ||
9147 | memset(h->ioaccel_cmd_pool, 0, | |
9148 | h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); | |
9149 | return 0; | |
9150 | ||
9151 | clean_up: | |
1fb7c98a | 9152 | hpsa_free_ioaccel1_cmd_and_bft(h); |
2dd02d74 | 9153 | return -ENOMEM; |
6c311b57 SC |
9154 | } |
9155 | ||
1fb7c98a RE |
9156 | /* Free ioaccel2 mode command blocks and block fetch table */ |
9157 | static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h) | |
9158 | { | |
d9a729f3 WS |
9159 | hpsa_free_ioaccel2_sg_chain_blocks(h); |
9160 | ||
105a3dbc | 9161 | if (h->ioaccel2_cmd_pool) { |
1fb7c98a RE |
9162 | pci_free_consistent(h->pdev, |
9163 | h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), | |
9164 | h->ioaccel2_cmd_pool, | |
9165 | h->ioaccel2_cmd_pool_dhandle); | |
105a3dbc RE |
9166 | h->ioaccel2_cmd_pool = NULL; |
9167 | h->ioaccel2_cmd_pool_dhandle = 0; | |
9168 | } | |
1fb7c98a | 9169 | kfree(h->ioaccel2_blockFetchTable); |
105a3dbc | 9170 | h->ioaccel2_blockFetchTable = NULL; |
1fb7c98a RE |
9171 | } |
9172 | ||
d37ffbe4 RE |
9173 | /* Allocate ioaccel2 mode command blocks and block fetch table */ |
9174 | static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h) | |
aca9012a | 9175 | { |
d9a729f3 WS |
9176 | int rc; |
9177 | ||
aca9012a SC |
9178 | /* Allocate ioaccel2 mode command blocks and block fetch table */ |
9179 | ||
9180 | h->ioaccel_maxsg = | |
9181 | readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); | |
9182 | if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) | |
9183 | h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; | |
9184 | ||
aca9012a SC |
9185 | BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % |
9186 | IOACCEL2_COMMANDLIST_ALIGNMENT); | |
9187 | h->ioaccel2_cmd_pool = | |
9188 | pci_alloc_consistent(h->pdev, | |
9189 | h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), | |
9190 | &(h->ioaccel2_cmd_pool_dhandle)); | |
9191 | ||
9192 | h->ioaccel2_blockFetchTable = | |
9193 | kmalloc(((h->ioaccel_maxsg + 1) * | |
9194 | sizeof(u32)), GFP_KERNEL); | |
9195 | ||
9196 | if ((h->ioaccel2_cmd_pool == NULL) || | |
d9a729f3 WS |
9197 | (h->ioaccel2_blockFetchTable == NULL)) { |
9198 | rc = -ENOMEM; | |
9199 | goto clean_up; | |
9200 | } | |
9201 | ||
9202 | rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h); | |
9203 | if (rc) | |
aca9012a SC |
9204 | goto clean_up; |
9205 | ||
9206 | memset(h->ioaccel2_cmd_pool, 0, | |
9207 | h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); | |
9208 | return 0; | |
9209 | ||
9210 | clean_up: | |
1fb7c98a | 9211 | hpsa_free_ioaccel2_cmd_and_bft(h); |
d9a729f3 | 9212 | return rc; |
aca9012a SC |
9213 | } |
9214 | ||
105a3dbc RE |
9215 | /* Free items allocated by hpsa_put_ctlr_into_performant_mode */ |
9216 | static void hpsa_free_performant_mode(struct ctlr_info *h) | |
9217 | { | |
9218 | kfree(h->blockFetchTable); | |
9219 | h->blockFetchTable = NULL; | |
9220 | hpsa_free_reply_queues(h); | |
9221 | hpsa_free_ioaccel1_cmd_and_bft(h); | |
9222 | hpsa_free_ioaccel2_cmd_and_bft(h); | |
9223 | } | |
9224 | ||
9225 | /* return -ENODEV on error, 0 on success (or no action) | |
9226 | * allocates numerous items that must be freed later | |
9227 | */ | |
9228 | static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) | |
6c311b57 SC |
9229 | { |
9230 | u32 trans_support; | |
e1f7de0c MG |
9231 | unsigned long transMethod = CFGTBL_Trans_Performant | |
9232 | CFGTBL_Trans_use_short_tags; | |
105a3dbc | 9233 | int i, rc; |
6c311b57 | 9234 | |
02ec19c8 | 9235 | if (hpsa_simple_mode) |
105a3dbc | 9236 | return 0; |
02ec19c8 | 9237 | |
67c99a72 | 9238 | trans_support = readl(&(h->cfgtable->TransportSupport)); |
9239 | if (!(trans_support & PERFORMANT_MODE)) | |
105a3dbc | 9240 | return 0; |
67c99a72 | 9241 | |
e1f7de0c MG |
9242 | /* Check for I/O accelerator mode support */ |
9243 | if (trans_support & CFGTBL_Trans_io_accel1) { | |
9244 | transMethod |= CFGTBL_Trans_io_accel1 | | |
9245 | CFGTBL_Trans_enable_directed_msix; | |
105a3dbc RE |
9246 | rc = hpsa_alloc_ioaccel1_cmd_and_bft(h); |
9247 | if (rc) | |
9248 | return rc; | |
9249 | } else if (trans_support & CFGTBL_Trans_io_accel2) { | |
9250 | transMethod |= CFGTBL_Trans_io_accel2 | | |
aca9012a | 9251 | CFGTBL_Trans_enable_directed_msix; |
105a3dbc RE |
9252 | rc = hpsa_alloc_ioaccel2_cmd_and_bft(h); |
9253 | if (rc) | |
9254 | return rc; | |
e1f7de0c MG |
9255 | } |
9256 | ||
eee0f03a | 9257 | h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1; |
cba3d38b | 9258 | hpsa_get_max_perf_mode_cmds(h); |
6c311b57 | 9259 | /* Performant mode ring buffer and supporting data structures */ |
072b0518 | 9260 | h->reply_queue_size = h->max_commands * sizeof(u64); |
6c311b57 | 9261 | |
254f796b | 9262 | for (i = 0; i < h->nreply_queues; i++) { |
072b0518 SC |
9263 | h->reply_queue[i].head = pci_alloc_consistent(h->pdev, |
9264 | h->reply_queue_size, | |
9265 | &(h->reply_queue[i].busaddr)); | |
105a3dbc RE |
9266 | if (!h->reply_queue[i].head) { |
9267 | rc = -ENOMEM; | |
9268 | goto clean1; /* rq, ioaccel */ | |
9269 | } | |
254f796b MG |
9270 | h->reply_queue[i].size = h->max_commands; |
9271 | h->reply_queue[i].wraparound = 1; /* spec: init to 1 */ | |
9272 | h->reply_queue[i].current_entry = 0; | |
9273 | } | |
9274 | ||
6c311b57 | 9275 | /* Need a block fetch table for performant mode */ |
d66ae08b | 9276 | h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) * |
6c311b57 | 9277 | sizeof(u32)), GFP_KERNEL); |
105a3dbc RE |
9278 | if (!h->blockFetchTable) { |
9279 | rc = -ENOMEM; | |
9280 | goto clean1; /* rq, ioaccel */ | |
9281 | } | |
6c311b57 | 9282 | |
105a3dbc RE |
9283 | rc = hpsa_enter_performant_mode(h, trans_support); |
9284 | if (rc) | |
9285 | goto clean2; /* bft, rq, ioaccel */ | |
9286 | return 0; | |
303932fd | 9287 | |
105a3dbc | 9288 | clean2: /* bft, rq, ioaccel */ |
303932fd | 9289 | kfree(h->blockFetchTable); |
105a3dbc RE |
9290 | h->blockFetchTable = NULL; |
9291 | clean1: /* rq, ioaccel */ | |
9292 | hpsa_free_reply_queues(h); | |
9293 | hpsa_free_ioaccel1_cmd_and_bft(h); | |
9294 | hpsa_free_ioaccel2_cmd_and_bft(h); | |
9295 | return rc; | |
303932fd DB |
9296 | } |
9297 | ||
23100dd9 | 9298 | static int is_accelerated_cmd(struct CommandList *c) |
76438d08 | 9299 | { |
23100dd9 SC |
9300 | return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; |
9301 | } | |
9302 | ||
9303 | static void hpsa_drain_accel_commands(struct ctlr_info *h) | |
9304 | { | |
9305 | struct CommandList *c = NULL; | |
f2405db8 | 9306 | int i, accel_cmds_out; |
281a7fd0 | 9307 | int refcount; |
76438d08 | 9308 | |
f2405db8 | 9309 | do { /* wait for all outstanding ioaccel commands to drain out */ |
23100dd9 | 9310 | accel_cmds_out = 0; |
f2405db8 | 9311 | for (i = 0; i < h->nr_cmds; i++) { |
f2405db8 | 9312 | c = h->cmd_pool + i; |
281a7fd0 WS |
9313 | refcount = atomic_inc_return(&c->refcount); |
9314 | if (refcount > 1) /* Command is allocated */ | |
9315 | accel_cmds_out += is_accelerated_cmd(c); | |
9316 | cmd_free(h, c); | |
f2405db8 | 9317 | } |
23100dd9 | 9318 | if (accel_cmds_out <= 0) |
281a7fd0 | 9319 | break; |
76438d08 SC |
9320 | msleep(100); |
9321 | } while (1); | |
9322 | } | |
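/* The refcount probe above is this driver's idiom for inspecting the
 * command pool without a free-list lock: atomic_inc_return(&c->refcount)
 * returning more than 1 means the slot is currently owned (the owner
 * holds the first reference), and the paired cmd_free() merely drops the
 * probe's reference.
 */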
9323 | ||
d04e62b9 KB |
9324 | static struct hpsa_sas_phy *hpsa_alloc_sas_phy( |
9325 | struct hpsa_sas_port *hpsa_sas_port) | |
9326 | { | |
9327 | struct hpsa_sas_phy *hpsa_sas_phy; | |
9328 | struct sas_phy *phy; | |
9329 | ||
9330 | hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL); | |
9331 | if (!hpsa_sas_phy) | |
9332 | return NULL; | |
9333 | ||
9334 | phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev, | |
9335 | hpsa_sas_port->next_phy_index); | |
9336 | if (!phy) { | |
9337 | kfree(hpsa_sas_phy); | |
9338 | return NULL; | |
9339 | } | |
9340 | ||
9341 | hpsa_sas_port->next_phy_index++; | |
9342 | hpsa_sas_phy->phy = phy; | |
9343 | hpsa_sas_phy->parent_port = hpsa_sas_port; | |
9344 | ||
9345 | return hpsa_sas_phy; | |
9346 | } | |
9347 | ||
9348 | static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy) | |
9349 | { | |
9350 | struct sas_phy *phy = hpsa_sas_phy->phy; | |
9351 | ||
9352 | sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy); | |
9353 | sas_phy_free(phy); | |
9354 | if (hpsa_sas_phy->added_to_port) | |
9355 | list_del(&hpsa_sas_phy->phy_list_entry); | |
9356 | kfree(hpsa_sas_phy); | |
9357 | } | |
9358 | ||
9359 | static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy) | |
9360 | { | |
9361 | int rc; | |
9362 | struct hpsa_sas_port *hpsa_sas_port; | |
9363 | struct sas_phy *phy; | |
9364 | struct sas_identify *identify; | |
9365 | ||
9366 | hpsa_sas_port = hpsa_sas_phy->parent_port; | |
9367 | phy = hpsa_sas_phy->phy; | |
9368 | ||
9369 | identify = &phy->identify; | |
9370 | memset(identify, 0, sizeof(*identify)); | |
9371 | identify->sas_address = hpsa_sas_port->sas_address; | |
9372 | identify->device_type = SAS_END_DEVICE; | |
9373 | identify->initiator_port_protocols = SAS_PROTOCOL_STP; | |
9374 | identify->target_port_protocols = SAS_PROTOCOL_STP; | |
9375 | phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; | |
9376 | phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN; | |
9377 | phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN; | |
9378 | phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; | |
9379 | phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; | |
9380 | ||
9381 | rc = sas_phy_add(hpsa_sas_phy->phy); | |
9382 | if (rc) | |
9383 | return rc; | |
9384 | ||
9385 | sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy); | |
9386 | list_add_tail(&hpsa_sas_phy->phy_list_entry, | |
9387 | &hpsa_sas_port->phy_list_head); | |
9388 | hpsa_sas_phy->added_to_port = true; | |
9389 | ||
9390 | return 0; | |
9391 | } | |
9392 | ||
9393 | static int | |
9394 | hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port, | |
9395 | struct sas_rphy *rphy) | |
9396 | { | |
9397 | struct sas_identify *identify; | |
9398 | ||
9399 | identify = &rphy->identify; | |
9400 | identify->sas_address = hpsa_sas_port->sas_address; | |
9401 | identify->initiator_port_protocols = SAS_PROTOCOL_STP; | |
9402 | identify->target_port_protocols = SAS_PROTOCOL_STP; | |
9403 | ||
9404 | return sas_rphy_add(rphy); | |
9405 | } | |
9406 | ||
9407 | static struct hpsa_sas_port | |
9408 | *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node, | |
9409 | u64 sas_address) | |
9410 | { | |
9411 | int rc; | |
9412 | struct hpsa_sas_port *hpsa_sas_port; | |
9413 | struct sas_port *port; | |
9414 | ||
9415 | hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL); | |
9416 | if (!hpsa_sas_port) | |
9417 | return NULL; | |
9418 | ||
9419 | INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head); | |
9420 | hpsa_sas_port->parent_node = hpsa_sas_node; | |
9421 | ||
9422 | port = sas_port_alloc_num(hpsa_sas_node->parent_dev); | |
9423 | if (!port) | |
9424 | goto free_hpsa_port; | |
9425 | ||
9426 | rc = sas_port_add(port); | |
9427 | if (rc) | |
9428 | goto free_sas_port; | |
9429 | ||
9430 | hpsa_sas_port->port = port; | |
9431 | hpsa_sas_port->sas_address = sas_address; | |
9432 | list_add_tail(&hpsa_sas_port->port_list_entry, | |
9433 | &hpsa_sas_node->port_list_head); | |
9434 | ||
9435 | return hpsa_sas_port; | |
9436 | ||
9437 | free_sas_port: | |
9438 | sas_port_free(port); | |
9439 | free_hpsa_port: | |
9440 | kfree(hpsa_sas_port); | |
9441 | ||
9442 | return NULL; | |
9443 | } | |
9444 | ||
9445 | static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port) | |
9446 | { | |
9447 | struct hpsa_sas_phy *hpsa_sas_phy; | |
9448 | struct hpsa_sas_phy *next; | |
9449 | ||
9450 | list_for_each_entry_safe(hpsa_sas_phy, next, | |
9451 | &hpsa_sas_port->phy_list_head, phy_list_entry) | |
9452 | hpsa_free_sas_phy(hpsa_sas_phy); | |
9453 | ||
9454 | sas_port_delete(hpsa_sas_port->port); | |
9455 | list_del(&hpsa_sas_port->port_list_entry); | |
9456 | kfree(hpsa_sas_port); | |
9457 | } | |
9458 | ||
9459 | static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev) | |
9460 | { | |
9461 | struct hpsa_sas_node *hpsa_sas_node; | |
9462 | ||
9463 | hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL); | |
9464 | if (hpsa_sas_node) { | |
9465 | hpsa_sas_node->parent_dev = parent_dev; | |
9466 | INIT_LIST_HEAD(&hpsa_sas_node->port_list_head); | |
9467 | } | |
9468 | ||
9469 | return hpsa_sas_node; | |
9470 | } | |
9471 | ||
9472 | static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node) | |
9473 | { | |
9474 | struct hpsa_sas_port *hpsa_sas_port; | |
9475 | struct hpsa_sas_port *next; | |
9476 | ||
9477 | if (!hpsa_sas_node) | |
9478 | return; | |
9479 | ||
9480 | list_for_each_entry_safe(hpsa_sas_port, next, | |
9481 | &hpsa_sas_node->port_list_head, port_list_entry) | |
9482 | hpsa_free_sas_port(hpsa_sas_port); | |
9483 | ||
9484 | kfree(hpsa_sas_node); | |
9485 | } | |
9486 | ||
9487 | static struct hpsa_scsi_dev_t | |
9488 | *hpsa_find_device_by_sas_rphy(struct ctlr_info *h, | |
9489 | struct sas_rphy *rphy) | |
9490 | { | |
9491 | int i; | |
9492 | struct hpsa_scsi_dev_t *device; | |
9493 | ||
9494 | for (i = 0; i < h->ndevices; i++) { | |
9495 | device = h->dev[i]; | |
9496 | if (!device->sas_port) | |
9497 | continue; | |
9498 | if (device->sas_port->rphy == rphy) | |
9499 | return device; | |
9500 | } | |
9501 | ||
9502 | return NULL; | |
9503 | } | |
9504 | ||
9505 | static int hpsa_add_sas_host(struct ctlr_info *h) | |
9506 | { | |
9507 | int rc; | |
9508 | struct device *parent_dev; | |
9509 | struct hpsa_sas_node *hpsa_sas_node; | |
9510 | struct hpsa_sas_port *hpsa_sas_port; | |
9511 | struct hpsa_sas_phy *hpsa_sas_phy; | |
9512 | ||
9513 | parent_dev = &h->scsi_host->shost_gendev; | |
9514 | ||
9515 | hpsa_sas_node = hpsa_alloc_sas_node(parent_dev); | |
9516 | if (!hpsa_sas_node) | |
9517 | return -ENOMEM; | |
9518 | ||
9519 | hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address); | |
9520 | if (!hpsa_sas_port) { | |
9521 | rc = -ENODEV; | |
9522 | goto free_sas_node; | |
9523 | } | |
9524 | ||
9525 | hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port); | |
9526 | if (!hpsa_sas_phy) { | |
9527 | rc = -ENODEV; | |
9528 | goto free_sas_port; | |
9529 | } | |
9530 | ||
9531 | rc = hpsa_sas_port_add_phy(hpsa_sas_phy); | |
9532 | if (rc) | |
9533 | goto free_sas_phy; | |
9534 | ||
9535 | h->sas_host = hpsa_sas_node; | |
9536 | ||
9537 | return 0; | |
9538 | ||
9539 | free_sas_phy: | |
9540 | hpsa_free_sas_phy(hpsa_sas_phy); | |
9541 | free_sas_port: | |
9542 | hpsa_free_sas_port(hpsa_sas_port); | |
9543 | free_sas_node: | |
9544 | hpsa_free_sas_node(hpsa_sas_node); | |
9545 | ||
9546 | return rc; | |
9547 | } | |
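/* The SAS transport objects form a strict hierarchy -- node, port, phy --
 * and the error unwinding above releases them in reverse order of
 * allocation.  hpsa_free_sas_node() cascades the same way at teardown,
 * through hpsa_free_sas_port() and hpsa_free_sas_phy().
 */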
9548 | ||
9549 | static void hpsa_delete_sas_host(struct ctlr_info *h) | |
9550 | { | |
9551 | hpsa_free_sas_node(h->sas_host); | |
9552 | } | |
9553 | ||
9554 | static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node, | |
9555 | struct hpsa_scsi_dev_t *device) | |
9556 | { | |
9557 | int rc; | |
9558 | struct hpsa_sas_port *hpsa_sas_port; | |
9559 | struct sas_rphy *rphy; | |
9560 | ||
9561 | hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address); | |
9562 | if (!hpsa_sas_port) | |
9563 | return -ENOMEM; | |
9564 | ||
9565 | rphy = sas_end_device_alloc(hpsa_sas_port->port); | |
9566 | if (!rphy) { | |
9567 | rc = -ENODEV; | |
9568 | goto free_sas_port; | |
9569 | } | |
9570 | ||
9571 | hpsa_sas_port->rphy = rphy; | |
9572 | device->sas_port = hpsa_sas_port; | |
9573 | ||
9574 | rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy); | |
9575 | if (rc) | |
9576 | goto free_sas_port; | |
9577 | ||
9578 | return 0; | |
9579 | ||
9580 | free_sas_port: | |
9581 | hpsa_free_sas_port(hpsa_sas_port); | |
9582 | device->sas_port = NULL; | |
9583 | ||
9584 | return rc; | |
9585 | } | |
9586 | ||
9587 | static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device) | |
9588 | { | |
9589 | if (device->sas_port) { | |
9590 | hpsa_free_sas_port(device->sas_port); | |
9591 | device->sas_port = NULL; | |
9592 | } | |
9593 | } | |
9594 | ||
9595 | static int | |
9596 | hpsa_sas_get_linkerrors(struct sas_phy *phy) | |
9597 | { | |
9598 | return 0; | |
9599 | } | |
9600 | ||
9601 | static int | |
9602 | hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) | |
9603 | { | |
9604 | return 0; | |
9605 | } | |
9606 | ||
9607 | static int | |
9608 | hpsa_sas_get_bay_identifier(struct sas_rphy *rphy) | |
9609 | { | |
9610 | return -ENXIO; | |
9611 | } | |
9612 | ||
9613 | static int | |
9614 | hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset) | |
9615 | { | |
9616 | return 0; | |
9617 | } | |
9618 | ||
9619 | static int | |
9620 | hpsa_sas_phy_enable(struct sas_phy *phy, int enable) | |
9621 | { | |
9622 | return 0; | |
9623 | } | |
9624 | ||
9625 | static int | |
9626 | hpsa_sas_phy_setup(struct sas_phy *phy) | |
9627 | { | |
9628 | return 0; | |
9629 | } | |
9630 | ||
9631 | static void | |
9632 | hpsa_sas_phy_release(struct sas_phy *phy) | |
9633 | { | |
9634 | } | |
9635 | ||
9636 | static int | |
9637 | hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) | |
9638 | { | |
9639 | return -EINVAL; | |
9640 | } | |
9641 | ||
9642 | /* SMP = Serial Management Protocol */ | |
9643 | static int | |
9644 | hpsa_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |
9645 | struct request *req) | |
9646 | { | |
9647 | return -EINVAL; | |
9648 | } | |
9649 | ||
9650 | static struct sas_function_template hpsa_sas_transport_functions = { | |
9651 | .get_linkerrors = hpsa_sas_get_linkerrors, | |
9652 | .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier, | |
9653 | .get_bay_identifier = hpsa_sas_get_bay_identifier, | |
9654 | .phy_reset = hpsa_sas_phy_reset, | |
9655 | .phy_enable = hpsa_sas_phy_enable, | |
9656 | .phy_setup = hpsa_sas_phy_setup, | |
9657 | .phy_release = hpsa_sas_phy_release, | |
9658 | .set_phy_speed = hpsa_sas_phy_speed, | |
9659 | .smp_handler = hpsa_sas_smp_handler, | |
9660 | }; | |
9661 | ||
edd16368 SC |
9662 | /* |
9663 | * This is it. Register the PCI driver information for the cards we control;
9664 | * the OS will call our registered routines when it finds one of our cards. | |
9665 | */ | |
9666 | static int __init hpsa_init(void) | |
9667 | { | |
d04e62b9 KB |
9668 | int rc; |
9669 | ||
9670 | hpsa_sas_transport_template = | |
9671 | sas_attach_transport(&hpsa_sas_transport_functions); | |
9672 | if (!hpsa_sas_transport_template) | |
9673 | return -ENODEV; | |
9674 | ||
9675 | rc = pci_register_driver(&hpsa_pci_driver); | |
9676 | ||
9677 | if (rc) | |
9678 | sas_release_transport(hpsa_sas_transport_template); | |
9679 | ||
9680 | return rc; | |
edd16368 SC |
9681 | } |
9682 | ||
9683 | static void __exit hpsa_cleanup(void) | |
9684 | { | |
9685 | pci_unregister_driver(&hpsa_pci_driver); | |
d04e62b9 | 9686 | sas_release_transport(hpsa_sas_transport_template); |
edd16368 SC |
9687 | } |
9688 | ||
e1f7de0c MG |
9689 | static void __attribute__((unused)) verify_offsets(void) |
9690 | { | |
dd0e19f3 ST |
9691 | #define VERIFY_OFFSET(member, offset) \ |
9692 | BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) | |
9693 | ||
9694 | VERIFY_OFFSET(structure_size, 0); | |
9695 | VERIFY_OFFSET(volume_blk_size, 4); | |
9696 | VERIFY_OFFSET(volume_blk_cnt, 8); | |
9697 | VERIFY_OFFSET(phys_blk_shift, 16); | |
9698 | VERIFY_OFFSET(parity_rotation_shift, 17); | |
9699 | VERIFY_OFFSET(strip_size, 18); | |
9700 | VERIFY_OFFSET(disk_starting_blk, 20); | |
9701 | VERIFY_OFFSET(disk_blk_cnt, 28); | |
9702 | VERIFY_OFFSET(data_disks_per_row, 36); | |
9703 | VERIFY_OFFSET(metadata_disks_per_row, 38); | |
9704 | VERIFY_OFFSET(row_cnt, 40); | |
9705 | VERIFY_OFFSET(layout_map_count, 42); | |
9706 | VERIFY_OFFSET(flags, 44); | |
9707 | VERIFY_OFFSET(dekindex, 46); | |
9708 | /* VERIFY_OFFSET(reserved, 48 */ | |
9709 | VERIFY_OFFSET(data, 64); | |
9710 | ||
9711 | #undef VERIFY_OFFSET | |
9712 | ||
b66cc250 MM |
9713 | #define VERIFY_OFFSET(member, offset) \ |
9714 | BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) | |
9715 | ||
9716 | VERIFY_OFFSET(IU_type, 0); | |
9717 | VERIFY_OFFSET(direction, 1); | |
9718 | VERIFY_OFFSET(reply_queue, 2); | |
9719 | /* VERIFY_OFFSET(reserved1, 3); */ | |
9720 | VERIFY_OFFSET(scsi_nexus, 4); | |
9721 | VERIFY_OFFSET(Tag, 8); | |
9722 | VERIFY_OFFSET(cdb, 16); | |
9723 | VERIFY_OFFSET(cciss_lun, 32); | |
9724 | VERIFY_OFFSET(data_len, 40); | |
9725 | VERIFY_OFFSET(cmd_priority_task_attr, 44); | |
9726 | VERIFY_OFFSET(sg_count, 45); | |
9727 | /* VERIFY_OFFSET(reserved3 */ | |
9728 | VERIFY_OFFSET(err_ptr, 48); | |
9729 | VERIFY_OFFSET(err_len, 56); | |
9730 | /* VERIFY_OFFSET(reserved4 */ | |
9731 | VERIFY_OFFSET(sg, 64); | |
9732 | ||
9733 | #undef VERIFY_OFFSET | |
9734 | ||
e1f7de0c MG |
9735 | #define VERIFY_OFFSET(member, offset) \ |
9736 | BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) | |
9737 | ||
9738 | VERIFY_OFFSET(dev_handle, 0x00); | |
9739 | VERIFY_OFFSET(reserved1, 0x02); | |
9740 | VERIFY_OFFSET(function, 0x03); | |
9741 | VERIFY_OFFSET(reserved2, 0x04); | |
9742 | VERIFY_OFFSET(err_info, 0x0C); | |
9743 | VERIFY_OFFSET(reserved3, 0x10); | |
9744 | VERIFY_OFFSET(err_info_len, 0x12); | |
9745 | VERIFY_OFFSET(reserved4, 0x13); | |
9746 | VERIFY_OFFSET(sgl_offset, 0x14); | |
9747 | VERIFY_OFFSET(reserved5, 0x15); | |
9748 | VERIFY_OFFSET(transfer_len, 0x1C); | |
9749 | VERIFY_OFFSET(reserved6, 0x20); | |
9750 | VERIFY_OFFSET(io_flags, 0x24); | |
9751 | VERIFY_OFFSET(reserved7, 0x26); | |
9752 | VERIFY_OFFSET(LUN, 0x34); | |
9753 | VERIFY_OFFSET(control, 0x3C); | |
9754 | VERIFY_OFFSET(CDB, 0x40); | |
9755 | VERIFY_OFFSET(reserved8, 0x50); | |
9756 | VERIFY_OFFSET(host_context_flags, 0x60); | |
9757 | VERIFY_OFFSET(timeout_sec, 0x62); | |
9758 | VERIFY_OFFSET(ReplyQueue, 0x64); | |
9759 | VERIFY_OFFSET(reserved9, 0x65); | |
50a0decf | 9760 | VERIFY_OFFSET(tag, 0x68); |
e1f7de0c MG |
9761 | VERIFY_OFFSET(host_addr, 0x70); |
9762 | VERIFY_OFFSET(CISS_LUN, 0x78); | |
9763 | VERIFY_OFFSET(SG, 0x78 + 8); | |
9764 | #undef VERIFY_OFFSET | |
9765 | } | |
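/* Each VERIFY_OFFSET expands to BUILD_BUG_ON(offsetof(...) != offset),
 * so any structure change that shifts a hardware-visible field breaks
 * the build instead of silently corrupting the command ABI at run time;
 * inserting a field before "tag" in struct io_accel1_cmd, for example,
 * would trip VERIFY_OFFSET(tag, 0x68) at compile time.
 */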
9766 | ||
edd16368 SC |
9767 | module_init(hpsa_init); |
9768 | module_exit(hpsa_cleanup); |