1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <linux/fs.h>
47 #include <linux/compat.h>
48 #include <linux/blkdev.h>
49 #include <linux/mutex.h>
50 #include <linux/poll.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_tcq.h>
57 #include "megaraid_sas_fusion.h"
58 #include "megaraid_sas.h"
59
60 /*
61 * Number of sectors per IO command
62 * Will be set in megasas_init_mfi if user does not provide
63 */
64 static unsigned int max_sectors;
65 module_param_named(max_sectors, max_sectors, int, 0);
66 MODULE_PARM_DESC(max_sectors,
67 "Maximum number of sectors per IO command");
68
69 static int msix_disable;
70 module_param(msix_disable, int, S_IRUGO);
71 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
72
73 static unsigned int msix_vectors;
74 module_param(msix_vectors, int, S_IRUGO);
75 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
76
77 static int allow_vf_ioctls;
78 module_param(allow_vf_ioctls, int, S_IRUGO);
79 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
80
81 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
82 module_param(throttlequeuedepth, int, S_IRUGO);
83 MODULE_PARM_DESC(throttlequeuedepth,
84 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
85
86 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
87 module_param(resetwaittime, int, S_IRUGO);
88 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
89 "before resetting adapter. Default: 180");
90
91 int smp_affinity_enable = 1;
92 module_param(smp_affinity_enable, int, S_IRUGO);
93 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
94
95 int rdpq_enable = 1;
96 module_param(rdpq_enable, int, S_IRUGO);
97 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
98
99 unsigned int dual_qdepth_disable;
100 module_param(dual_qdepth_disable, int, S_IRUGO);
101 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
102
103 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
104 module_param(scmd_timeout, int, S_IRUGO);
105 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
106
107 MODULE_LICENSE("GPL");
108 MODULE_VERSION(MEGASAS_VERSION);
109 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
110 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
111
112 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
113 static int megasas_get_pd_list(struct megasas_instance *instance);
114 static int megasas_ld_list_query(struct megasas_instance *instance,
115 u8 query_type);
116 static int megasas_issue_init_mfi(struct megasas_instance *instance);
117 static int megasas_register_aen(struct megasas_instance *instance,
118 u32 seq_num, u32 class_locale_word);
119 static int
120 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
121 /*
122 * PCI ID table for all supported controllers
123 */
124 static struct pci_device_id megasas_pci_table[] = {
125
126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
127 /* xscale IOP */
128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
129 /* ppc IOP */
130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
131 /* ppc IOP */
132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
133 /* gen2*/
134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
135 /* gen2*/
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
137 /* skinny*/
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
139 /* skinny*/
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
141 /* xscale IOP, vega */
142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
143 /* xscale IOP */
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
145 /* Fusion */
146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
147 /* Plasma */
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
149 /* Invader */
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
151 /* Fury */
152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
153 /* Intruder */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
155 /* Intruder 24 port*/
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
158 {}
159 };
160
161 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
162
163 static int megasas_mgmt_majorno;
164 struct megasas_mgmt_info megasas_mgmt_info;
165 static struct fasync_struct *megasas_async_queue;
166 static DEFINE_MUTEX(megasas_async_queue_mutex);
167
168 static int megasas_poll_wait_aen;
169 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
170 static u32 support_poll_for_event;
171 u32 megasas_dbg_lvl;
172 static u32 support_device_change;
173
174 /* define lock for aen poll */
175 spinlock_t poll_aen_lock;
176
177 void
178 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
179 u8 alt_status);
180 static u32
181 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
182 static int
183 megasas_adp_reset_gen2(struct megasas_instance *instance,
184 struct megasas_register_set __iomem *reg_set);
185 static irqreturn_t megasas_isr(int irq, void *devp);
186 static u32
187 megasas_init_adapter_mfi(struct megasas_instance *instance);
188 u32
189 megasas_build_and_issue_cmd(struct megasas_instance *instance,
190 struct scsi_cmnd *scmd);
191 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
192 int
193 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
194 int seconds);
195 void megasas_fusion_ocr_wq(struct work_struct *work);
196 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
197 int initial);
198
199 int
200 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
201 {
202 instance->instancet->fire_cmd(instance,
203 cmd->frame_phys_addr, 0, instance->reg_set);
204 return 0;
205 }
206
207 /**
208 * megasas_get_cmd - Get a command from the free pool
209 * @instance: Adapter soft state
210 *
211 * Returns a free command from the pool
212 */
213 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
214 *instance)
215 {
216 unsigned long flags;
217 struct megasas_cmd *cmd = NULL;
218
219 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
220
221 if (!list_empty(&instance->cmd_pool)) {
222 cmd = list_entry((&instance->cmd_pool)->next,
223 struct megasas_cmd, list);
224 list_del_init(&cmd->list);
225 } else {
226 dev_err(&instance->pdev->dev, "Command pool empty!\n");
227 }
228
229 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
230 return cmd;
231 }
232
233 /**
234 * megasas_return_cmd - Return a cmd to free command pool
235 * @instance: Adapter soft state
236 * @cmd: Command packet to be returned to free command pool
237 */
238 inline void
239 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
240 {
241 unsigned long flags;
242 u32 blk_tags;
243 struct megasas_cmd_fusion *cmd_fusion;
244 struct fusion_context *fusion = instance->ctrl_context;
245
246 /* This flag is used only for fusion adapter.
247 * Wait for Interrupt for Polled mode DCMD
248 */
249 if (cmd->flags & DRV_DCMD_POLLED_MODE)
250 return;
251
252 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
253
254 if (fusion) {
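		/*
		 * On fusion controllers each MFI command has a shadow fusion
		 * command whose tag sits just past the range reserved for
		 * SCSI commands; release that shadow command as well.
		 */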
255 blk_tags = instance->max_scsi_cmds + cmd->index;
256 cmd_fusion = fusion->cmd_list[blk_tags];
257 megasas_return_cmd_fusion(instance, cmd_fusion);
258 }
259 cmd->scmd = NULL;
260 cmd->frame_count = 0;
261 cmd->flags = 0;
262 if (!fusion && reset_devices)
263 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
264 list_add(&cmd->list, (&instance->cmd_pool)->next);
265
266 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
267
268 }
269
270 static const char *
271 format_timestamp(uint32_t timestamp)
272 {
273 static char buffer[32];
274
275 if ((timestamp & 0xff000000) == 0xff000000)
276 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
277 0x00ffffff);
278 else
279 snprintf(buffer, sizeof(buffer), "%us", timestamp);
280 return buffer;
281 }
282
283 static const char *
284 format_class(int8_t class)
285 {
286 static char buffer[6];
287
288 switch (class) {
289 case MFI_EVT_CLASS_DEBUG:
290 return "debug";
291 case MFI_EVT_CLASS_PROGRESS:
292 return "progress";
293 case MFI_EVT_CLASS_INFO:
294 return "info";
295 case MFI_EVT_CLASS_WARNING:
296 return "WARN";
297 case MFI_EVT_CLASS_CRITICAL:
298 return "CRIT";
299 case MFI_EVT_CLASS_FATAL:
300 return "FATAL";
301 case MFI_EVT_CLASS_DEAD:
302 return "DEAD";
303 default:
304 snprintf(buffer, sizeof(buffer), "%d", class);
305 return buffer;
306 }
307 }
308
309 /**
310  * megasas_decode_evt - Decode FW AEN event and print critical events
311  * for information.
312 * @instance: Adapter soft state
313 */
314 static void
315 megasas_decode_evt(struct megasas_instance *instance)
316 {
317 struct megasas_evt_detail *evt_detail = instance->evt_detail;
318 union megasas_evt_class_locale class_locale;
319 class_locale.word = le32_to_cpu(evt_detail->cl.word);
320
321 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
322 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
323 le32_to_cpu(evt_detail->seq_num),
324 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
325 (class_locale.members.locale),
326 format_class(class_locale.members.class),
327 evt_detail->description);
328 }
329
330 /**
331 * The following functions are defined for xscale
332 * (deviceid : 1064R, PERC5) controllers
333 */
334
335 /**
336 * megasas_enable_intr_xscale - Enables interrupts
337 * @regs: MFI register set
338 */
339 static inline void
340 megasas_enable_intr_xscale(struct megasas_instance *instance)
341 {
342 struct megasas_register_set __iomem *regs;
343
344 regs = instance->reg_set;
345 writel(0, &(regs)->outbound_intr_mask);
346
347 /* Dummy readl to force pci flush */
348 readl(&regs->outbound_intr_mask);
349 }
350
351 /**
352 * megasas_disable_intr_xscale -Disables interrupt
353 * @regs: MFI register set
354 */
355 static inline void
356 megasas_disable_intr_xscale(struct megasas_instance *instance)
357 {
358 struct megasas_register_set __iomem *regs;
359 u32 mask = 0x1f;
360
361 regs = instance->reg_set;
362 writel(mask, &regs->outbound_intr_mask);
363 /* Dummy readl to force pci flush */
364 readl(&regs->outbound_intr_mask);
365 }
366
367 /**
368 * megasas_read_fw_status_reg_xscale - returns the current FW status value
369 * @regs: MFI register set
370 */
371 static u32
372 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
373 {
374 return readl(&(regs)->outbound_msg_0);
375 }
376 /**
377  * megasas_clear_intr_xscale - Check & clear interrupt
378 * @regs: MFI register set
379 */
380 static int
381 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
382 {
383 u32 status;
384 u32 mfiStatus = 0;
385
386 /*
387 * Check if it is our interrupt
388 */
389 status = readl(&regs->outbound_intr_status);
390
391 if (status & MFI_OB_INTR_STATUS_MASK)
392 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
393 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
394 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
395
396 /*
397 * Clear the interrupt by writing back the same value
398 */
399 if (mfiStatus)
400 writel(status, &regs->outbound_intr_status);
401
402 /* Dummy readl to force pci flush */
403 readl(&regs->outbound_intr_status);
404
405 return mfiStatus;
406 }
407
408 /**
409 * megasas_fire_cmd_xscale - Sends command to the FW
410 * @frame_phys_addr : Physical address of cmd
411 * @frame_count : Number of frames for the command
412 * @regs : MFI register set
413 */
414 static inline void
415 megasas_fire_cmd_xscale(struct megasas_instance *instance,
416 dma_addr_t frame_phys_addr,
417 u32 frame_count,
418 struct megasas_register_set __iomem *regs)
419 {
420 unsigned long flags;
421
422 spin_lock_irqsave(&instance->hba_lock, flags);
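	/*
	 * The inbound queue port takes the frame's bus address shifted
	 * right by 3 with the frame count packed into the low bits; frames
	 * are allocated with sufficient alignment that the shifted address
	 * leaves those bits free for the count.
	 */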
423 writel((frame_phys_addr >> 3)|(frame_count),
424 &(regs)->inbound_queue_port);
425 spin_unlock_irqrestore(&instance->hba_lock, flags);
426 }
427
428 /**
429 * megasas_adp_reset_xscale - For controller reset
430 * @regs: MFI register set
431 */
432 static int
433 megasas_adp_reset_xscale(struct megasas_instance *instance,
434 struct megasas_register_set __iomem *regs)
435 {
436 u32 i;
437 u32 pcidata;
438
439 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
440
441 for (i = 0; i < 3; i++)
442 msleep(1000); /* sleep for 3 secs */
443 pcidata = 0;
444 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
445 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
446 if (pcidata & 0x2) {
447 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
448 pcidata &= ~0x2;
449 pci_write_config_dword(instance->pdev,
450 MFI_1068_PCSR_OFFSET, pcidata);
451
452 for (i = 0; i < 2; i++)
453 msleep(1000); /* need to wait 2 secs again */
454
455 pcidata = 0;
456 pci_read_config_dword(instance->pdev,
457 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
458 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
459 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
460 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
461 pcidata = 0;
462 pci_write_config_dword(instance->pdev,
463 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
464 }
465 }
466 return 0;
467 }
468
469 /**
470 * megasas_check_reset_xscale - For controller reset check
471 * @regs: MFI register set
472 */
473 static int
474 megasas_check_reset_xscale(struct megasas_instance *instance,
475 struct megasas_register_set __iomem *regs)
476 {
477 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
478 (le32_to_cpu(*instance->consumer) ==
479 MEGASAS_ADPRESET_INPROG_SIGN))
480 return 1;
481 return 0;
482 }
483
484 static struct megasas_instance_template megasas_instance_template_xscale = {
485
486 .fire_cmd = megasas_fire_cmd_xscale,
487 .enable_intr = megasas_enable_intr_xscale,
488 .disable_intr = megasas_disable_intr_xscale,
489 .clear_intr = megasas_clear_intr_xscale,
490 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
491 .adp_reset = megasas_adp_reset_xscale,
492 .check_reset = megasas_check_reset_xscale,
493 .service_isr = megasas_isr,
494 .tasklet = megasas_complete_cmd_dpc,
495 .init_adapter = megasas_init_adapter_mfi,
496 .build_and_issue_cmd = megasas_build_and_issue_cmd,
497 .issue_dcmd = megasas_issue_dcmd,
498 };
499
500 /**
501 * This is the end of the set of functions & definitions specific
502 * to xscale (deviceid : 1064R, PERC5) controllers
503 */
504
505 /**
506 * The following functions are defined for ppc (deviceid : 0x60)
507 * controllers
508 */
509
510 /**
511 * megasas_enable_intr_ppc - Enables interrupts
512 * @regs: MFI register set
513 */
514 static inline void
515 megasas_enable_intr_ppc(struct megasas_instance *instance)
516 {
517 struct megasas_register_set __iomem *regs;
518
519 regs = instance->reg_set;
520 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
521
522 writel(~0x80000000, &(regs)->outbound_intr_mask);
523
524 /* Dummy readl to force pci flush */
525 readl(&regs->outbound_intr_mask);
526 }
527
528 /**
529 * megasas_disable_intr_ppc - Disable interrupt
530 * @regs: MFI register set
531 */
532 static inline void
533 megasas_disable_intr_ppc(struct megasas_instance *instance)
534 {
535 struct megasas_register_set __iomem *regs;
536 u32 mask = 0xFFFFFFFF;
537
538 regs = instance->reg_set;
539 writel(mask, &regs->outbound_intr_mask);
540 /* Dummy readl to force pci flush */
541 readl(&regs->outbound_intr_mask);
542 }
543
544 /**
545 * megasas_read_fw_status_reg_ppc - returns the current FW status value
546 * @regs: MFI register set
547 */
548 static u32
549 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
550 {
551 return readl(&(regs)->outbound_scratch_pad);
552 }
553
554 /**
555  * megasas_clear_intr_ppc - Check & clear interrupt
556 * @regs: MFI register set
557 */
558 static int
559 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
560 {
561 u32 status, mfiStatus = 0;
562
563 /*
564 * Check if it is our interrupt
565 */
566 status = readl(&regs->outbound_intr_status);
567
568 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
569 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
570
571 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
572 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
573
574 /*
575 * Clear the interrupt by writing back the same value
576 */
577 writel(status, &regs->outbound_doorbell_clear);
578
579 /* Dummy readl to force pci flush */
580 readl(&regs->outbound_doorbell_clear);
581
582 return mfiStatus;
583 }
584
585 /**
586 * megasas_fire_cmd_ppc - Sends command to the FW
587 * @frame_phys_addr : Physical address of cmd
588 * @frame_count : Number of frames for the command
589 * @regs : MFI register set
590 */
591 static inline void
592 megasas_fire_cmd_ppc(struct megasas_instance *instance,
593 dma_addr_t frame_phys_addr,
594 u32 frame_count,
595 struct megasas_register_set __iomem *regs)
596 {
597 unsigned long flags;
598
599 spin_lock_irqsave(&instance->hba_lock, flags);
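	/*
	 * Pack the frame count (shifted left by one) into the low bits of
	 * the 64-byte-aligned frame address and set bit 0 before posting
	 * to the inbound queue port.
	 */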
600 writel((frame_phys_addr | (frame_count<<1))|1,
601 &(regs)->inbound_queue_port);
602 spin_unlock_irqrestore(&instance->hba_lock, flags);
603 }
604
605 /**
606 * megasas_check_reset_ppc - For controller reset check
607 * @regs: MFI register set
608 */
609 static int
610 megasas_check_reset_ppc(struct megasas_instance *instance,
611 struct megasas_register_set __iomem *regs)
612 {
613 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
614 return 1;
615
616 return 0;
617 }
618
619 static struct megasas_instance_template megasas_instance_template_ppc = {
620
621 .fire_cmd = megasas_fire_cmd_ppc,
622 .enable_intr = megasas_enable_intr_ppc,
623 .disable_intr = megasas_disable_intr_ppc,
624 .clear_intr = megasas_clear_intr_ppc,
625 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
626 .adp_reset = megasas_adp_reset_xscale,
627 .check_reset = megasas_check_reset_ppc,
628 .service_isr = megasas_isr,
629 .tasklet = megasas_complete_cmd_dpc,
630 .init_adapter = megasas_init_adapter_mfi,
631 .build_and_issue_cmd = megasas_build_and_issue_cmd,
632 .issue_dcmd = megasas_issue_dcmd,
633 };
634
635 /**
636 * megasas_enable_intr_skinny - Enables interrupts
637 * @regs: MFI register set
638 */
639 static inline void
640 megasas_enable_intr_skinny(struct megasas_instance *instance)
641 {
642 struct megasas_register_set __iomem *regs;
643
644 regs = instance->reg_set;
645 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
646
647 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
648
649 /* Dummy readl to force pci flush */
650 readl(&regs->outbound_intr_mask);
651 }
652
653 /**
654 * megasas_disable_intr_skinny - Disables interrupt
655 * @regs: MFI register set
656 */
657 static inline void
658 megasas_disable_intr_skinny(struct megasas_instance *instance)
659 {
660 struct megasas_register_set __iomem *regs;
661 u32 mask = 0xFFFFFFFF;
662
663 regs = instance->reg_set;
664 writel(mask, &regs->outbound_intr_mask);
665 /* Dummy readl to force pci flush */
666 readl(&regs->outbound_intr_mask);
667 }
668
669 /**
670 * megasas_read_fw_status_reg_skinny - returns the current FW status value
671 * @regs: MFI register set
672 */
673 static u32
674 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
675 {
676 return readl(&(regs)->outbound_scratch_pad);
677 }
678
679 /**
680  * megasas_clear_intr_skinny - Check & clear interrupt
681 * @regs: MFI register set
682 */
683 static int
684 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
685 {
686 u32 status;
687 u32 mfiStatus = 0;
688
689 /*
690 * Check if it is our interrupt
691 */
692 status = readl(&regs->outbound_intr_status);
693
694 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
695 return 0;
696 }
697
698 /*
699 * Check if it is our interrupt
700 */
701 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
702 MFI_STATE_FAULT) {
703 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
704 } else
705 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
706
707 /*
708 * Clear the interrupt by writing back the same value
709 */
710 writel(status, &regs->outbound_intr_status);
711
712 /*
713 * dummy read to flush PCI
714 */
715 readl(&regs->outbound_intr_status);
716
717 return mfiStatus;
718 }
719
720 /**
721 * megasas_fire_cmd_skinny - Sends command to the FW
722 * @frame_phys_addr : Physical address of cmd
723 * @frame_count : Number of frames for the command
724 * @regs : MFI register set
725 */
726 static inline void
727 megasas_fire_cmd_skinny(struct megasas_instance *instance,
728 dma_addr_t frame_phys_addr,
729 u32 frame_count,
730 struct megasas_register_set __iomem *regs)
731 {
732 unsigned long flags;
733
734 spin_lock_irqsave(&instance->hba_lock, flags);
735 writel(upper_32_bits(frame_phys_addr),
736 &(regs)->inbound_high_queue_port);
737 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
738 &(regs)->inbound_low_queue_port);
739 mmiowb();
740 spin_unlock_irqrestore(&instance->hba_lock, flags);
741 }
742
743 /**
744 * megasas_check_reset_skinny - For controller reset check
745 * @regs: MFI register set
746 */
747 static int
748 megasas_check_reset_skinny(struct megasas_instance *instance,
749 struct megasas_register_set __iomem *regs)
750 {
751 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
752 return 1;
753
754 return 0;
755 }
756
757 static struct megasas_instance_template megasas_instance_template_skinny = {
758
759 .fire_cmd = megasas_fire_cmd_skinny,
760 .enable_intr = megasas_enable_intr_skinny,
761 .disable_intr = megasas_disable_intr_skinny,
762 .clear_intr = megasas_clear_intr_skinny,
763 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
764 .adp_reset = megasas_adp_reset_gen2,
765 .check_reset = megasas_check_reset_skinny,
766 .service_isr = megasas_isr,
767 .tasklet = megasas_complete_cmd_dpc,
768 .init_adapter = megasas_init_adapter_mfi,
769 .build_and_issue_cmd = megasas_build_and_issue_cmd,
770 .issue_dcmd = megasas_issue_dcmd,
771 };
772
773
774 /**
775 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
776 * controllers
777 */
778
779 /**
780 * megasas_enable_intr_gen2 - Enables interrupts
781 * @regs: MFI register set
782 */
783 static inline void
784 megasas_enable_intr_gen2(struct megasas_instance *instance)
785 {
786 struct megasas_register_set __iomem *regs;
787
788 regs = instance->reg_set;
789 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
790
791 	/* write ~0x00000005 (4 | 1) to the intr mask */
792 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
793
794 /* Dummy readl to force pci flush */
795 readl(&regs->outbound_intr_mask);
796 }
797
798 /**
799 * megasas_disable_intr_gen2 - Disables interrupt
800 * @regs: MFI register set
801 */
802 static inline void
803 megasas_disable_intr_gen2(struct megasas_instance *instance)
804 {
805 struct megasas_register_set __iomem *regs;
806 u32 mask = 0xFFFFFFFF;
807
808 regs = instance->reg_set;
809 writel(mask, &regs->outbound_intr_mask);
810 /* Dummy readl to force pci flush */
811 readl(&regs->outbound_intr_mask);
812 }
813
814 /**
815 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
816 * @regs: MFI register set
817 */
818 static u32
819 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
820 {
821 return readl(&(regs)->outbound_scratch_pad);
822 }
823
824 /**
825  * megasas_clear_intr_gen2 - Check & clear interrupt
826 * @regs: MFI register set
827 */
828 static int
829 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
830 {
831 u32 status;
832 u32 mfiStatus = 0;
833
834 /*
835 * Check if it is our interrupt
836 */
837 status = readl(&regs->outbound_intr_status);
838
839 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
840 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
841 }
842 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
843 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
844 }
845
846 /*
847 * Clear the interrupt by writing back the same value
848 */
849 if (mfiStatus)
850 writel(status, &regs->outbound_doorbell_clear);
851
852 /* Dummy readl to force pci flush */
853 readl(&regs->outbound_intr_status);
854
855 return mfiStatus;
856 }
857 /**
858 * megasas_fire_cmd_gen2 - Sends command to the FW
859 * @frame_phys_addr : Physical address of cmd
860 * @frame_count : Number of frames for the command
861 * @regs : MFI register set
862 */
863 static inline void
864 megasas_fire_cmd_gen2(struct megasas_instance *instance,
865 dma_addr_t frame_phys_addr,
866 u32 frame_count,
867 struct megasas_register_set __iomem *regs)
868 {
869 unsigned long flags;
870
871 spin_lock_irqsave(&instance->hba_lock, flags);
872 writel((frame_phys_addr | (frame_count<<1))|1,
873 &(regs)->inbound_queue_port);
874 spin_unlock_irqrestore(&instance->hba_lock, flags);
875 }
876
877 /**
878 * megasas_adp_reset_gen2 - For controller reset
879 * @regs: MFI register set
880 */
881 static int
882 megasas_adp_reset_gen2(struct megasas_instance *instance,
883 struct megasas_register_set __iomem *reg_set)
884 {
885 	u32 retry = 0;
886 u32 HostDiag;
887 u32 __iomem *seq_offset = &reg_set->seq_offset;
888 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
889
890 if (instance->instancet == &megasas_instance_template_skinny) {
891 seq_offset = &reg_set->fusion_seq_offset;
892 hostdiag_offset = &reg_set->fusion_host_diag;
893 }
894
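	/*
	 * Write the key sequence that unlocks the host diagnostic
	 * register; the loop below waits for DIAG_WRITE_ENABLE before
	 * requesting the adapter reset.
	 */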
895 writel(0, seq_offset);
896 writel(4, seq_offset);
897 writel(0xb, seq_offset);
898 writel(2, seq_offset);
899 writel(7, seq_offset);
900 writel(0xd, seq_offset);
901
902 msleep(1000);
903
904 HostDiag = (u32)readl(hostdiag_offset);
905
906 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
907 msleep(100);
908 HostDiag = (u32)readl(hostdiag_offset);
909 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
910 retry, HostDiag);
911
912 if (retry++ >= 100)
913 return 1;
914
915 }
916
917 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
918
919 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
920
921 ssleep(10);
922
923 HostDiag = (u32)readl(hostdiag_offset);
924 while (HostDiag & DIAG_RESET_ADAPTER) {
925 msleep(100);
926 HostDiag = (u32)readl(hostdiag_offset);
927 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
928 retry, HostDiag);
929
930 if (retry++ >= 1000)
931 return 1;
932
933 }
934 return 0;
935 }
936
937 /**
938 * megasas_check_reset_gen2 - For controller reset check
939 * @regs: MFI register set
940 */
941 static int
942 megasas_check_reset_gen2(struct megasas_instance *instance,
943 struct megasas_register_set __iomem *regs)
944 {
945 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
946 return 1;
947
948 return 0;
949 }
950
951 static struct megasas_instance_template megasas_instance_template_gen2 = {
952
953 .fire_cmd = megasas_fire_cmd_gen2,
954 .enable_intr = megasas_enable_intr_gen2,
955 .disable_intr = megasas_disable_intr_gen2,
956 .clear_intr = megasas_clear_intr_gen2,
957 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
958 .adp_reset = megasas_adp_reset_gen2,
959 .check_reset = megasas_check_reset_gen2,
960 .service_isr = megasas_isr,
961 .tasklet = megasas_complete_cmd_dpc,
962 .init_adapter = megasas_init_adapter_mfi,
963 .build_and_issue_cmd = megasas_build_and_issue_cmd,
964 .issue_dcmd = megasas_issue_dcmd,
965 };
966
967 /**
968  * This is the end of the set of functions & definitions
969 * specific to gen2 (deviceid : 0x78, 0x79) controllers
970 */
971
972 /*
973 * Template added for TB (Fusion)
974 */
975 extern struct megasas_instance_template megasas_instance_template_fusion;
976
977 /**
978 * megasas_issue_polled - Issues a polling command
979 * @instance: Adapter soft state
980 * @cmd: Command packet to be issued
981 *
982 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
983 */
984 int
985 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
986 {
987 struct megasas_header *frame_hdr = &cmd->frame->hdr;
988
989 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
990 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
991
992 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
993 (instance->instancet->issue_dcmd(instance, cmd))) {
994 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
995 __func__, __LINE__);
996 return DCMD_NOT_FIRED;
997 }
998
999 return wait_and_poll(instance, cmd, instance->requestorId ?
1000 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1001 }
1002
1003 /**
1004 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1005 * @instance: Adapter soft state
1006 * @cmd: Command to be issued
1007 * @timeout: Timeout in seconds
1008 *
1009 * This function waits on an event for the command to be returned from ISR.
1010 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1011 * Used to issue ioctl commands.
1012 */
1013 int
1014 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1015 struct megasas_cmd *cmd, int timeout)
1016 {
1017 int ret = 0;
1018 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1019
1020 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1021 (instance->instancet->issue_dcmd(instance, cmd))) {
1022 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1023 __func__, __LINE__);
1024 return DCMD_NOT_FIRED;
1025 }
1026
1027 if (timeout) {
1028 ret = wait_event_timeout(instance->int_cmd_wait_q,
1029 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
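		/*
		 * wait_event_timeout() returns 0 only when the timeout
		 * elapses with the condition still false, i.e. the DCMD
		 * never completed.
		 */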
1030 if (!ret) {
1031 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1032 __func__, __LINE__);
1033 return DCMD_TIMEOUT;
1034 }
1035 } else
1036 wait_event(instance->int_cmd_wait_q,
1037 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1038
1039 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1040 DCMD_SUCCESS : DCMD_FAILED;
1041 }
1042
1043 /**
1044 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1045 * @instance: Adapter soft state
1046 * @cmd_to_abort: Previously issued cmd to be aborted
1047 * @timeout: Timeout in seconds
1048 *
1049  * MFI firmware can abort previously issued AEN command (automatic event
1050 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1051 * cmd and waits for return status.
1052 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1053 */
1054 static int
1055 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1056 struct megasas_cmd *cmd_to_abort, int timeout)
1057 {
1058 struct megasas_cmd *cmd;
1059 struct megasas_abort_frame *abort_fr;
1060 int ret = 0;
1061
1062 cmd = megasas_get_cmd(instance);
1063
1064 if (!cmd)
1065 return -1;
1066
1067 abort_fr = &cmd->frame->abort;
1068
1069 /*
1070 * Prepare and issue the abort frame
1071 */
1072 abort_fr->cmd = MFI_CMD_ABORT;
1073 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1074 abort_fr->flags = cpu_to_le16(0);
1075 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1076 abort_fr->abort_mfi_phys_addr_lo =
1077 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1078 abort_fr->abort_mfi_phys_addr_hi =
1079 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1080
1081 cmd->sync_cmd = 1;
1082 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1083
1084 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1085 (instance->instancet->issue_dcmd(instance, cmd))) {
1086 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1087 __func__, __LINE__);
1088 return DCMD_NOT_FIRED;
1089 }
1090
1091 if (timeout) {
1092 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1093 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1094 if (!ret) {
1095 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1096 __func__, __LINE__);
1097 return DCMD_TIMEOUT;
1098 }
1099 } else
1100 wait_event(instance->abort_cmd_wait_q,
1101 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1102
1103 cmd->sync_cmd = 0;
1104
1105 megasas_return_cmd(instance, cmd);
1106 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1107 DCMD_SUCCESS : DCMD_FAILED;
1108 }
1109
1110 /**
1111 * megasas_make_sgl32 - Prepares 32-bit SGL
1112 * @instance: Adapter soft state
1113 * @scp: SCSI command from the mid-layer
1114 * @mfi_sgl: SGL to be filled in
1115 *
1116 * If successful, this function returns the number of SG elements. Otherwise,
1117  * it returns -1.
1118 */
1119 static int
1120 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1121 union megasas_sgl *mfi_sgl)
1122 {
1123 int i;
1124 int sge_count;
1125 struct scatterlist *os_sgl;
1126
1127 sge_count = scsi_dma_map(scp);
1128 BUG_ON(sge_count < 0);
1129
1130 if (sge_count) {
1131 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1132 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1133 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1134 }
1135 }
1136 return sge_count;
1137 }
1138
1139 /**
1140 * megasas_make_sgl64 - Prepares 64-bit SGL
1141 * @instance: Adapter soft state
1142 * @scp: SCSI command from the mid-layer
1143 * @mfi_sgl: SGL to be filled in
1144 *
1145 * If successful, this function returns the number of SG elements. Otherwise,
1146  * it returns -1.
1147 */
1148 static int
1149 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1150 union megasas_sgl *mfi_sgl)
1151 {
1152 int i;
1153 int sge_count;
1154 struct scatterlist *os_sgl;
1155
1156 sge_count = scsi_dma_map(scp);
1157 BUG_ON(sge_count < 0);
1158
1159 if (sge_count) {
1160 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1161 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1162 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1163 }
1164 }
1165 return sge_count;
1166 }
1167
1168 /**
1169 * megasas_make_sgl_skinny - Prepares IEEE SGL
1170 * @instance: Adapter soft state
1171 * @scp: SCSI command from the mid-layer
1172 * @mfi_sgl: SGL to be filled in
1173 *
1174 * If successful, this function returns the number of SG elements. Otherwise,
1175  * it returns -1.
1176 */
1177 static int
1178 megasas_make_sgl_skinny(struct megasas_instance *instance,
1179 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1180 {
1181 int i;
1182 int sge_count;
1183 struct scatterlist *os_sgl;
1184
1185 sge_count = scsi_dma_map(scp);
1186
1187 if (sge_count) {
1188 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1189 mfi_sgl->sge_skinny[i].length =
1190 cpu_to_le32(sg_dma_len(os_sgl));
1191 mfi_sgl->sge_skinny[i].phys_addr =
1192 cpu_to_le64(sg_dma_address(os_sgl));
1193 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1194 }
1195 }
1196 return sge_count;
1197 }
1198
1199 /**
1200 * megasas_get_frame_count - Computes the number of frames
1201  * @frame_type : type of frame - io or pthru frame
1202  * @sge_count : number of sg elements
1203  *
1204  * Returns the number of frames required for the given number of SGEs (sge_count)
1205 */
1206
1207 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1208 u8 sge_count, u8 frame_type)
1209 {
1210 int num_cnt;
1211 int sge_bytes;
1212 u32 sge_sz;
1213 u32 frame_count = 0;
1214
1215 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1216 sizeof(struct megasas_sge32);
1217
1218 if (instance->flag_ieee) {
1219 sge_sz = sizeof(struct megasas_sge_skinny);
1220 }
1221
1222 /*
1223 * Main frame can contain 2 SGEs for 64-bit SGLs and
1224 * 3 SGEs for 32-bit SGLs for ldio &
1225 	 * 1 SGE for 64-bit SGLs and
1226 * 2 SGEs for 32-bit SGLs for pthru frame
1227 */
1228 if (unlikely(frame_type == PTHRU_FRAME)) {
1229 if (instance->flag_ieee == 1) {
1230 num_cnt = sge_count - 1;
1231 } else if (IS_DMA64)
1232 num_cnt = sge_count - 1;
1233 else
1234 num_cnt = sge_count - 2;
1235 } else {
1236 if (instance->flag_ieee == 1) {
1237 num_cnt = sge_count - 1;
1238 } else if (IS_DMA64)
1239 num_cnt = sge_count - 2;
1240 else
1241 num_cnt = sge_count - 3;
1242 }
1243
1244 if (num_cnt > 0) {
1245 sge_bytes = sge_sz * num_cnt;
1246
1247 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1248 			((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0);
1249 }
1250 /* Main frame */
1251 frame_count += 1;
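	/*
	 * Worked example (assuming 12-byte 64-bit SGEs and 64-byte MFI
	 * frames): an LDIO with 10 SGEs keeps 2 in the main frame, so
	 * num_cnt = 8 and sge_bytes = 96, i.e. 2 extra frames plus the
	 * main frame for a total of 3.
	 */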
1252
1253 if (frame_count > 7)
1254 frame_count = 8;
1255 return frame_count;
1256 }
1257
1258 /**
1259 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1260 * @instance: Adapter soft state
1261 * @scp: SCSI command
1262 * @cmd: Command to be prepared in
1263 *
1264  * This function prepares CDB commands. These are typically pass-through
1265 * commands to the devices.
1266 */
1267 static int
1268 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1269 struct megasas_cmd *cmd)
1270 {
1271 u32 is_logical;
1272 u32 device_id;
1273 u16 flags = 0;
1274 struct megasas_pthru_frame *pthru;
1275
1276 is_logical = MEGASAS_IS_LOGICAL(scp);
1277 device_id = MEGASAS_DEV_INDEX(scp);
1278 pthru = (struct megasas_pthru_frame *)cmd->frame;
1279
1280 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1281 flags = MFI_FRAME_DIR_WRITE;
1282 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1283 flags = MFI_FRAME_DIR_READ;
1284 else if (scp->sc_data_direction == PCI_DMA_NONE)
1285 flags = MFI_FRAME_DIR_NONE;
1286
1287 if (instance->flag_ieee == 1) {
1288 flags |= MFI_FRAME_IEEE;
1289 }
1290
1291 /*
1292 * Prepare the DCDB frame
1293 */
1294 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1295 pthru->cmd_status = 0x0;
1296 pthru->scsi_status = 0x0;
1297 pthru->target_id = device_id;
1298 pthru->lun = scp->device->lun;
1299 pthru->cdb_len = scp->cmd_len;
1300 pthru->timeout = 0;
1301 pthru->pad_0 = 0;
1302 pthru->flags = cpu_to_le16(flags);
1303 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1304
1305 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1306
1307 /*
1308 * If the command is for the tape device, set the
1309 * pthru timeout to the os layer timeout value.
1310 */
1311 if (scp->device->type == TYPE_TAPE) {
1312 if ((scp->request->timeout / HZ) > 0xFFFF)
1313 pthru->timeout = cpu_to_le16(0xFFFF);
1314 else
1315 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1316 }
1317
1318 /*
1319 * Construct SGL
1320 */
1321 if (instance->flag_ieee == 1) {
1322 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1323 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1324 &pthru->sgl);
1325 } else if (IS_DMA64) {
1326 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1327 pthru->sge_count = megasas_make_sgl64(instance, scp,
1328 &pthru->sgl);
1329 } else
1330 pthru->sge_count = megasas_make_sgl32(instance, scp,
1331 &pthru->sgl);
1332
1333 if (pthru->sge_count > instance->max_num_sge) {
1334 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1335 pthru->sge_count);
1336 return 0;
1337 }
1338
1339 /*
1340 * Sense info specific
1341 */
1342 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1343 pthru->sense_buf_phys_addr_hi =
1344 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1345 pthru->sense_buf_phys_addr_lo =
1346 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1347
1348 /*
1349 * Compute the total number of frames this command consumes. FW uses
1350 * this number to pull sufficient number of frames from host memory.
1351 */
1352 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1353 PTHRU_FRAME);
1354
1355 return cmd->frame_count;
1356 }
1357
1358 /**
1359 * megasas_build_ldio - Prepares IOs to logical devices
1360 * @instance: Adapter soft state
1361 * @scp: SCSI command
1362 * @cmd: Command to be prepared
1363 *
1364 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1365 */
1366 static int
1367 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1368 struct megasas_cmd *cmd)
1369 {
1370 u32 device_id;
1371 u8 sc = scp->cmnd[0];
1372 u16 flags = 0;
1373 struct megasas_io_frame *ldio;
1374
1375 device_id = MEGASAS_DEV_INDEX(scp);
1376 ldio = (struct megasas_io_frame *)cmd->frame;
1377
1378 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1379 flags = MFI_FRAME_DIR_WRITE;
1380 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1381 flags = MFI_FRAME_DIR_READ;
1382
1383 if (instance->flag_ieee == 1) {
1384 flags |= MFI_FRAME_IEEE;
1385 }
1386
1387 /*
1388 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1389 */
1390 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1391 ldio->cmd_status = 0x0;
1392 ldio->scsi_status = 0x0;
1393 ldio->target_id = device_id;
1394 ldio->timeout = 0;
1395 ldio->reserved_0 = 0;
1396 ldio->pad_0 = 0;
1397 ldio->flags = cpu_to_le16(flags);
1398 ldio->start_lba_hi = 0;
1399 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1400
1401 /*
1402 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1403 */
1404 if (scp->cmd_len == 6) {
1405 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1406 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1407 ((u32) scp->cmnd[2] << 8) |
1408 (u32) scp->cmnd[3]);
1409
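		/*
		 * READ(6)/WRITE(6) carry only a 21-bit LBA; mask off the
		 * reserved/LUN bits that came from byte 1 of the CDB.
		 */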
1410 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1411 }
1412
1413 /*
1414 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1415 */
1416 else if (scp->cmd_len == 10) {
1417 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1418 ((u32) scp->cmnd[7] << 8));
1419 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1420 ((u32) scp->cmnd[3] << 16) |
1421 ((u32) scp->cmnd[4] << 8) |
1422 (u32) scp->cmnd[5]);
1423 }
1424
1425 /*
1426 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1427 */
1428 else if (scp->cmd_len == 12) {
1429 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1430 ((u32) scp->cmnd[7] << 16) |
1431 ((u32) scp->cmnd[8] << 8) |
1432 (u32) scp->cmnd[9]);
1433
1434 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1435 ((u32) scp->cmnd[3] << 16) |
1436 ((u32) scp->cmnd[4] << 8) |
1437 (u32) scp->cmnd[5]);
1438 }
1439
1440 /*
1441 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1442 */
1443 else if (scp->cmd_len == 16) {
1444 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1445 ((u32) scp->cmnd[11] << 16) |
1446 ((u32) scp->cmnd[12] << 8) |
1447 (u32) scp->cmnd[13]);
1448
1449 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1450 ((u32) scp->cmnd[7] << 16) |
1451 ((u32) scp->cmnd[8] << 8) |
1452 (u32) scp->cmnd[9]);
1453
1454 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1455 ((u32) scp->cmnd[3] << 16) |
1456 ((u32) scp->cmnd[4] << 8) |
1457 (u32) scp->cmnd[5]);
1458
1459 }
1460
1461 /*
1462 * Construct SGL
1463 */
1464 if (instance->flag_ieee) {
1465 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1466 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1467 &ldio->sgl);
1468 } else if (IS_DMA64) {
1469 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1470 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1471 } else
1472 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1473
1474 if (ldio->sge_count > instance->max_num_sge) {
1475 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1476 ldio->sge_count);
1477 return 0;
1478 }
1479
1480 /*
1481 * Sense info specific
1482 */
1483 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1484 ldio->sense_buf_phys_addr_hi = 0;
1485 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1486
1487 /*
1488 * Compute the total number of frames this command consumes. FW uses
1489 * this number to pull sufficient number of frames from host memory.
1490 */
1491 cmd->frame_count = megasas_get_frame_count(instance,
1492 ldio->sge_count, IO_FRAME);
1493
1494 return cmd->frame_count;
1495 }
1496
1497 /**
1498  * megasas_cmd_type - Checks if the cmd is for a logical drive/sysPD
1499  * and whether it's RW or non-RW
1500  * @cmd: SCSI command
1501 *
1502 */
1503 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1504 {
1505 int ret;
1506
1507 switch (cmd->cmnd[0]) {
1508 case READ_10:
1509 case WRITE_10:
1510 case READ_12:
1511 case WRITE_12:
1512 case READ_6:
1513 case WRITE_6:
1514 case READ_16:
1515 case WRITE_16:
1516 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1517 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1518 break;
1519 default:
1520 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1521 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1522 }
1523 return ret;
1524 }
1525
1526 /**
1527 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1528 * in FW
1529 * @instance: Adapter soft state
1530 */
1531 static inline void
1532 megasas_dump_pending_frames(struct megasas_instance *instance)
1533 {
1534 struct megasas_cmd *cmd;
1535 int i,n;
1536 union megasas_sgl *mfi_sgl;
1537 struct megasas_io_frame *ldio;
1538 struct megasas_pthru_frame *pthru;
1539 u32 sgcount;
1540 u32 max_cmd = instance->max_fw_cmds;
1541
1542 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1543 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1544 if (IS_DMA64)
1545 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1546 else
1547 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1548
1549 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1550 for (i = 0; i < max_cmd; i++) {
1551 cmd = instance->cmd_list[i];
1552 if (!cmd->scmd)
1553 continue;
1554 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1555 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1556 ldio = (struct megasas_io_frame *)cmd->frame;
1557 mfi_sgl = &ldio->sgl;
1558 sgcount = ldio->sge_count;
1559 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1560 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1561 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1562 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1563 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1564 } else {
1565 pthru = (struct megasas_pthru_frame *) cmd->frame;
1566 mfi_sgl = &pthru->sgl;
1567 sgcount = pthru->sge_count;
1568 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1569 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1570 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1571 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1572 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1573 }
1574 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1575 for (n = 0; n < sgcount; n++) {
1576 if (IS_DMA64)
1577 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1578 le32_to_cpu(mfi_sgl->sge64[n].length),
1579 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1580 else
1581 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1582 le32_to_cpu(mfi_sgl->sge32[n].length),
1583 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1584 }
1585 }
1586 } /*for max_cmd*/
1587 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1588 for (i = 0; i < max_cmd; i++) {
1589
1590 cmd = instance->cmd_list[i];
1591
1592 if (cmd->sync_cmd == 1)
1593 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1594 }
1595 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1596 }
1597
1598 u32
1599 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1600 struct scsi_cmnd *scmd)
1601 {
1602 struct megasas_cmd *cmd;
1603 u32 frame_count;
1604
1605 cmd = megasas_get_cmd(instance);
1606 if (!cmd)
1607 return SCSI_MLQUEUE_HOST_BUSY;
1608
1609 /*
1610 * Logical drive command
1611 */
1612 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1613 frame_count = megasas_build_ldio(instance, scmd, cmd);
1614 else
1615 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1616
1617 if (!frame_count)
1618 goto out_return_cmd;
1619
1620 cmd->scmd = scmd;
1621 scmd->SCp.ptr = (char *)cmd;
1622
1623 /*
1624 * Issue the command to the FW
1625 */
1626 atomic_inc(&instance->fw_outstanding);
1627
1628 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1629 cmd->frame_count-1, instance->reg_set);
1630
1631 return 0;
1632 out_return_cmd:
1633 megasas_return_cmd(instance, cmd);
1634 return SCSI_MLQUEUE_HOST_BUSY;
1635 }
1636
1637
1638 /**
1639 * megasas_queue_command - Queue entry point
1640  * @shost: SCSI host to which the command is queued
1641  * @scmd: SCSI command to be queued
1642 */
1643 static int
1644 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1645 {
1646 struct megasas_instance *instance;
1647 struct MR_PRIV_DEVICE *mr_device_priv_data;
1648
1649 instance = (struct megasas_instance *)
1650 scmd->device->host->hostdata;
1651
1652 if (instance->unload == 1) {
1653 scmd->result = DID_NO_CONNECT << 16;
1654 scmd->scsi_done(scmd);
1655 return 0;
1656 }
1657
1658 if (instance->issuepend_done == 0)
1659 return SCSI_MLQUEUE_HOST_BUSY;
1660
1661
1662 /* Check for an mpio path and adjust behavior */
1663 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1664 if (megasas_check_mpio_paths(instance, scmd) ==
1665 (DID_RESET << 16)) {
1666 return SCSI_MLQUEUE_HOST_BUSY;
1667 } else {
1668 scmd->result = DID_NO_CONNECT << 16;
1669 scmd->scsi_done(scmd);
1670 return 0;
1671 }
1672 }
1673
1674 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1675 scmd->result = DID_NO_CONNECT << 16;
1676 scmd->scsi_done(scmd);
1677 return 0;
1678 }
1679
1680 mr_device_priv_data = scmd->device->hostdata;
1681 if (!mr_device_priv_data) {
1682 scmd->result = DID_NO_CONNECT << 16;
1683 scmd->scsi_done(scmd);
1684 return 0;
1685 }
1686
1687 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1688 return SCSI_MLQUEUE_HOST_BUSY;
1689
1690 if (mr_device_priv_data->tm_busy)
1691 return SCSI_MLQUEUE_DEVICE_BUSY;
1692
1693
1694 scmd->result = 0;
1695
1696 if (MEGASAS_IS_LOGICAL(scmd) &&
1697 (scmd->device->id >= instance->fw_supported_vd_count ||
1698 scmd->device->lun)) {
1699 scmd->result = DID_BAD_TARGET << 16;
1700 goto out_done;
1701 }
1702
1703 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd) &&
1704 (!instance->fw_sync_cache_support)) {
1705 scmd->result = DID_OK << 16;
1706 goto out_done;
1707 }
1708
1709 return instance->instancet->build_and_issue_cmd(instance, scmd);
1710
1711 out_done:
1712 scmd->scsi_done(scmd);
1713 return 0;
1714 }
1715
1716 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1717 {
1718 int i;
1719
1720 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1721
1722 if ((megasas_mgmt_info.instance[i]) &&
1723 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1724 return megasas_mgmt_info.instance[i];
1725 }
1726
1727 return NULL;
1728 }
1729
1730 /*
1731 * megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
1732 *
1733 * @sdev: OS provided scsi device
1734 *
1735 * Returns void
1736 */
1737 void megasas_update_sdev_properties(struct scsi_device *sdev)
1738 {
1739 u16 pd_index = 0;
1740 u32 device_id, ld;
1741 struct megasas_instance *instance;
1742 struct fusion_context *fusion;
1743 struct MR_PRIV_DEVICE *mr_device_priv_data;
1744 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1745 struct MR_LD_RAID *raid;
1746 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1747
1748 instance = megasas_lookup_instance(sdev->host->host_no);
1749 fusion = instance->ctrl_context;
1750 mr_device_priv_data = sdev->hostdata;
1751
1752 if (!fusion)
1753 return;
1754
1755 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1756 instance->use_seqnum_jbod_fp) {
1757 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1758 sdev->id;
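		/*
		 * The PD sequence map is double buffered;
		 * (pd_seq_map_id - 1) & 1 selects the copy most recently
		 * populated by the firmware.
		 */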
1759 pd_sync = (void *)fusion->pd_seq_sync
1760 [(instance->pd_seq_map_id - 1) & 1];
1761 mr_device_priv_data->is_tm_capable =
1762 pd_sync->seq[pd_index].capability.tmCapable;
1763 } else {
1764 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1765 + sdev->id;
1766 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1767 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1768 raid = MR_LdRaidGet(ld, local_map_ptr);
1769
1770 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1771 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1772 mr_device_priv_data->is_tm_capable =
1773 raid->capability.tmCapable;
1774 }
1775 }
1776
1777 static void megasas_set_device_queue_depth(struct scsi_device *sdev)
1778 {
1779 u16 pd_index = 0;
1780 int ret = DCMD_FAILED;
1781 struct megasas_instance *instance;
1782
1783 instance = megasas_lookup_instance(sdev->host->host_no);
1784
1785 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1786 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1787
1788 if (instance->pd_info) {
1789 mutex_lock(&instance->hba_mutex);
1790 ret = megasas_get_pd_info(instance, pd_index);
1791 mutex_unlock(&instance->hba_mutex);
1792 }
1793
1794 if (ret != DCMD_SUCCESS)
1795 return;
1796
1797 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
1798
1799 switch (instance->pd_list[pd_index].interface) {
1800 case SAS_PD:
1801 scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
1802 break;
1803
1804 case SATA_PD:
1805 scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
1806 break;
1807
1808 default:
1809 scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
1810 }
1811 }
1812 }
1813 }
1814
1815
1816 static int megasas_slave_configure(struct scsi_device *sdev)
1817 {
1818 u16 pd_index = 0;
1819 struct megasas_instance *instance;
1820
1821 instance = megasas_lookup_instance(sdev->host->host_no);
1822 if (instance->pd_list_not_supported) {
1823 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1824 sdev->type == TYPE_DISK) {
1825 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1826 sdev->id;
1827 if (instance->pd_list[pd_index].driveState !=
1828 MR_PD_STATE_SYSTEM)
1829 return -ENXIO;
1830 }
1831 }
1832 megasas_set_device_queue_depth(sdev);
1833 megasas_update_sdev_properties(sdev);
1834
1835 /*
1836 * The RAID firmware may require extended timeouts.
1837 */
1838 blk_queue_rq_timeout(sdev->request_queue,
1839 scmd_timeout * HZ);
1840
1841 return 0;
1842 }
1843
1844 static int megasas_slave_alloc(struct scsi_device *sdev)
1845 {
1846 u16 pd_index = 0;
1847 	struct megasas_instance *instance;
1848 struct MR_PRIV_DEVICE *mr_device_priv_data;
1849
1850 instance = megasas_lookup_instance(sdev->host->host_no);
1851 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1852 /*
1853 * Open the OS scan to the SYSTEM PD
1854 */
1855 pd_index =
1856 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1857 sdev->id;
1858 if ((instance->pd_list_not_supported ||
1859 instance->pd_list[pd_index].driveState ==
1860 MR_PD_STATE_SYSTEM)) {
1861 goto scan_target;
1862 }
1863 return -ENXIO;
1864 }
1865
1866 scan_target:
1867 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1868 GFP_KERNEL);
1869 if (!mr_device_priv_data)
1870 return -ENOMEM;
1871 sdev->hostdata = mr_device_priv_data;
1872 return 0;
1873 }
1874
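/**
 * megasas_slave_destroy - SCSI mid-layer slave_destroy entry point
 * @sdev: scsi device handle
 *
 * Frees the per-device private data allocated in megasas_slave_alloc.
 */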
1875 static void megasas_slave_destroy(struct scsi_device *sdev)
1876 {
1877 kfree(sdev->hostdata);
1878 sdev->hostdata = NULL;
1879 }
1880
1881 /*
1882 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
1883 * kill adapter
1884 * @instance: Adapter soft state
1885 *
1886 */
1887 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1888 {
1889 int i;
1890 struct megasas_cmd *cmd_mfi;
1891 struct megasas_cmd_fusion *cmd_fusion;
1892 struct fusion_context *fusion = instance->ctrl_context;
1893
1894 /* Find all outstanding ioctls */
1895 if (fusion) {
1896 for (i = 0; i < instance->max_fw_cmds; i++) {
1897 cmd_fusion = fusion->cmd_list[i];
1898 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1899 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1900 if (cmd_mfi->sync_cmd &&
1901 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1902 megasas_complete_cmd(instance,
1903 cmd_mfi, DID_OK);
1904 }
1905 }
1906 } else {
1907 for (i = 0; i < instance->max_fw_cmds; i++) {
1908 cmd_mfi = instance->cmd_list[i];
1909 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
1910 MFI_CMD_ABORT)
1911 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
1912 }
1913 }
1914 }
1915
1916
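/**
 * megaraid_sas_kill_hba - Stop the adapter after an unrecoverable error
 * @instance: Adapter soft state
 *
 * Marks the adapter as being in critical error state, asks the FW to stop
 * via the doorbell register and completes any outstanding ioctls.
 */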
1917 void megaraid_sas_kill_hba(struct megasas_instance *instance)
1918 {
1919 /* Set critical error to block I/O & ioctls in case caller didn't */
1920 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
1921 /* Wait 1 second to ensure that IOs or ioctls being built have been posted */
1922 msleep(1000);
1923 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1924 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1925 (instance->ctrl_context)) {
1926 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1927 /* Flush */
1928 readl(&instance->reg_set->doorbell);
1929 if (instance->requestorId && instance->peerIsPresent)
1930 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
1931 } else {
1932 writel(MFI_STOP_ADP,
1933 &instance->reg_set->inbound_doorbell);
1934 }
1935 /* Complete outstanding ioctls when adapter is killed */
1936 megasas_complete_outstanding_ioctls(instance);
1937 }
1938
1939 /**
1940 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
1941 * restored to max value
1942 * @instance: Adapter soft state
1943 *
1944 */
1945 void
1946 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
1947 {
1948 unsigned long flags;
1949
1950 if (instance->flag & MEGASAS_FW_BUSY
1951 && time_after(jiffies, instance->last_time + 5 * HZ)
1952 && atomic_read(&instance->fw_outstanding) <
1953 instance->throttlequeuedepth + 1) {
1954
1955 spin_lock_irqsave(instance->host->host_lock, flags);
1956 instance->flag &= ~MEGASAS_FW_BUSY;
1957
1958 instance->host->can_queue = instance->cur_can_queue;
1959 spin_unlock_irqrestore(instance->host->host_lock, flags);
1960 }
1961 }
1962
1963 /**
1964 * megasas_complete_cmd_dpc - Completes commands in the reply queue
1965 * @instance_addr: Address of adapter soft state
1966 *
1967 * Tasklet to complete cmds
1968 */
1969 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1970 {
1971 u32 producer;
1972 u32 consumer;
1973 u32 context;
1974 struct megasas_cmd *cmd;
1975 struct megasas_instance *instance =
1976 (struct megasas_instance *)instance_addr;
1977 unsigned long flags;
1978
1979 /* If we have already declared the adapter dead, do not complete cmds */
1980 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
1981 return;
1982
1983 spin_lock_irqsave(&instance->completion_lock, flags);
1984
1985 producer = le32_to_cpu(*instance->producer);
1986 consumer = le32_to_cpu(*instance->consumer);
1987
1988 while (consumer != producer) {
1989 context = le32_to_cpu(instance->reply_queue[consumer]);
1990 if (context >= instance->max_fw_cmds) {
1991 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
1992 context);
1993 BUG();
1994 }
1995
1996 cmd = instance->cmd_list[context];
1997
1998 megasas_complete_cmd(instance, cmd, DID_OK);
1999
2000 consumer++;
2001 if (consumer == (instance->max_fw_cmds + 1)) {
2002 consumer = 0;
2003 }
2004 }
2005
2006 *instance->consumer = cpu_to_le32(producer);
2007
2008 spin_unlock_irqrestore(&instance->completion_lock, flags);
2009
2010 /*
2011 * Check if we can restore can_queue
2012 */
2013 megasas_check_and_restore_queue_depth(instance);
2014 }
2015
2016 /**
2017 * megasas_start_timer - Initializes a timer object
2018 * @instance: Adapter soft state
2019 * @timer: timer object to be initialized
2020 * @fn: timer function
2021 * @interval: time interval between timer function call
2022 *
2023 */
2024 void megasas_start_timer(struct megasas_instance *instance,
2025 struct timer_list *timer,
2026 void *fn, unsigned long interval)
2027 {
2028 init_timer(timer);
2029 timer->expires = jiffies + interval;
2030 timer->data = (unsigned long)instance;
2031 timer->function = fn;
2032 add_timer(timer);
2033 }
2034
2035 static void
2036 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2037
2038 static void
2039 process_fw_state_change_wq(struct work_struct *work);
2040
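/**
 * megasas_do_ocr - Trigger an online controller reset (OCR)
 * @instance: Adapter soft state
 *
 * Disables interrupts, moves pending commands to the internal reset
 * queue and runs the FW state change work to restart the adapter.
 */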
2041 void megasas_do_ocr(struct megasas_instance *instance)
2042 {
2043 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2044 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2045 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2046 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2047 }
2048 instance->instancet->disable_intr(instance);
2049 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2050 instance->issuepend_done = 0;
2051
2052 atomic_set(&instance->fw_outstanding, 0);
2053 megasas_internal_reset_defer_cmds(instance);
2054 process_fw_state_change_wq(&instance->work_init);
2055 }
2056
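/**
 * megasas_get_ld_vf_affiliation_111 - Get LD/VF affiliation (PlasmaFW111)
 * @instance: Adapter soft state
 * @initial: Non-zero for the initial query at init time
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 and compares the new map with
 * the saved one. Returns 1 if a rescan is needed, 0 otherwise.
 */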
2057 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2058 int initial)
2059 {
2060 struct megasas_cmd *cmd;
2061 struct megasas_dcmd_frame *dcmd;
2062 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2063 dma_addr_t new_affiliation_111_h;
2064 int ld, retval = 0;
2065 u8 thisVf;
2066
2067 cmd = megasas_get_cmd(instance);
2068
2069 if (!cmd) {
2070 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2071 "Failed to get cmd for scsi%d\n",
2072 instance->host->host_no);
2073 return -ENOMEM;
2074 }
2075
2076 dcmd = &cmd->frame->dcmd;
2077
2078 if (!instance->vf_affiliation_111) {
2079 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2080 "affiliation for scsi%d\n", instance->host->host_no);
2081 megasas_return_cmd(instance, cmd);
2082 return -ENOMEM;
2083 }
2084
2085 if (initial)
2086 memset(instance->vf_affiliation_111, 0,
2087 sizeof(struct MR_LD_VF_AFFILIATION_111));
2088 else {
2089 new_affiliation_111 =
2090 pci_alloc_consistent(instance->pdev,
2091 sizeof(struct MR_LD_VF_AFFILIATION_111),
2092 &new_affiliation_111_h);
2093 if (!new_affiliation_111) {
2094 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2095 "memory for new affiliation for scsi%d\n",
2096 instance->host->host_no);
2097 megasas_return_cmd(instance, cmd);
2098 return -ENOMEM;
2099 }
2100 memset(new_affiliation_111, 0,
2101 sizeof(struct MR_LD_VF_AFFILIATION_111));
2102 }
2103
2104 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2105
2106 dcmd->cmd = MFI_CMD_DCMD;
2107 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2108 dcmd->sge_count = 1;
2109 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2110 dcmd->timeout = 0;
2111 dcmd->pad_0 = 0;
2112 dcmd->data_xfer_len =
2113 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2114 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2115
2116 if (initial)
2117 dcmd->sgl.sge32[0].phys_addr =
2118 cpu_to_le32(instance->vf_affiliation_111_h);
2119 else
2120 dcmd->sgl.sge32[0].phys_addr =
2121 cpu_to_le32(new_affiliation_111_h);
2122
2123 dcmd->sgl.sge32[0].length = cpu_to_le32(
2124 sizeof(struct MR_LD_VF_AFFILIATION_111));
2125
2126 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2127 "scsi%d\n", instance->host->host_no);
2128
2129 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2130 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2131 " failed with status 0x%x for scsi%d\n",
2132 dcmd->cmd_status, instance->host->host_no);
2133 retval = 1; /* Do a scan if we couldn't get affiliation */
2134 goto out;
2135 }
2136
2137 if (!initial) {
2138 thisVf = new_affiliation_111->thisVf;
2139 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2140 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2141 new_affiliation_111->map[ld].policy[thisVf]) {
2142 dev_warn(&instance->pdev->dev, "SR-IOV: "
2143 "Got new LD/VF affiliation for scsi%d\n",
2144 instance->host->host_no);
2145 memcpy(instance->vf_affiliation_111,
2146 new_affiliation_111,
2147 sizeof(struct MR_LD_VF_AFFILIATION_111));
2148 retval = 1;
2149 goto out;
2150 }
2151 }
2152 out:
2153 if (new_affiliation_111) {
2154 pci_free_consistent(instance->pdev,
2155 sizeof(struct MR_LD_VF_AFFILIATION_111),
2156 new_affiliation_111,
2157 new_affiliation_111_h);
2158 }
2159
2160 megasas_return_cmd(instance, cmd);
2161
2162 return retval;
2163 }
2164
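/**
 * megasas_get_ld_vf_affiliation_12 - Get LD/VF affiliation (1.2 format)
 * @instance: Adapter soft state
 * @initial: Non-zero for the initial query at init time
 *
 * Issues MR_DCMD_LD_VF_MAP_GET_ALL_LDS and walks both the new and the
 * saved maps, comparing per-VF policies. Returns 1 if a rescan is needed.
 */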
2165 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2166 int initial)
2167 {
2168 struct megasas_cmd *cmd;
2169 struct megasas_dcmd_frame *dcmd;
2170 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2171 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2172 dma_addr_t new_affiliation_h;
2173 int i, j, retval = 0, found = 0, doscan = 0;
2174 u8 thisVf;
2175
2176 cmd = megasas_get_cmd(instance);
2177
2178 if (!cmd) {
2179 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2180 "Failed to get cmd for scsi%d\n",
2181 instance->host->host_no);
2182 return -ENOMEM;
2183 }
2184
2185 dcmd = &cmd->frame->dcmd;
2186
2187 if (!instance->vf_affiliation) {
2188 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2189 "affiliation for scsi%d\n", instance->host->host_no);
2190 megasas_return_cmd(instance, cmd);
2191 return -ENOMEM;
2192 }
2193
2194 if (initial)
2195 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2196 sizeof(struct MR_LD_VF_AFFILIATION));
2197 else {
2198 new_affiliation =
2199 pci_alloc_consistent(instance->pdev,
2200 (MAX_LOGICAL_DRIVES + 1) *
2201 sizeof(struct MR_LD_VF_AFFILIATION),
2202 &new_affiliation_h);
2203 if (!new_affiliation) {
2204 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2205 "memory for new affiliation for scsi%d\n",
2206 instance->host->host_no);
2207 megasas_return_cmd(instance, cmd);
2208 return -ENOMEM;
2209 }
2210 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2211 sizeof(struct MR_LD_VF_AFFILIATION));
2212 }
2213
2214 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2215
2216 dcmd->cmd = MFI_CMD_DCMD;
2217 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2218 dcmd->sge_count = 1;
2219 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2220 dcmd->timeout = 0;
2221 dcmd->pad_0 = 0;
2222 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2223 sizeof(struct MR_LD_VF_AFFILIATION));
2224 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2225
2226 if (initial)
2227 dcmd->sgl.sge32[0].phys_addr =
2228 cpu_to_le32(instance->vf_affiliation_h);
2229 else
2230 dcmd->sgl.sge32[0].phys_addr =
2231 cpu_to_le32(new_affiliation_h);
2232
2233 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2234 sizeof(struct MR_LD_VF_AFFILIATION));
2235
2236 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2237 "scsi%d\n", instance->host->host_no);
2238
2239
2240 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2241 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2242 " failed with status 0x%x for scsi%d\n",
2243 dcmd->cmd_status, instance->host->host_no);
2244 retval = 1; /* Do a scan if we couldn't get affiliation */
2245 goto out;
2246 }
2247
2248 if (!initial) {
2249 if (!new_affiliation->ldCount) {
2250 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2251 "affiliation for passive path for scsi%d\n",
2252 instance->host->host_no);
2253 retval = 1;
2254 goto out;
2255 }
2256 newmap = new_affiliation->map;
2257 savedmap = instance->vf_affiliation->map;
2258 thisVf = new_affiliation->thisVf;
2259 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2260 found = 0;
2261 for (j = 0; j < instance->vf_affiliation->ldCount;
2262 j++) {
2263 if (newmap->ref.targetId ==
2264 savedmap->ref.targetId) {
2265 found = 1;
2266 if (newmap->policy[thisVf] !=
2267 savedmap->policy[thisVf]) {
2268 doscan = 1;
2269 goto out;
2270 }
2271 }
2272 savedmap = (struct MR_LD_VF_MAP *)
2273 ((unsigned char *)savedmap +
2274 savedmap->size);
2275 }
2276 if (!found && newmap->policy[thisVf] !=
2277 MR_LD_ACCESS_HIDDEN) {
2278 doscan = 1;
2279 goto out;
2280 }
2281 newmap = (struct MR_LD_VF_MAP *)
2282 ((unsigned char *)newmap + newmap->size);
2283 }
2284
2285 newmap = new_affiliation->map;
2286 savedmap = instance->vf_affiliation->map;
2287
2288 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2289 found = 0;
2290 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2291 if (savedmap->ref.targetId ==
2292 newmap->ref.targetId) {
2293 found = 1;
2294 if (savedmap->policy[thisVf] !=
2295 newmap->policy[thisVf]) {
2296 doscan = 1;
2297 goto out;
2298 }
2299 }
2300 newmap = (struct MR_LD_VF_MAP *)
2301 ((unsigned char *)newmap +
2302 newmap->size);
2303 }
2304 if (!found && savedmap->policy[thisVf] !=
2305 MR_LD_ACCESS_HIDDEN) {
2306 doscan = 1;
2307 goto out;
2308 }
2309 savedmap = (struct MR_LD_VF_MAP *)
2310 ((unsigned char *)savedmap +
2311 savedmap->size);
2312 }
2313 }
2314 out:
2315 if (doscan) {
2316 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2317 "affiliation for scsi%d\n", instance->host->host_no);
2318 memcpy(instance->vf_affiliation, new_affiliation,
2319 new_affiliation->size);
2320 retval = 1;
2321 }
2322
2323 if (new_affiliation)
2324 pci_free_consistent(instance->pdev,
2325 (MAX_LOGICAL_DRIVES + 1) *
2326 sizeof(struct MR_LD_VF_AFFILIATION),
2327 new_affiliation, new_affiliation_h);
2328 megasas_return_cmd(instance, cmd);
2329
2330 return retval;
2331 }
2332
2333 /* This function will get the current SR-IOV LD/VF affiliation */
2334 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2335 int initial)
2336 {
2337 int retval;
2338
2339 if (instance->PlasmaFW111)
2340 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2341 else
2342 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2343 return retval;
2344 }
2345
2346 /* This function will tell FW to start the SR-IOV heartbeat */
2347 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2348 int initial)
2349 {
2350 struct megasas_cmd *cmd;
2351 struct megasas_dcmd_frame *dcmd;
2352 int retval = 0;
2353
2354 cmd = megasas_get_cmd(instance);
2355
2356 if (!cmd) {
2357 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2358 "Failed to get cmd for scsi%d\n",
2359 instance->host->host_no);
2360 return -ENOMEM;
2361 }
2362
2363 dcmd = &cmd->frame->dcmd;
2364
2365 if (initial) {
2366 instance->hb_host_mem =
2367 pci_zalloc_consistent(instance->pdev,
2368 sizeof(struct MR_CTRL_HB_HOST_MEM),
2369 &instance->hb_host_mem_h);
2370 if (!instance->hb_host_mem) {
2371 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2372 " memory for heartbeat host memory for scsi%d\n",
2373 instance->host->host_no);
2374 retval = -ENOMEM;
2375 goto out;
2376 }
2377 }
2378
2379 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2380
2381 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2382 dcmd->cmd = MFI_CMD_DCMD;
2383 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2384 dcmd->sge_count = 1;
2385 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2386 dcmd->timeout = 0;
2387 dcmd->pad_0 = 0;
2388 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2389 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2390 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2391 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2392
2393 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2394 instance->host->host_no);
2395
2396 if (instance->ctrl_context && !instance->mask_interrupts)
2397 retval = megasas_issue_blocked_cmd(instance, cmd,
2398 MEGASAS_ROUTINE_WAIT_TIME_VF);
2399 else
2400 retval = megasas_issue_polled(instance, cmd);
2401
2402 if (retval) {
2403 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2404 "_MEM_ALLOC DCMD %s for scsi%d\n",
2405 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2406 "timed out" : "failed", instance->host->host_no);
2407 retval = 1;
2408 }
2409
2410 out:
2411 megasas_return_cmd(instance, cmd);
2412
2413 return retval;
2414 }
2415
2416 /* Handler for SR-IOV heartbeat */
2417 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2418 {
2419 struct megasas_instance *instance =
2420 (struct megasas_instance *)instance_addr;
2421
2422 if (instance->hb_host_mem->HB.fwCounter !=
2423 instance->hb_host_mem->HB.driverCounter) {
2424 instance->hb_host_mem->HB.driverCounter =
2425 instance->hb_host_mem->HB.fwCounter;
2426 mod_timer(&instance->sriov_heartbeat_timer,
2427 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2428 } else {
2429 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2430 "completed for scsi%d\n", instance->host->host_no);
2431 schedule_work(&instance->work_init);
2432 }
2433 }
2434
2435 /**
2436 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2437 * @instance: Adapter soft state
2438 *
2439 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2440 * complete all its outstanding commands. Returns error if one or more IOs
2441 * are pending after this time period. It also marks the controller dead.
2442 */
2443 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2444 {
2445 int i, sl, outstanding;
2446 u32 reset_index;
2447 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2448 unsigned long flags;
2449 struct list_head clist_local;
2450 struct megasas_cmd *reset_cmd;
2451 u32 fw_state;
2452
2453 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2454 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2455 __func__, __LINE__);
2456 return FAILED;
2457 }
2458
2459 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2460
2461 INIT_LIST_HEAD(&clist_local);
2462 spin_lock_irqsave(&instance->hba_lock, flags);
2463 list_splice_init(&instance->internal_reset_pending_q,
2464 &clist_local);
2465 spin_unlock_irqrestore(&instance->hba_lock, flags);
2466
2467 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2468 for (i = 0; i < wait_time; i++) {
2469 msleep(1000);
2470 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2471 break;
2472 }
2473
2474 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2475 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2476 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2477 return FAILED;
2478 }
2479
2480 reset_index = 0;
2481 while (!list_empty(&clist_local)) {
2482 reset_cmd = list_entry((&clist_local)->next,
2483 struct megasas_cmd, list);
2484 list_del_init(&reset_cmd->list);
2485 if (reset_cmd->scmd) {
2486 reset_cmd->scmd->result = DID_RESET << 16;
2487 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2488 reset_index, reset_cmd,
2489 reset_cmd->scmd->cmnd[0]);
2490
2491 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2492 megasas_return_cmd(instance, reset_cmd);
2493 } else if (reset_cmd->sync_cmd) {
2494 dev_notice(&instance->pdev->dev, "%p synchronous cmd on the "
2495 "reset queue\n",
2496 reset_cmd);
2497
2498 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2499 instance->instancet->fire_cmd(instance,
2500 reset_cmd->frame_phys_addr,
2501 0, instance->reg_set);
2502 } else {
2503 dev_notice(&instance->pdev->dev, "%p unexpected "
2504 "cmd on the reset list\n",
2505 reset_cmd);
2506 }
2507 reset_index++;
2508 }
2509
2510 return SUCCESS;
2511 }
2512
2513 for (i = 0; i < resetwaittime; i++) {
2514 outstanding = atomic_read(&instance->fw_outstanding);
2515
2516 if (!outstanding)
2517 break;
2518
2519 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2520 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2521 "commands to complete\n",i,outstanding);
2522 /*
2523 * Call the cmd completion routine. Cmds are to
2524 * be completed directly without depending on the ISR.
2525 */
2526 megasas_complete_cmd_dpc((unsigned long)instance);
2527 }
2528
2529 msleep(1000);
2530 }
2531
2532 i = 0;
2533 outstanding = atomic_read(&instance->fw_outstanding);
2534 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2535
2536 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2537 goto no_outstanding;
2538
2539 if (instance->disableOnlineCtrlReset)
2540 goto kill_hba_and_failed;
2541 do {
2542 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2543 dev_info(&instance->pdev->dev,
2544 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2545 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2546 if (i == 3)
2547 goto kill_hba_and_failed;
2548 megasas_do_ocr(instance);
2549
2550 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2551 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2552 __func__, __LINE__);
2553 return FAILED;
2554 }
2555 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2556 __func__, __LINE__);
2557
2558 for (sl = 0; sl < 10; sl++)
2559 msleep(500);
2560
2561 outstanding = atomic_read(&instance->fw_outstanding);
2562
2563 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2564 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2565 goto no_outstanding;
2566 }
2567 i++;
2568 } while (i <= 3);
2569
2570 no_outstanding:
2571
2572 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2573 __func__, __LINE__);
2574 return SUCCESS;
2575
2576 kill_hba_and_failed:
2577
2578 /* Reset not supported, kill adapter */
2579 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2580 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2581 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2582 atomic_read(&instance->fw_outstanding));
2583 megasas_dump_pending_frames(instance);
2584 megaraid_sas_kill_hba(instance);
2585
2586 return FAILED;
2587 }
2588
2589 /**
2590 * megasas_generic_reset - Generic reset routine
2591 * @scmd: Mid-layer SCSI command
2592 *
2593 * This routine implements a generic reset handler for device, bus and host
2594 * reset requests. Device, bus and host specific reset handlers can use this
2595 * function after they do their specific tasks.
2596 */
2597 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2598 {
2599 int ret_val;
2600 struct megasas_instance *instance;
2601
2602 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2603
2604 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2605 scmd->cmnd[0], scmd->retries);
2606
2607 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2608 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2609 return FAILED;
2610 }
2611
2612 ret_val = megasas_wait_for_outstanding(instance);
2613 if (ret_val == SUCCESS)
2614 dev_notice(&instance->pdev->dev, "reset successful\n");
2615 else
2616 dev_err(&instance->pdev->dev, "failed to do reset\n");
2617
2618 return ret_val;
2619 }
2620
2621 /**
2622 * megasas_reset_timer - quiesce the adapter if required
2623 * @scmd: scsi cmnd
2624 *
2625 * Sets the FW busy flag and reduces the host->can_queue if the
2626 * cmd has not been completed within the timeout period.
2627 */
2628 static enum
2629 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2630 {
2631 struct megasas_instance *instance;
2632 unsigned long flags;
2633
2634 if (time_after(jiffies, scmd->jiffies_at_alloc +
2635 (scmd_timeout * 2) * HZ)) {
2636 return BLK_EH_NOT_HANDLED;
2637 }
2638
2639 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2640 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2641 /* FW appears to be busy (cmd timed out): set the busy flag and throttle IO */
2642 spin_lock_irqsave(instance->host->host_lock, flags);
2643
2644 instance->host->can_queue = instance->throttlequeuedepth;
2645 instance->last_time = jiffies;
2646 instance->flag |= MEGASAS_FW_BUSY;
2647
2648 spin_unlock_irqrestore(instance->host->host_lock, flags);
2649 }
2650 return BLK_EH_RESET_TIMER;
2651 }
2652
2653 /**
2654 * megasas_reset_bus_host - Bus & host reset handler entry point
2655 */
2656 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2657 {
2658 int ret;
2659 struct megasas_instance *instance;
2660
2661 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2662
2663 /*
2664 * First wait for all commands to complete
2665 */
2666 if (instance->ctrl_context)
2667 ret = megasas_reset_fusion(scmd->device->host, 1);
2668 else
2669 ret = megasas_generic_reset(scmd);
2670
2671 return ret;
2672 }
2673
2674 /**
2675 * megasas_task_abort - Issues task abort request to firmware
2676 * (supported only for fusion adapters)
2677 * @scmd: SCSI command pointer
2678 */
2679 static int megasas_task_abort(struct scsi_cmnd *scmd)
2680 {
2681 int ret;
2682 struct megasas_instance *instance;
2683
2684 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2685
2686 if (instance->ctrl_context)
2687 ret = megasas_task_abort_fusion(scmd);
2688 else {
2689 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2690 ret = FAILED;
2691 }
2692
2693 return ret;
2694 }
2695
2696 /**
2697 * megasas_reset_target: Issues target reset request to firmware
2698 * (supported only for fusion adapters)
2699 * @scmd: SCSI command pointer
2700 */
2701 static int megasas_reset_target(struct scsi_cmnd *scmd)
2702 {
2703 int ret;
2704 struct megasas_instance *instance;
2705
2706 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2707
2708 if (instance->ctrl_context)
2709 ret = megasas_reset_target_fusion(scmd);
2710 else {
2711 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2712 ret = FAILED;
2713 }
2714
2715 return ret;
2716 }
2717
2718 /**
2719 * megasas_bios_param - Returns disk geometry for a disk
2720 * @sdev: device handle
2721 * @bdev: block device
2722 * @capacity: drive capacity
2723 * @geom: geometry parameters
2724 */
2725 static int
2726 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2727 sector_t capacity, int geom[])
2728 {
2729 int heads;
2730 int sectors;
2731 sector_t cylinders;
2732 unsigned long tmp;
2733
2734 /* Default heads (64) & sectors (32) */
2735 heads = 64;
2736 sectors = 32;
2737
2738 tmp = heads * sectors;
2739 cylinders = capacity;
2740
2741 sector_div(cylinders, tmp);
2742
2743 /*
2744 * Handle extended translation size for logical drives > 1Gb
2745 */
2746
2747 if (capacity >= 0x200000) {
2748 heads = 255;
2749 sectors = 63;
2750 tmp = heads*sectors;
2751 cylinders = capacity;
2752 sector_div(cylinders, tmp);
2753 }
2754
2755 geom[0] = heads;
2756 geom[1] = sectors;
2757 geom[2] = cylinders;
2758
2759 return 0;
2760 }
2761
2762 static void megasas_aen_polling(struct work_struct *work);
2763
2764 /**
2765 * megasas_service_aen - Processes an event notification
2766 * @instance: Adapter soft state
2767 * @cmd: AEN command completed by the ISR
2768 *
2769 * For AEN, the driver sends a command down to the FW that is held by the FW
2770 * till an event occurs. When an event of interest occurs, the FW completes
2771 * the command that it was previously holding.
2772 *
2773 * This routine sends a SIGIO signal to processes that have registered with
2774 * the driver for AEN.
2775 */
2776 static void
2777 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2778 {
2779 unsigned long flags;
2780
2781 /*
2782 * Don't signal the app if this is just an abort of a previously registered AEN
2783 */
2784 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2785 spin_lock_irqsave(&poll_aen_lock, flags);
2786 megasas_poll_wait_aen = 1;
2787 spin_unlock_irqrestore(&poll_aen_lock, flags);
2788 wake_up(&megasas_poll_wait);
2789 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2790 }
2791 else
2792 cmd->abort_aen = 0;
2793
2794 instance->aen_cmd = NULL;
2795
2796 megasas_return_cmd(instance, cmd);
2797
2798 if ((instance->unload == 0) &&
2799 ((instance->issuepend_done == 1))) {
2800 struct megasas_aen_event *ev;
2801
2802 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2803 if (!ev) {
2804 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2805 } else {
2806 ev->instance = instance;
2807 instance->ev = ev;
2808 INIT_DELAYED_WORK(&ev->hotplug_work,
2809 megasas_aen_polling);
2810 schedule_delayed_work(&ev->hotplug_work, 0);
2811 }
2812 }
2813 }
2814
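/*
 * Sysfs store handler for fw_crash_buffer: records the chunk offset from
 * which the application wants to read the FW crash dump.
 */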
2815 static ssize_t
2816 megasas_fw_crash_buffer_store(struct device *cdev,
2817 struct device_attribute *attr, const char *buf, size_t count)
2818 {
2819 struct Scsi_Host *shost = class_to_shost(cdev);
2820 struct megasas_instance *instance =
2821 (struct megasas_instance *) shost->hostdata;
2822 int val = 0;
2823 unsigned long flags;
2824
2825 if (kstrtoint(buf, 0, &val) != 0)
2826 return -EINVAL;
2827
2828 spin_lock_irqsave(&instance->crashdump_lock, flags);
2829 instance->fw_crash_buffer_offset = val;
2830 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2831 return strlen(buf);
2832 }
2833
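/*
 * Sysfs show handler for fw_crash_buffer: copies up to one page of the
 * host crash dump buffer starting at the previously stored offset.
 */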
2834 static ssize_t
2835 megasas_fw_crash_buffer_show(struct device *cdev,
2836 struct device_attribute *attr, char *buf)
2837 {
2838 struct Scsi_Host *shost = class_to_shost(cdev);
2839 struct megasas_instance *instance =
2840 (struct megasas_instance *) shost->hostdata;
2841 u32 size;
2842 unsigned long buff_addr;
2843 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2844 unsigned long src_addr;
2845 unsigned long flags;
2846 u32 buff_offset;
2847
2848 spin_lock_irqsave(&instance->crashdump_lock, flags);
2849 buff_offset = instance->fw_crash_buffer_offset;
2850 if (!instance->crash_dump_buf &&
2851 !((instance->fw_crash_state == AVAILABLE) ||
2852 (instance->fw_crash_state == COPYING))) {
2853 dev_err(&instance->pdev->dev,
2854 "Firmware crash dump is not available\n");
2855 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2856 return -EINVAL;
2857 }
2858
2859 buff_addr = (unsigned long) buf;
2860
2861 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2862 dev_err(&instance->pdev->dev,
2863 "Firmware crash dump offset is out of range\n");
2864 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2865 return 0;
2866 }
2867
2868 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2869 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2870
2871 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2872 (buff_offset % dmachunk);
2873 memcpy(buf, (void *)src_addr, size);
2874 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2875
2876 return size;
2877 }
2878
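/*
 * Sysfs show handler for fw_crash_buffer_size: reports the crash dump
 * size in PAGE_SIZE units.
 */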
2879 static ssize_t
2880 megasas_fw_crash_buffer_size_show(struct device *cdev,
2881 struct device_attribute *attr, char *buf)
2882 {
2883 struct Scsi_Host *shost = class_to_shost(cdev);
2884 struct megasas_instance *instance =
2885 (struct megasas_instance *) shost->hostdata;
2886
2887 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
2888 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
2889 }
2890
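/*
 * Sysfs store handler for fw_crash_state: the application sets COPIED or
 * COPY_ERROR once it is done with the dump, which frees the host buffer.
 */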
2891 static ssize_t
2892 megasas_fw_crash_state_store(struct device *cdev,
2893 struct device_attribute *attr, const char *buf, size_t count)
2894 {
2895 struct Scsi_Host *shost = class_to_shost(cdev);
2896 struct megasas_instance *instance =
2897 (struct megasas_instance *) shost->hostdata;
2898 int val = 0;
2899 unsigned long flags;
2900
2901 if (kstrtoint(buf, 0, &val) != 0)
2902 return -EINVAL;
2903
2904 if ((val <= AVAILABLE || val > COPY_ERROR)) {
2905 dev_err(&instance->pdev->dev, "application updates invalid "
2906 "firmware crash state\n");
2907 return -EINVAL;
2908 }
2909
2910 instance->fw_crash_state = val;
2911
2912 if ((val == COPIED) || (val == COPY_ERROR)) {
2913 spin_lock_irqsave(&instance->crashdump_lock, flags);
2914 megasas_free_host_crash_buffer(instance);
2915 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2916 if (val == COPY_ERROR)
2917 dev_info(&instance->pdev->dev, "application failed to "
2918 "copy Firmware crash dump\n");
2919 else
2920 dev_info(&instance->pdev->dev, "Firmware crash dump "
2921 "copied successfully\n");
2922 }
2923 return strlen(buf);
2924 }
2925
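/*
 * Sysfs show handler for fw_crash_state: reports the current crash dump
 * state (AVAILABLE, COPYING, COPIED or COPY_ERROR).
 */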
2926 static ssize_t
2927 megasas_fw_crash_state_show(struct device *cdev,
2928 struct device_attribute *attr, char *buf)
2929 {
2930 struct Scsi_Host *shost = class_to_shost(cdev);
2931 struct megasas_instance *instance =
2932 (struct megasas_instance *) shost->hostdata;
2933
2934 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
2935 }
2936
2937 static ssize_t
2938 megasas_page_size_show(struct device *cdev,
2939 struct device_attribute *attr, char *buf)
2940 {
2941 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
2942 }
2943
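/*
 * Sysfs show handler for ldio_outstanding: reports the number of LD IOs
 * currently outstanding to the firmware.
 */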
2944 static ssize_t
2945 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
2946 char *buf)
2947 {
2948 struct Scsi_Host *shost = class_to_shost(cdev);
2949 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
2950
2951 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
2952 }
2953
2954 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
2955 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
2956 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
2957 megasas_fw_crash_buffer_size_show, NULL);
2958 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
2959 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
2960 static DEVICE_ATTR(page_size, S_IRUGO,
2961 megasas_page_size_show, NULL);
2962 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
2963 megasas_ldio_outstanding_show, NULL);
2964
2965 struct device_attribute *megaraid_host_attrs[] = {
2966 &dev_attr_fw_crash_buffer_size,
2967 &dev_attr_fw_crash_buffer,
2968 &dev_attr_fw_crash_state,
2969 &dev_attr_page_size,
2970 &dev_attr_ldio_outstanding,
2971 NULL,
2972 };
2973
2974 /*
2975 * Scsi host template for megaraid_sas driver
2976 */
2977 static struct scsi_host_template megasas_template = {
2978
2979 .module = THIS_MODULE,
2980 .name = "Avago SAS based MegaRAID driver",
2981 .proc_name = "megaraid_sas",
2982 .slave_configure = megasas_slave_configure,
2983 .slave_alloc = megasas_slave_alloc,
2984 .slave_destroy = megasas_slave_destroy,
2985 .queuecommand = megasas_queue_command,
2986 .eh_target_reset_handler = megasas_reset_target,
2987 .eh_abort_handler = megasas_task_abort,
2988 .eh_host_reset_handler = megasas_reset_bus_host,
2989 .eh_timed_out = megasas_reset_timer,
2990 .shost_attrs = megaraid_host_attrs,
2991 .bios_param = megasas_bios_param,
2992 .use_clustering = ENABLE_CLUSTERING,
2993 .change_queue_depth = scsi_change_queue_depth,
2994 .no_write_same = 1,
2995 };
2996
2997 /**
2998 * megasas_complete_int_cmd - Completes an internal command
2999 * @instance: Adapter soft state
3000 * @cmd: Command to be completed
3001 *
3002 * The megasas_issue_blocked_cmd() function waits for a command to complete
3003 * after it issues a command. This function wakes up that waiting routine by
3004 * calling wake_up() on the wait queue.
3005 */
3006 static void
3007 megasas_complete_int_cmd(struct megasas_instance *instance,
3008 struct megasas_cmd *cmd)
3009 {
3010 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3011 wake_up(&instance->int_cmd_wait_q);
3012 }
3013
3014 /**
3015 * megasas_complete_abort - Completes aborting a command
3016 * @instance: Adapter soft state
3017 * @cmd: Cmd that was issued to abort another cmd
3018 *
3019 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3020 * after it issues an abort on a previously issued command. This function
3021 * wakes up all functions waiting on the same wait queue.
3022 */
3023 static void
3024 megasas_complete_abort(struct megasas_instance *instance,
3025 struct megasas_cmd *cmd)
3026 {
3027 if (cmd->sync_cmd) {
3028 cmd->sync_cmd = 0;
3029 cmd->cmd_status_drv = 0;
3030 wake_up(&instance->abort_cmd_wait_q);
3031 }
3032 }
3033
3034 /**
3035 * megasas_complete_cmd - Completes a command
3036 * @instance: Adapter soft state
3037 * @cmd: Command to be completed
3038 * @alt_status: If non-zero, use this value as status to
3039 * SCSI mid-layer instead of the value returned
3040 * by the FW. This should be used if caller wants
3041 * an alternate status (as in the case of aborted
3042 * commands)
3043 */
3044 void
3045 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3046 u8 alt_status)
3047 {
3048 int exception = 0;
3049 struct megasas_header *hdr = &cmd->frame->hdr;
3050 unsigned long flags;
3051 struct fusion_context *fusion = instance->ctrl_context;
3052 u32 opcode, status;
3053
3054 /* flag for the retry reset */
3055 cmd->retry_for_fw_reset = 0;
3056
3057 if (cmd->scmd)
3058 cmd->scmd->SCp.ptr = NULL;
3059
3060 switch (hdr->cmd) {
3061 case MFI_CMD_INVALID:
3062 /* Some older 1068 controller FW may keep a pending
3063 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3064 when booting the kdump kernel. Ignore this command to
3065 prevent a kernel panic on shutdown of the kdump kernel. */
3066 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3067 "completed\n");
3068 dev_warn(&instance->pdev->dev, "If you have a controller "
3069 "other than PERC5, please upgrade your firmware\n");
3070 break;
3071 case MFI_CMD_PD_SCSI_IO:
3072 case MFI_CMD_LD_SCSI_IO:
3073
3074 /*
3075 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3076 * issued either through an IO path or an IOCTL path. If it
3077 * was via IOCTL, we will send it to internal completion.
3078 */
3079 if (cmd->sync_cmd) {
3080 cmd->sync_cmd = 0;
3081 megasas_complete_int_cmd(instance, cmd);
3082 break;
3083 }
3084
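/* Fall through - complete the SCSI command like an LD read/write */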
3085 case MFI_CMD_LD_READ:
3086 case MFI_CMD_LD_WRITE:
3087
3088 if (alt_status) {
3089 cmd->scmd->result = alt_status << 16;
3090 exception = 1;
3091 }
3092
3093 if (exception) {
3094
3095 atomic_dec(&instance->fw_outstanding);
3096
3097 scsi_dma_unmap(cmd->scmd);
3098 cmd->scmd->scsi_done(cmd->scmd);
3099 megasas_return_cmd(instance, cmd);
3100
3101 break;
3102 }
3103
3104 switch (hdr->cmd_status) {
3105
3106 case MFI_STAT_OK:
3107 cmd->scmd->result = DID_OK << 16;
3108 break;
3109
3110 case MFI_STAT_SCSI_IO_FAILED:
3111 case MFI_STAT_LD_INIT_IN_PROGRESS:
3112 cmd->scmd->result =
3113 (DID_ERROR << 16) | hdr->scsi_status;
3114 break;
3115
3116 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3117
3118 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3119
3120 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3121 memset(cmd->scmd->sense_buffer, 0,
3122 SCSI_SENSE_BUFFERSIZE);
3123 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3124 hdr->sense_len);
3125
3126 cmd->scmd->result |= DRIVER_SENSE << 24;
3127 }
3128
3129 break;
3130
3131 case MFI_STAT_LD_OFFLINE:
3132 case MFI_STAT_DEVICE_NOT_FOUND:
3133 cmd->scmd->result = DID_BAD_TARGET << 16;
3134 break;
3135
3136 default:
3137 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3138 hdr->cmd_status);
3139 cmd->scmd->result = DID_ERROR << 16;
3140 break;
3141 }
3142
3143 atomic_dec(&instance->fw_outstanding);
3144
3145 scsi_dma_unmap(cmd->scmd);
3146 cmd->scmd->scsi_done(cmd->scmd);
3147 megasas_return_cmd(instance, cmd);
3148
3149 break;
3150
3151 case MFI_CMD_SMP:
3152 case MFI_CMD_STP:
3153 case MFI_CMD_DCMD:
3154 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3155 /* Check for LD map update */
3156 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3157 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3158 fusion->fast_path_io = 0;
3159 spin_lock_irqsave(instance->host->host_lock, flags);
3160 instance->map_update_cmd = NULL;
3161 if (cmd->frame->hdr.cmd_status != 0) {
3162 if (cmd->frame->hdr.cmd_status !=
3163 MFI_STAT_NOT_FOUND)
3164 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3165 cmd->frame->hdr.cmd_status);
3166 else {
3167 megasas_return_cmd(instance, cmd);
3168 spin_unlock_irqrestore(
3169 instance->host->host_lock,
3170 flags);
3171 break;
3172 }
3173 } else
3174 instance->map_id++;
3175 megasas_return_cmd(instance, cmd);
3176
3177 /*
3178 * Set fast path IO to ZERO.
3179 * Validate Map will set proper value.
3180 * Meanwhile all IOs will go as LD IO.
3181 */
3182 if (MR_ValidateMapInfo(instance))
3183 fusion->fast_path_io = 1;
3184 else
3185 fusion->fast_path_io = 0;
3186 megasas_sync_map_info(instance);
3187 spin_unlock_irqrestore(instance->host->host_lock,
3188 flags);
3189 break;
3190 }
3191 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3192 opcode == MR_DCMD_CTRL_EVENT_GET) {
3193 spin_lock_irqsave(&poll_aen_lock, flags);
3194 megasas_poll_wait_aen = 0;
3195 spin_unlock_irqrestore(&poll_aen_lock, flags);
3196 }
3197
3198 /* FW has an updated PD sequence */
3199 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3200 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3201
3202 spin_lock_irqsave(instance->host->host_lock, flags);
3203 status = cmd->frame->hdr.cmd_status;
3204 instance->jbod_seq_cmd = NULL;
3205 megasas_return_cmd(instance, cmd);
3206
3207 if (status == MFI_STAT_OK) {
3208 instance->pd_seq_map_id++;
3209 /* Re-register a pd sync seq num cmd */
3210 if (megasas_sync_pd_seq_num(instance, true))
3211 instance->use_seqnum_jbod_fp = false;
3212 } else
3213 instance->use_seqnum_jbod_fp = false;
3214
3215 spin_unlock_irqrestore(instance->host->host_lock, flags);
3216 break;
3217 }
3218
3219 /*
3220 * See if got an event notification
3221 */
3222 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3223 megasas_service_aen(instance, cmd);
3224 else
3225 megasas_complete_int_cmd(instance, cmd);
3226
3227 break;
3228
3229 case MFI_CMD_ABORT:
3230 /*
3231 * Cmd issued to abort another cmd returned
3232 */
3233 megasas_complete_abort(instance, cmd);
3234 break;
3235
3236 default:
3237 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3238 hdr->cmd);
3239 break;
3240 }
3241 }
3242
3243 /**
3244 * megasas_issue_pending_cmds_again - issue all pending cmds
3245 * in FW again because of the fw reset
3246 * @instance: Adapter soft state
3247 */
3248 static inline void
3249 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3250 {
3251 struct megasas_cmd *cmd;
3252 struct list_head clist_local;
3253 union megasas_evt_class_locale class_locale;
3254 unsigned long flags;
3255 u32 seq_num;
3256
3257 INIT_LIST_HEAD(&clist_local);
3258 spin_lock_irqsave(&instance->hba_lock, flags);
3259 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3260 spin_unlock_irqrestore(&instance->hba_lock, flags);
3261
3262 while (!list_empty(&clist_local)) {
3263 cmd = list_entry((&clist_local)->next,
3264 struct megasas_cmd, list);
3265 list_del_init(&cmd->list);
3266
3267 if (cmd->sync_cmd || cmd->scmd) {
3268 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3269 "detected to be pending during HBA reset\n",
3270 cmd, cmd->scmd, cmd->sync_cmd);
3271
3272 cmd->retry_for_fw_reset++;
3273
3274 if (cmd->retry_for_fw_reset == 3) {
3275 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3276 "was tried multiple times during reset. "
3277 "Shutting down the HBA\n",
3278 cmd, cmd->scmd, cmd->sync_cmd);
3279 instance->instancet->disable_intr(instance);
3280 atomic_set(&instance->fw_reset_no_pci_access, 1);
3281 megaraid_sas_kill_hba(instance);
3282 return;
3283 }
3284 }
3285
3286 if (cmd->sync_cmd == 1) {
3287 if (cmd->scmd) {
3288 dev_notice(&instance->pdev->dev, "unexpected "
3289 "cmd attached to internal command!\n");
3290 }
3291 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3292 "on the internal reset queue, "
3293 "issue it again.\n", cmd);
3294 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3295 instance->instancet->fire_cmd(instance,
3296 cmd->frame_phys_addr,
3297 0, instance->reg_set);
3298 } else if (cmd->scmd) {
3299 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3300 "detected on the internal queue, issue again.\n",
3301 cmd, cmd->scmd->cmnd[0]);
3302
3303 atomic_inc(&instance->fw_outstanding);
3304 instance->instancet->fire_cmd(instance,
3305 cmd->frame_phys_addr,
3306 cmd->frame_count-1, instance->reg_set);
3307 } else {
3308 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3309 "internal reset defer list while re-issuing\n",
3310 cmd);
3311 }
3312 }
3313
3314 if (instance->aen_cmd) {
3315 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3316 megasas_return_cmd(instance, instance->aen_cmd);
3317
3318 instance->aen_cmd = NULL;
3319 }
3320
3321 /*
3322 * Initiate AEN (Asynchronous Event Notification)
3323 */
3324 seq_num = instance->last_seq_num;
3325 class_locale.members.reserved = 0;
3326 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3327 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3328
3329 megasas_register_aen(instance, seq_num, class_locale.word);
3330 }
3331
3332 /**
3333 * Move the internal reset pending commands to a deferred queue.
3334 *
3335 * We move the commands pending at internal reset time to a
3336 * pending queue. This queue is flushed after successful
3337 * completion of the internal reset sequence. If the internal reset
3338 * does not complete in time, the kernel reset handler flushes
3339 * these commands.
3340 **/
3341 static void
3342 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3343 {
3344 struct megasas_cmd *cmd;
3345 int i;
3346 u32 max_cmd = instance->max_fw_cmds;
3347 u32 defer_index;
3348 unsigned long flags;
3349
3350 defer_index = 0;
3351 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3352 for (i = 0; i < max_cmd; i++) {
3353 cmd = instance->cmd_list[i];
3354 if (cmd->sync_cmd == 1 || cmd->scmd) {
3355 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3356 "on the defer queue as internal\n",
3357 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3358
3359 if (!list_empty(&cmd->list)) {
3360 dev_notice(&instance->pdev->dev, "ERROR while"
3361 " moving this cmd:%p, %d %p, it was"
3362 "discovered on some list?\n",
3363 cmd, cmd->sync_cmd, cmd->scmd);
3364
3365 list_del_init(&cmd->list);
3366 }
3367 defer_index++;
3368 list_add_tail(&cmd->list,
3369 &instance->internal_reset_pending_q);
3370 }
3371 }
3372 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3373 }
3374
3375
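/**
 * process_fw_state_change_wq - Work handler for the internal reset
 * @work: Embedded work structure in the adapter soft state
 *
 * Runs when the FW is detected in FAULT state: resets the adapter, waits
 * for it to become ready, re-issues INIT and then replays the deferred
 * commands before declaring the HBA operational again.
 */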
3376 static void
3377 process_fw_state_change_wq(struct work_struct *work)
3378 {
3379 struct megasas_instance *instance =
3380 container_of(work, struct megasas_instance, work_init);
3381 u32 wait;
3382 unsigned long flags;
3383
3384 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3385 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3386 atomic_read(&instance->adprecovery));
3387 return ;
3388 }
3389
3390 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3391 dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3392 "state, restarting it...\n");
3393
3394 instance->instancet->disable_intr(instance);
3395 atomic_set(&instance->fw_outstanding, 0);
3396
3397 atomic_set(&instance->fw_reset_no_pci_access, 1);
3398 instance->instancet->adp_reset(instance, instance->reg_set);
3399 atomic_set(&instance->fw_reset_no_pci_access, 0);
3400
3401 dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3402 "initiating next stage...\n");
3403
3404 dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3405 "state 2 starting...\n");
3406
3407 /* wait for about 30 seconds before starting the second init */
3408 for (wait = 0; wait < 30; wait++) {
3409 msleep(1000);
3410 }
3411
3412 if (megasas_transition_to_ready(instance, 1)) {
3413 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3414
3415 atomic_set(&instance->fw_reset_no_pci_access, 1);
3416 megaraid_sas_kill_hba(instance);
3417 return ;
3418 }
3419
3420 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3421 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3422 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3423 ) {
3424 *instance->consumer = *instance->producer;
3425 } else {
3426 *instance->consumer = 0;
3427 *instance->producer = 0;
3428 }
3429
3430 megasas_issue_init_mfi(instance);
3431
3432 spin_lock_irqsave(&instance->hba_lock, flags);
3433 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3434 spin_unlock_irqrestore(&instance->hba_lock, flags);
3435 instance->instancet->enable_intr(instance);
3436
3437 megasas_issue_pending_cmds_again(instance);
3438 instance->issuepend_done = 1;
3439 }
3440 }
3441
3442 /**
3443 * megasas_deplete_reply_queue - Processes all completed commands
3444 * @instance: Adapter soft state
3445 * @alt_status: Alternate status to be returned to
3446 * SCSI mid-layer instead of the status
3447 * returned by the FW
3448 * Note: this must be called with hba lock held
3449 */
3450 static int
3451 megasas_deplete_reply_queue(struct megasas_instance *instance,
3452 u8 alt_status)
3453 {
3454 u32 mfiStatus;
3455 u32 fw_state;
3456
3457 if ((mfiStatus = instance->instancet->check_reset(instance,
3458 instance->reg_set)) == 1) {
3459 return IRQ_HANDLED;
3460 }
3461
3462 if ((mfiStatus = instance->instancet->clear_intr(
3463 instance->reg_set)
3464 ) == 0) {
3465 /* Hardware may not set outbound_intr_status in MSI-X mode */
3466 if (!instance->msix_vectors)
3467 return IRQ_NONE;
3468 }
3469
3470 instance->mfiStatus = mfiStatus;
3471
3472 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3473 fw_state = instance->instancet->read_fw_status_reg(
3474 instance->reg_set) & MFI_STATE_MASK;
3475
3476 if (fw_state != MFI_STATE_FAULT) {
3477 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3478 fw_state);
3479 }
3480
3481 if ((fw_state == MFI_STATE_FAULT) &&
3482 (instance->disableOnlineCtrlReset == 0)) {
3483 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3484
3485 if ((instance->pdev->device ==
3486 PCI_DEVICE_ID_LSI_SAS1064R) ||
3487 (instance->pdev->device ==
3488 PCI_DEVICE_ID_DELL_PERC5) ||
3489 (instance->pdev->device ==
3490 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3491
3492 *instance->consumer =
3493 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3494 }
3495
3496
3497 instance->instancet->disable_intr(instance);
3498 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3499 instance->issuepend_done = 0;
3500
3501 atomic_set(&instance->fw_outstanding, 0);
3502 megasas_internal_reset_defer_cmds(instance);
3503
3504 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3505 fw_state, atomic_read(&instance->adprecovery));
3506
3507 schedule_work(&instance->work_init);
3508 return IRQ_HANDLED;
3509
3510 } else {
3511 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3512 fw_state, instance->disableOnlineCtrlReset);
3513 }
3514 }
3515
3516 tasklet_schedule(&instance->isr_tasklet);
3517 return IRQ_HANDLED;
3518 }
3519 /**
3520 * megasas_isr - isr entry point
3521 */
3522 static irqreturn_t megasas_isr(int irq, void *devp)
3523 {
3524 struct megasas_irq_context *irq_context = devp;
3525 struct megasas_instance *instance = irq_context->instance;
3526 unsigned long flags;
3527 irqreturn_t rc;
3528
3529 if (atomic_read(&instance->fw_reset_no_pci_access))
3530 return IRQ_HANDLED;
3531
3532 spin_lock_irqsave(&instance->hba_lock, flags);
3533 rc = megasas_deplete_reply_queue(instance, DID_OK);
3534 spin_unlock_irqrestore(&instance->hba_lock, flags);
3535
3536 return rc;
3537 }
3538
3539 /**
3540 * megasas_transition_to_ready - Move the FW to READY state
3541 * @instance: Adapter soft state
3542 *
3543 * During initialization, the FW can potentially be in any one of
3544 * several possible states. If the FW is in the operational or
3545 * waiting-for-handshake states, the driver must take steps to bring it to
3546 * the ready state. Otherwise, it has to wait for the ready state.
3547 */
3548 int
3549 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3550 {
3551 int i;
3552 u8 max_wait;
3553 u32 fw_state;
3554 u32 cur_state;
3555 u32 abs_state, curr_abs_state;
3556
3557 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3558 fw_state = abs_state & MFI_STATE_MASK;
3559
3560 if (fw_state != MFI_STATE_READY)
3561 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3562 " state\n");
3563
3564 while (fw_state != MFI_STATE_READY) {
3565
3566 switch (fw_state) {
3567
3568 case MFI_STATE_FAULT:
3569 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3570 if (ocr) {
3571 max_wait = MEGASAS_RESET_WAIT_TIME;
3572 cur_state = MFI_STATE_FAULT;
3573 break;
3574 } else
3575 return -ENODEV;
3576
3577 case MFI_STATE_WAIT_HANDSHAKE:
3578 /*
3579 * Set the CLR bit in inbound doorbell
3580 */
3581 if ((instance->pdev->device ==
3582 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3583 (instance->pdev->device ==
3584 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3585 (instance->ctrl_context))
3586 writel(
3587 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3588 &instance->reg_set->doorbell);
3589 else
3590 writel(
3591 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3592 &instance->reg_set->inbound_doorbell);
3593
3594 max_wait = MEGASAS_RESET_WAIT_TIME;
3595 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3596 break;
3597
3598 case MFI_STATE_BOOT_MESSAGE_PENDING:
3599 if ((instance->pdev->device ==
3600 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3601 (instance->pdev->device ==
3602 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3603 (instance->ctrl_context))
3604 writel(MFI_INIT_HOTPLUG,
3605 &instance->reg_set->doorbell);
3606 else
3607 writel(MFI_INIT_HOTPLUG,
3608 &instance->reg_set->inbound_doorbell);
3609
3610 max_wait = MEGASAS_RESET_WAIT_TIME;
3611 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3612 break;
3613
3614 case MFI_STATE_OPERATIONAL:
3615 /*
3616 * Bring it to READY state; assuming max wait 10 secs
3617 */
3618 instance->instancet->disable_intr(instance);
3619 if ((instance->pdev->device ==
3620 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3621 (instance->pdev->device ==
3622 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3623 (instance->ctrl_context)) {
3624 writel(MFI_RESET_FLAGS,
3625 &instance->reg_set->doorbell);
3626
3627 if (instance->ctrl_context) {
3628 for (i = 0; i < (10 * 1000); i += 20) {
3629 if (readl(
3630 &instance->
3631 reg_set->
3632 doorbell) & 1)
3633 msleep(20);
3634 else
3635 break;
3636 }
3637 }
3638 } else
3639 writel(MFI_RESET_FLAGS,
3640 &instance->reg_set->inbound_doorbell);
3641
3642 max_wait = MEGASAS_RESET_WAIT_TIME;
3643 cur_state = MFI_STATE_OPERATIONAL;
3644 break;
3645
3646 case MFI_STATE_UNDEFINED:
3647 /*
3648 * This state should not last for more than 2 seconds
3649 */
3650 max_wait = MEGASAS_RESET_WAIT_TIME;
3651 cur_state = MFI_STATE_UNDEFINED;
3652 break;
3653
3654 case MFI_STATE_BB_INIT:
3655 max_wait = MEGASAS_RESET_WAIT_TIME;
3656 cur_state = MFI_STATE_BB_INIT;
3657 break;
3658
3659 case MFI_STATE_FW_INIT:
3660 max_wait = MEGASAS_RESET_WAIT_TIME;
3661 cur_state = MFI_STATE_FW_INIT;
3662 break;
3663
3664 case MFI_STATE_FW_INIT_2:
3665 max_wait = MEGASAS_RESET_WAIT_TIME;
3666 cur_state = MFI_STATE_FW_INIT_2;
3667 break;
3668
3669 case MFI_STATE_DEVICE_SCAN:
3670 max_wait = MEGASAS_RESET_WAIT_TIME;
3671 cur_state = MFI_STATE_DEVICE_SCAN;
3672 break;
3673
3674 case MFI_STATE_FLUSH_CACHE:
3675 max_wait = MEGASAS_RESET_WAIT_TIME;
3676 cur_state = MFI_STATE_FLUSH_CACHE;
3677 break;
3678
3679 default:
3680 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3681 fw_state);
3682 return -ENODEV;
3683 }
3684
3685 /*
3686 * The cur_state should not last for more than max_wait secs
3687 */
3688 for (i = 0; i < (max_wait * 1000); i++) {
3689 curr_abs_state = instance->instancet->
3690 read_fw_status_reg(instance->reg_set);
3691
3692 if (abs_state == curr_abs_state) {
3693 msleep(1);
3694 } else
3695 break;
3696 }
3697
3698 /*
3699 * Return error if fw_state hasn't changed after max_wait
3700 */
3701 if (curr_abs_state == abs_state) {
3702 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3703 "in %d secs\n", fw_state, max_wait);
3704 return -ENODEV;
3705 }
3706
3707 abs_state = curr_abs_state;
3708 fw_state = curr_abs_state & MFI_STATE_MASK;
3709 }
3710 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3711
3712 return 0;
3713 }
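/*
 * Summary of the wait loop above: every transitional firmware state is given
 * up to MEGASAS_RESET_WAIT_TIME seconds; the absolute scratch-pad state is
 * polled roughly once per millisecond, and -ENODEV is returned if the state
 * does not change within that window, if the FW reports FAULT without OCR
 * being allowed, or if an unknown state is seen.
 */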
3714
3715 /**
3716 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3717 * @instance: Adapter soft state
3718 */
3719 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3720 {
3721 int i;
3722 u32 max_cmd = instance->max_mfi_cmds;
3723 struct megasas_cmd *cmd;
3724
3725 if (!instance->frame_dma_pool)
3726 return;
3727
3728 /*
3729 * Return all frames to pool
3730 */
3731 for (i = 0; i < max_cmd; i++) {
3732
3733 cmd = instance->cmd_list[i];
3734
3735 if (cmd->frame)
3736 pci_pool_free(instance->frame_dma_pool, cmd->frame,
3737 cmd->frame_phys_addr);
3738
3739 if (cmd->sense)
3740 pci_pool_free(instance->sense_dma_pool, cmd->sense,
3741 cmd->sense_phys_addr);
3742 }
3743
3744 /*
3745 * Now destroy the pool itself
3746 */
3747 pci_pool_destroy(instance->frame_dma_pool);
3748 pci_pool_destroy(instance->sense_dma_pool);
3749
3750 instance->frame_dma_pool = NULL;
3751 instance->sense_dma_pool = NULL;
3752 }
3753
3754 /**
3755 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3756 * @instance: Adapter soft state
3757 *
3758 * Each command packet has an embedded DMA memory buffer that is used for
3759 * filling the MFI frame and the SG list that immediately follows the frame.
3760 * This function creates those DMA memory buffers for each command packet
3761 * using the PCI pool facility.
3762 */
3763 static int megasas_create_frame_pool(struct megasas_instance *instance)
3764 {
3765 int i;
3766 u32 max_cmd;
3767 u32 sge_sz;
3768 u32 total_sz;
3769 u32 frame_count;
3770 struct megasas_cmd *cmd;
3771
3772 max_cmd = instance->max_mfi_cmds;
3773
3774 /*
3775 * The size of our frame is 64 bytes for the MFI frame, followed by the max SG
3776 * elements, and finally SCSI_SENSE_BUFFERSIZE bytes for the sense buffer.
3777 */
3778 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3779 sizeof(struct megasas_sge32);
3780
3781 if (instance->flag_ieee)
3782 sge_sz = sizeof(struct megasas_sge_skinny);
3783
3784 /*
3785 * For MFI controllers:
3786 * max_num_sge = 60
3787 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
3788 * Total 960 bytes (15 MFI frames of 64 bytes each)
3789 *
3790 * Fusion adapters require only 3 extra frames:
3791 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3792 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
3793 * Total 192 bytes (3 MFI frames of 64 bytes each)
3794 */
3795 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3796 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
3797 /*
3798 * Use DMA pool facility provided by PCI layer
3799 */
3800 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
3801 instance->pdev, total_sz, 256, 0);
3802
3803 if (!instance->frame_dma_pool) {
3804 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3805 return -ENOMEM;
3806 }
3807
3808 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
3809 instance->pdev, 128, 4, 0);
3810
3811 if (!instance->sense_dma_pool) {
3812 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3813
3814 pci_pool_destroy(instance->frame_dma_pool);
3815 instance->frame_dma_pool = NULL;
3816
3817 return -ENOMEM;
3818 }
3819
3820 /*
3821 * Allocate and attach a frame to each of the commands in cmd_list.
3822 * By making cmd->index the context instead of &cmd, we can always
3823 * use a 32-bit context regardless of the architecture.
3824 */
3825 for (i = 0; i < max_cmd; i++) {
3826
3827 cmd = instance->cmd_list[i];
3828
3829 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
3830 GFP_KERNEL, &cmd->frame_phys_addr);
3831
3832 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
3833 GFP_KERNEL, &cmd->sense_phys_addr);
3834
3835 /*
3836 * megasas_teardown_frame_pool() takes care of freeing
3837 * whatever has been allocated
3838 */
3839 if (!cmd->frame || !cmd->sense) {
3840 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
3841 megasas_teardown_frame_pool(instance);
3842 return -ENOMEM;
3843 }
3844
3845 memset(cmd->frame, 0, total_sz);
3846 cmd->frame->io.context = cpu_to_le32(cmd->index);
3847 cmd->frame->io.pad_0 = 0;
3848 if (!instance->ctrl_context && reset_devices)
3849 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3850 }
3851
3852 return 0;
3853 }
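/*
 * Worked example of the sizing above (assuming MEGAMFI_FRAME_SIZE is the
 * 64-byte frame size referred to in the comment): an MFI adapter gets
 * frame_count = 16, i.e. total_sz = 16 * 64 = 1024 bytes per command (one
 * header frame plus 960 bytes of SGL space), while a Fusion adapter gets
 * frame_count = 4, i.e. 4 * 64 = 256 bytes per command.
 */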
3854
3855 /**
3856 * megasas_free_cmds - Free all the cmds in the free cmd pool
3857 * @instance: Adapter soft state
3858 */
3859 void megasas_free_cmds(struct megasas_instance *instance)
3860 {
3861 int i;
3862
3863 /* First free the MFI frame pool */
3864 megasas_teardown_frame_pool(instance);
3865
3866 /* Free all the commands in the cmd_list */
3867 for (i = 0; i < instance->max_mfi_cmds; i++)
3868
3869 kfree(instance->cmd_list[i]);
3870
3871 /* Free the cmd_list buffer itself */
3872 kfree(instance->cmd_list);
3873 instance->cmd_list = NULL;
3874
3875 INIT_LIST_HEAD(&instance->cmd_pool);
3876 }
3877
3878 /**
3879 * megasas_alloc_cmds - Allocates the command packets
3880 * @instance: Adapter soft state
3881 *
3882 * Each command that is issued to the FW, whether an IO command from the OS
3883 * or an internal command like an IOCTL, is wrapped in a local data structure
3884 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
3885 * actually issued to the FW.
3886 *
3887 * Each frame has a 32-bit field called context (tag). This context is used
3888 * to get back the megasas_cmd from the frame when a frame gets completed in
3889 * the ISR. Typically the address of the megasas_cmd itself would be used as
3890 * the context. But we wanted to keep the differences between 32 and 64 bit
3891 * systems to the minimum, so we always use 32-bit integers for the context. In
3892 * this driver, the 32-bit values are indices into the cmd_list array.
3893 * This array is used only to look up the megasas_cmd given the context. The
3894 * free commands themselves are maintained in a linked list called cmd_pool.
3895 */
3896 int megasas_alloc_cmds(struct megasas_instance *instance)
3897 {
3898 int i;
3899 int j;
3900 u32 max_cmd;
3901 struct megasas_cmd *cmd;
3902 struct fusion_context *fusion;
3903
3904 fusion = instance->ctrl_context;
3905 max_cmd = instance->max_mfi_cmds;
3906
3907 /*
3908 * instance->cmd_list is an array of struct megasas_cmd pointers.
3909 * Allocate the dynamic array first and then allocate individual
3910 * commands.
3911 */
3912 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL);
3913
3914 if (!instance->cmd_list) {
3915 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
3916 return -ENOMEM;
3917 }
3918
3919 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) * max_cmd);
3920
3921 for (i = 0; i < max_cmd; i++) {
3922 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
3923 GFP_KERNEL);
3924
3925 if (!instance->cmd_list[i]) {
3926
3927 for (j = 0; j < i; j++)
3928 kfree(instance->cmd_list[j]);
3929
3930 kfree(instance->cmd_list);
3931 instance->cmd_list = NULL;
3932
3933 return -ENOMEM;
3934 }
3935 }
3936
3937 for (i = 0; i < max_cmd; i++) {
3938 cmd = instance->cmd_list[i];
3939 memset(cmd, 0, sizeof(struct megasas_cmd));
3940 cmd->index = i;
3941 cmd->scmd = NULL;
3942 cmd->instance = instance;
3943
3944 list_add_tail(&cmd->list, &instance->cmd_pool);
3945 }
3946
3947 /*
3948 * Create a frame pool and assign one frame to each cmd
3949 */
3950 if (megasas_create_frame_pool(instance)) {
3951 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
3952 megasas_free_cmds(instance);
3953 }
3954
3955 return 0;
3956 }
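/*
 * Typical consumer of the pool created above (a condensed sketch of the
 * pattern followed by the DCMD helpers later in this file): take a command
 * from cmd_pool, fill its embedded frame, issue it blocked or polled, and
 * return it unless it timed out.
 *
 *	cmd = megasas_get_cmd(instance);
 *	if (!cmd)
 *		return -ENOMEM;
 *	dcmd = &cmd->frame->dcmd;
 *	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 *	... fill in dcmd->cmd, dcmd->opcode, dcmd->sgl, etc. ...
 *	ret = megasas_issue_polled(instance, cmd);
 *	if (ret != DCMD_TIMEOUT)
 *		megasas_return_cmd(instance, cmd);
 */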
3957
3958 /*
3959 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
3960 * @instance: Adapter soft state
3961 *
3962 * Returns INITIATE_OCR only for Fusion adapters when no driver load/unload
3963 * or OCR is in progress; otherwise KILL_ADAPTER (MFI) or IGNORE_TIMEOUT.
3964 */
3965 inline int
3966 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
3967
3968 if (!instance->ctrl_context)
3969 return KILL_ADAPTER;
3970 else if (instance->unload ||
3971 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
3972 return IGNORE_TIMEOUT;
3973 else
3974 return INITIATE_OCR;
3975 }
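/*
 * Callers below act on the three possible return values: INITIATE_OCR
 * triggers megasas_reset_fusion() (with DRV_DCMD_SKIP_REFIRE set on the
 * timed-out command, and with reset_mutex dropped around the reset on the
 * AEN paths), KILL_ADAPTER calls megaraid_sas_kill_hba(), and
 * IGNORE_TIMEOUT only logs the event.
 */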
3976
3977 static int
3978 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
3979 {
3980 int ret;
3981 struct megasas_cmd *cmd;
3982 struct megasas_dcmd_frame *dcmd;
3983
3984 cmd = megasas_get_cmd(instance);
3985
3986 if (!cmd) {
3987 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
3988 return -ENOMEM;
3989 }
3990
3991 dcmd = &cmd->frame->dcmd;
3992
3993 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
3994 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3995
3996 dcmd->mbox.s[0] = cpu_to_le16(device_id);
3997 dcmd->cmd = MFI_CMD_DCMD;
3998 dcmd->cmd_status = 0xFF;
3999 dcmd->sge_count = 1;
4000 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4001 dcmd->timeout = 0;
4002 dcmd->pad_0 = 0;
4003 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4004 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4005 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4006 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4007
4008 if (instance->ctrl_context && !instance->mask_interrupts)
4009 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4010 else
4011 ret = megasas_issue_polled(instance, cmd);
4012
4013 switch (ret) {
4014 case DCMD_SUCCESS:
4015 instance->pd_list[device_id].interface =
4016 instance->pd_info->state.ddf.pdType.intf;
4017 break;
4018
4019 case DCMD_TIMEOUT:
4020
4021 switch (dcmd_timeout_ocr_possible(instance)) {
4022 case INITIATE_OCR:
4023 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4024 megasas_reset_fusion(instance->host,
4025 MFI_IO_TIMEOUT_OCR);
4026 break;
4027 case KILL_ADAPTER:
4028 megaraid_sas_kill_hba(instance);
4029 break;
4030 case IGNORE_TIMEOUT:
4031 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4032 __func__, __LINE__);
4033 break;
4034 }
4035
4036 break;
4037 }
4038
4039 if (ret != DCMD_TIMEOUT)
4040 megasas_return_cmd(instance, cmd);
4041
4042 return ret;
4043 }
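/*
 * Note on the issue path used above and in the helpers below: when a Fusion
 * context exists and interrupts are not masked, the DCMD is issued as a
 * blocked command with an MFI_IO_TIMEOUT_SECS timeout; otherwise it is
 * issued in polled mode. A command that timed out is deliberately not
 * returned to the pool, presumably because the FW may still own the frame.
 */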
4044 /*
4045 * megasas_get_pd_list - Returns FW's pd_list structure
4046 * @instance: Adapter soft state
4047 *
4048 * Issues an internal command (DCMD) to get the FW's controller PD
4049 * list structure. This information is mainly used to find out which
4050 * physical drives are exposed to the host as system drives and to
4051 * populate instance->pd_list.
4052 */
4053 static int
4054 megasas_get_pd_list(struct megasas_instance *instance)
4055 {
4056 int ret = 0, pd_index = 0;
4057 struct megasas_cmd *cmd;
4058 struct megasas_dcmd_frame *dcmd;
4059 struct MR_PD_LIST *ci;
4060 struct MR_PD_ADDRESS *pd_addr;
4061 dma_addr_t ci_h = 0;
4062
4063 if (instance->pd_list_not_supported) {
4064 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4065 "not supported by firmware\n");
4066 return ret;
4067 }
4068
4069 cmd = megasas_get_cmd(instance);
4070
4071 if (!cmd) {
4072 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4073 return -ENOMEM;
4074 }
4075
4076 dcmd = &cmd->frame->dcmd;
4077
4078 ci = pci_alloc_consistent(instance->pdev,
4079 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4080
4081 if (!ci) {
4082 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4083 megasas_return_cmd(instance, cmd);
4084 return -ENOMEM;
4085 }
4086
4087 memset(ci, 0, sizeof(*ci));
4088 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4089
4090 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4091 dcmd->mbox.b[1] = 0;
4092 dcmd->cmd = MFI_CMD_DCMD;
4093 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4094 dcmd->sge_count = 1;
4095 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4096 dcmd->timeout = 0;
4097 dcmd->pad_0 = 0;
4098 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4099 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4100 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4101 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4102
4103 if (instance->ctrl_context && !instance->mask_interrupts)
4104 ret = megasas_issue_blocked_cmd(instance, cmd,
4105 MFI_IO_TIMEOUT_SECS);
4106 else
4107 ret = megasas_issue_polled(instance, cmd);
4108
4109 switch (ret) {
4110 case DCMD_FAILED:
4111 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4112 "failed/not supported by firmware\n");
4113
4114 if (instance->ctrl_context)
4115 megaraid_sas_kill_hba(instance);
4116 else
4117 instance->pd_list_not_supported = 1;
4118 break;
4119 case DCMD_TIMEOUT:
4120
4121 switch (dcmd_timeout_ocr_possible(instance)) {
4122 case INITIATE_OCR:
4123 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4124 /*
4125 * DCMD timed out on the AEN path.
4126 * The AEN path already holds reset_mutex to avoid PCI access
4127 * while OCR is in progress, so drop it around the reset.
4128 */
4129 mutex_unlock(&instance->reset_mutex);
4130 megasas_reset_fusion(instance->host,
4131 MFI_IO_TIMEOUT_OCR);
4132 mutex_lock(&instance->reset_mutex);
4133 break;
4134 case KILL_ADAPTER:
4135 megaraid_sas_kill_hba(instance);
4136 break;
4137 case IGNORE_TIMEOUT:
4138 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4139 __func__, __LINE__);
4140 break;
4141 }
4142
4143 break;
4144
4145 case DCMD_SUCCESS:
4146 pd_addr = ci->addr;
4147
4148 if ((le32_to_cpu(ci->count) >
4149 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4150 break;
4151
4152 memset(instance->local_pd_list, 0,
4153 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4154
4155 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4156 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4157 le16_to_cpu(pd_addr->deviceId);
4158 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4159 pd_addr->scsiDevType;
4160 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4161 MR_PD_STATE_SYSTEM;
4162 pd_addr++;
4163 }
4164
4165 memcpy(instance->pd_list, instance->local_pd_list,
4166 sizeof(instance->pd_list));
4167 break;
4168
4169 }
4170
4171 pci_free_consistent(instance->pdev,
4172 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4173 ci, ci_h);
4174
4175 if (ret != DCMD_TIMEOUT)
4176 megasas_return_cmd(instance, cmd);
4177
4178 return ret;
4179 }
4180
4181 /*
4182 * megasas_get_ld_list - Returns FW's ld_list structure
4183 * @instance: Adapter soft state
4184 *
4185 * Issues an internal command (DCMD) to get the FW's controller LD
4186 * list structure. This information is mainly used to find out which
4187 * logical drives (target IDs) are configured on the controller and to
4188 * populate instance->ld_ids.
4189 */
4190 static int
4191 megasas_get_ld_list(struct megasas_instance *instance)
4192 {
4193 int ret = 0, ld_index = 0, ids = 0;
4194 struct megasas_cmd *cmd;
4195 struct megasas_dcmd_frame *dcmd;
4196 struct MR_LD_LIST *ci;
4197 dma_addr_t ci_h = 0;
4198 u32 ld_count;
4199
4200 cmd = megasas_get_cmd(instance);
4201
4202 if (!cmd) {
4203 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4204 return -ENOMEM;
4205 }
4206
4207 dcmd = &cmd->frame->dcmd;
4208
4209 ci = pci_alloc_consistent(instance->pdev,
4210 sizeof(struct MR_LD_LIST),
4211 &ci_h);
4212
4213 if (!ci) {
4214 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4215 megasas_return_cmd(instance, cmd);
4216 return -ENOMEM;
4217 }
4218
4219 memset(ci, 0, sizeof(*ci));
4220 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4221
4222 if (instance->supportmax256vd)
4223 dcmd->mbox.b[0] = 1;
4224 dcmd->cmd = MFI_CMD_DCMD;
4225 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4226 dcmd->sge_count = 1;
4227 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4228 dcmd->timeout = 0;
4229 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4230 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4231 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4232 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4233 dcmd->pad_0 = 0;
4234
4235 if (instance->ctrl_context && !instance->mask_interrupts)
4236 ret = megasas_issue_blocked_cmd(instance, cmd,
4237 MFI_IO_TIMEOUT_SECS);
4238 else
4239 ret = megasas_issue_polled(instance, cmd);
4240
4241 ld_count = le32_to_cpu(ci->ldCount);
4242
4243 switch (ret) {
4244 case DCMD_FAILED:
4245 megaraid_sas_kill_hba(instance);
4246 break;
4247 case DCMD_TIMEOUT:
4248
4249 switch (dcmd_timeout_ocr_possible(instance)) {
4250 case INITIATE_OCR:
4251 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4252 /*
4253 * DCMD timed out on the AEN path.
4254 * The AEN path already holds reset_mutex to avoid PCI access
4255 * while OCR is in progress, so drop it around the reset.
4256 */
4257 mutex_unlock(&instance->reset_mutex);
4258 megasas_reset_fusion(instance->host,
4259 MFI_IO_TIMEOUT_OCR);
4260 mutex_lock(&instance->reset_mutex);
4261 break;
4262 case KILL_ADAPTER:
4263 megaraid_sas_kill_hba(instance);
4264 break;
4265 case IGNORE_TIMEOUT:
4266 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4267 __func__, __LINE__);
4268 break;
4269 }
4270
4271 break;
4272
4273 case DCMD_SUCCESS:
4274 if (ld_count > instance->fw_supported_vd_count)
4275 break;
4276
4277 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4278
4279 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4280 if (ci->ldList[ld_index].state != 0) {
4281 ids = ci->ldList[ld_index].ref.targetId;
4282 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4283 }
4284 }
4285
4286 break;
4287 }
4288
4289 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4290
4291 if (ret != DCMD_TIMEOUT)
4292 megasas_return_cmd(instance, cmd);
4293
4294 return ret;
4295 }
4296
4297 /**
4298 * megasas_ld_list_query - Returns FW's ld_list structure
4299 * @instance: Adapter soft state
4300 * @query_type: LD query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4301 *
4302 * Issues an internal command (DCMD) to query the FW's LD target ID
4303 * list. If the DCMD is not supported by the firmware, this falls back
4304 * to megasas_get_ld_list().
4305 */
4306 static int
4307 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4308 {
4309 int ret = 0, ld_index = 0, ids = 0;
4310 struct megasas_cmd *cmd;
4311 struct megasas_dcmd_frame *dcmd;
4312 struct MR_LD_TARGETID_LIST *ci;
4313 dma_addr_t ci_h = 0;
4314 u32 tgtid_count;
4315
4316 cmd = megasas_get_cmd(instance);
4317
4318 if (!cmd) {
4319 dev_warn(&instance->pdev->dev,
4320 "megasas_ld_list_query: Failed to get cmd\n");
4321 return -ENOMEM;
4322 }
4323
4324 dcmd = &cmd->frame->dcmd;
4325
4326 ci = pci_alloc_consistent(instance->pdev,
4327 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4328
4329 if (!ci) {
4330 dev_warn(&instance->pdev->dev,
4331 "Failed to alloc mem for ld_list_query\n");
4332 megasas_return_cmd(instance, cmd);
4333 return -ENOMEM;
4334 }
4335
4336 memset(ci, 0, sizeof(*ci));
4337 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4338
4339 dcmd->mbox.b[0] = query_type;
4340 if (instance->supportmax256vd)
4341 dcmd->mbox.b[2] = 1;
4342
4343 dcmd->cmd = MFI_CMD_DCMD;
4344 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4345 dcmd->sge_count = 1;
4346 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4347 dcmd->timeout = 0;
4348 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4349 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4350 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4351 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4352 dcmd->pad_0 = 0;
4353
4354 if (instance->ctrl_context && !instance->mask_interrupts)
4355 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4356 else
4357 ret = megasas_issue_polled(instance, cmd);
4358
4359 switch (ret) {
4360 case DCMD_FAILED:
4361 dev_info(&instance->pdev->dev,
4362 "DCMD not supported by firmware - %s %d\n",
4363 __func__, __LINE__);
4364 ret = megasas_get_ld_list(instance);
4365 break;
4366 case DCMD_TIMEOUT:
4367 switch (dcmd_timeout_ocr_possible(instance)) {
4368 case INITIATE_OCR:
4369 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4370 /*
4371 * DCMD timed out on the AEN path.
4372 * The AEN path already holds reset_mutex to avoid PCI access
4373 * while OCR is in progress, so drop it around the reset.
4374 */
4375 mutex_unlock(&instance->reset_mutex);
4376 megasas_reset_fusion(instance->host,
4377 MFI_IO_TIMEOUT_OCR);
4378 mutex_lock(&instance->reset_mutex);
4379 break;
4380 case KILL_ADAPTER:
4381 megaraid_sas_kill_hba(instance);
4382 break;
4383 case IGNORE_TIMEOUT:
4384 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4385 __func__, __LINE__);
4386 break;
4387 }
4388
4389 break;
4390 case DCMD_SUCCESS:
4391 tgtid_count = le32_to_cpu(ci->count);
4392
4393 if ((tgtid_count > (instance->fw_supported_vd_count)))
4394 break;
4395
4396 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4397 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4398 ids = ci->targetId[ld_index];
4399 instance->ld_ids[ids] = ci->targetId[ld_index];
4400 }
4401
4402 break;
4403 }
4404
4405 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4406 ci, ci_h);
4407
4408 if (ret != DCMD_TIMEOUT)
4409 megasas_return_cmd(instance, cmd);
4410
4411 return ret;
4412 }
4413
4414 /*
4415 * megasas_update_ext_vd_details - Update details w.r.t. Extended VDs
4416 * @instance: Adapter soft state
4417 */
4418 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4419 {
4420 struct fusion_context *fusion;
4421 u32 old_map_sz;
4422 u32 new_map_sz;
4423
4424 fusion = instance->ctrl_context;
4425 /* For MFI based controllers there is nothing to update */
4426 if (!fusion)
4427 return;
4428
4429 instance->supportmax256vd =
4430 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4431 /* Below is additional check to address future FW enhancement */
4432 if (instance->ctrl_info->max_lds > 64)
4433 instance->supportmax256vd = 1;
4434
4435 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4436 * MEGASAS_MAX_DEV_PER_CHANNEL;
4437 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4438 * MEGASAS_MAX_DEV_PER_CHANNEL;
4439 if (instance->supportmax256vd) {
4440 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4441 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4442 } else {
4443 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4444 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4445 }
4446
4447 dev_info(&instance->pdev->dev,
4448 "firmware type\t: %s\n",
4449 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4450 "Legacy(64 VD) firmware");
4451
4452 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4453 (sizeof(struct MR_LD_SPAN_MAP) *
4454 (instance->fw_supported_vd_count - 1));
4455 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4456 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
4457 (sizeof(struct MR_LD_SPAN_MAP) *
4458 (instance->drv_supported_vd_count - 1));
4459
4460 fusion->max_map_sz = max(old_map_sz, new_map_sz);
4461
4462
4463 if (instance->supportmax256vd)
4464 fusion->current_map_sz = new_map_sz;
4465 else
4466 fusion->current_map_sz = old_map_sz;
4467 }
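/*
 * Map sizing note: old_map_sz is the legacy MR_FW_RAID_MAP layout sized for
 * fw_supported_vd_count, new_map_sz is the extended MR_FW_RAID_MAP_EXT
 * layout. max_map_sz keeps the larger of the two, while current_map_sz
 * reflects whether this firmware reports extended VD (240 VD) support or
 * the legacy 64 VD layout.
 */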
4468
4469 /**
4470 * megasas_get_ctrl_info - Returns FW's controller structure
4471 * @instance: Adapter soft state
4472 *
4473 * Issues an internal command (DCMD) to get the FW's controller structure.
4474 * This information is mainly used to find out the maximum IO transfer per
4475 * command supported by the FW.
4476 */
4477 int
4478 megasas_get_ctrl_info(struct megasas_instance *instance)
4479 {
4480 int ret = 0;
4481 struct megasas_cmd *cmd;
4482 struct megasas_dcmd_frame *dcmd;
4483 struct megasas_ctrl_info *ci;
4484 struct megasas_ctrl_info *ctrl_info;
4485 dma_addr_t ci_h = 0;
4486
4487 ctrl_info = instance->ctrl_info;
4488
4489 cmd = megasas_get_cmd(instance);
4490
4491 if (!cmd) {
4492 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4493 return -ENOMEM;
4494 }
4495
4496 dcmd = &cmd->frame->dcmd;
4497
4498 ci = pci_alloc_consistent(instance->pdev,
4499 sizeof(struct megasas_ctrl_info), &ci_h);
4500
4501 if (!ci) {
4502 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4503 megasas_return_cmd(instance, cmd);
4504 return -ENOMEM;
4505 }
4506
4507 memset(ci, 0, sizeof(*ci));
4508 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4509
4510 dcmd->cmd = MFI_CMD_DCMD;
4511 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4512 dcmd->sge_count = 1;
4513 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4514 dcmd->timeout = 0;
4515 dcmd->pad_0 = 0;
4516 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4517 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4518 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4519 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4520 dcmd->mbox.b[0] = 1;
4521
4522 if (instance->ctrl_context && !instance->mask_interrupts)
4523 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4524 else
4525 ret = megasas_issue_polled(instance, cmd);
4526
4527 switch (ret) {
4528 case DCMD_SUCCESS:
4529 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4530 /* Save required controller information in
4531 * CPU endianness format.
4532 */
4533 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4534 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4535 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4536
4537 /* Update the latest Ext VD info.
4538 * From the init path, store the current firmware details.
4539 * From the OCR path, detect any firmware property changes,
4540 * e.g. after a firmware upgrade without a system reboot.
4541 */
4542 megasas_update_ext_vd_details(instance);
4543 instance->use_seqnum_jbod_fp =
4544 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4545
4546 /* Check whether the controller is iMR or MR */
4547 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4548 dev_info(&instance->pdev->dev,
4549 "controller type\t: %s(%dMB)\n",
4550 instance->is_imr ? "iMR" : "MR",
4551 le16_to_cpu(ctrl_info->memory_size));
4552
4553 instance->disableOnlineCtrlReset =
4554 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4555 instance->secure_jbod_support =
4556 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4557 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4558 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4559 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4560 instance->secure_jbod_support ? "Yes" : "No");
4561 break;
4562
4563 case DCMD_TIMEOUT:
4564 switch (dcmd_timeout_ocr_possible(instance)) {
4565 case INITIATE_OCR:
4566 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4567 megasas_reset_fusion(instance->host,
4568 MFI_IO_TIMEOUT_OCR);
4569 break;
4570 case KILL_ADAPTER:
4571 megaraid_sas_kill_hba(instance);
4572 break;
4573 case IGNORE_TIMEOUT:
4574 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4575 __func__, __LINE__);
4576 break;
4577 }
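/*
 * No break here: after the timeout handling above, control falls
 * through into the DCMD_FAILED case below and the HBA is killed
 * as well.
 */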
4578 case DCMD_FAILED:
4579 megaraid_sas_kill_hba(instance);
4580 break;
4581
4582 }
4583
4584 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4585 ci, ci_h);
4586
4587 megasas_return_cmd(instance, cmd);
4588
4589
4590 return ret;
4591 }
4592
4593 /*
4594 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4595 * to firmware
4596 *
4597 * @instance: Adapter soft state
4598 * @crash_buf_state: tell FW to turn the crash dump feature ON/OFF
4599 *     MR_CRASH_BUF_TURN_OFF = 0
4600 *     MR_CRASH_BUF_TURN_ON = 1
4601 * @return: 0 on success, non-zero on failure.
4602 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4603 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4604 * that driver supports crash dump feature. This DCMD will be sent only if
4605 * crash dump feature is supported by the FW.
4606 *
4607 */
4608 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4609 u8 crash_buf_state)
4610 {
4611 int ret = 0;
4612 struct megasas_cmd *cmd;
4613 struct megasas_dcmd_frame *dcmd;
4614
4615 cmd = megasas_get_cmd(instance);
4616
4617 if (!cmd) {
4618 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4619 return -ENOMEM;
4620 }
4621
4622
4623 dcmd = &cmd->frame->dcmd;
4624
4625 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4626 dcmd->mbox.b[0] = crash_buf_state;
4627 dcmd->cmd = MFI_CMD_DCMD;
4628 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4629 dcmd->sge_count = 1;
4630 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4631 dcmd->timeout = 0;
4632 dcmd->pad_0 = 0;
4633 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4634 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4635 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4636 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4637
4638 if (instance->ctrl_context && !instance->mask_interrupts)
4639 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4640 else
4641 ret = megasas_issue_polled(instance, cmd);
4642
4643 if (ret == DCMD_TIMEOUT) {
4644 switch (dcmd_timeout_ocr_possible(instance)) {
4645 case INITIATE_OCR:
4646 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4647 megasas_reset_fusion(instance->host,
4648 MFI_IO_TIMEOUT_OCR);
4649 break;
4650 case KILL_ADAPTER:
4651 megaraid_sas_kill_hba(instance);
4652 break;
4653 case IGNORE_TIMEOUT:
4654 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4655 __func__, __LINE__);
4656 break;
4657 }
4658 } else
4659 megasas_return_cmd(instance, cmd);
4660
4661 return ret;
4662 }
4663
4664 /**
4665 * megasas_issue_init_mfi - Initializes the FW
4666 * @instance: Adapter soft state
4667 *
4668 * Issues the INIT MFI cmd
4669 */
4670 static int
4671 megasas_issue_init_mfi(struct megasas_instance *instance)
4672 {
4673 __le32 context;
4674 struct megasas_cmd *cmd;
4675 struct megasas_init_frame *init_frame;
4676 struct megasas_init_queue_info *initq_info;
4677 dma_addr_t init_frame_h;
4678 dma_addr_t initq_info_h;
4679
4680 /*
4681 * Prepare an init frame. Note the init frame points to the queue info
4682 * structure. Each frame has SGL allocated after first 64 bytes. For
4683 * this frame - since we don't need any SGL - we use SGL's space as
4684 * queue info structure
4685 *
4686 * We will not get a NULL command below. We just created the pool.
4687 */
4688 cmd = megasas_get_cmd(instance);
4689
4690 init_frame = (struct megasas_init_frame *)cmd->frame;
4691 initq_info = (struct megasas_init_queue_info *)
4692 ((unsigned long)init_frame + 64);
4693
4694 init_frame_h = cmd->frame_phys_addr;
4695 initq_info_h = init_frame_h + 64;
4696
4697 context = init_frame->context;
4698 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4699 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4700 init_frame->context = context;
4701
4702 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4703 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4704
4705 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4706 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4707
4708 init_frame->cmd = MFI_CMD_INIT;
4709 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4710 init_frame->queue_info_new_phys_addr_lo =
4711 cpu_to_le32(lower_32_bits(initq_info_h));
4712 init_frame->queue_info_new_phys_addr_hi =
4713 cpu_to_le32(upper_32_bits(initq_info_h));
4714
4715 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4716
4717 /*
4718 * disable the intr before firing the init frame to FW
4719 */
4720 instance->instancet->disable_intr(instance);
4721
4722 /*
4723 * Issue the init frame in polled mode
4724 */
4725
4726 if (megasas_issue_polled(instance, cmd)) {
4727 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4728 megasas_return_cmd(instance, cmd);
4729 goto fail_fw_init;
4730 }
4731
4732 megasas_return_cmd(instance, cmd);
4733
4734 return 0;
4735
4736 fail_fw_init:
4737 return -EINVAL;
4738 }
4739
4740 static u32
4741 megasas_init_adapter_mfi(struct megasas_instance *instance)
4742 {
4743 struct megasas_register_set __iomem *reg_set;
4744 u32 context_sz;
4745 u32 reply_q_sz;
4746
4747 reg_set = instance->reg_set;
4748
4749 /*
4750 * Get various operational parameters from status register
4751 */
4752 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4753 /*
4754 * Reduce the max supported cmds by 1. This is to ensure that the
4755 * reply_q_sz (1 more than the max cmds that the driver may send)
4756 * does not exceed the max cmds that the FW can support.
4757 */
4758 instance->max_fw_cmds = instance->max_fw_cmds-1;
4759 instance->max_mfi_cmds = instance->max_fw_cmds;
4760 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4761 0x10;
4762 /*
4763 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4764 * are reserved for IOCTL + driver's internal DCMDs.
4765 */
4766 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4767 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4768 instance->max_scsi_cmds = (instance->max_fw_cmds -
4769 MEGASAS_SKINNY_INT_CMDS);
4770 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4771 } else {
4772 instance->max_scsi_cmds = (instance->max_fw_cmds -
4773 MEGASAS_INT_CMDS);
4774 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4775 }
4776
4777 instance->cur_can_queue = instance->max_scsi_cmds;
4778 /*
4779 * Create a pool of commands
4780 */
4781 if (megasas_alloc_cmds(instance))
4782 goto fail_alloc_cmds;
4783
4784 /*
4785 * Allocate memory for reply queue. Length of reply queue should
4786 * be _one_ more than the maximum commands handled by the firmware.
4787 *
4788 * Note: When FW completes commands, it places the corresponding context
4789 * values in this circular reply queue. This circular queue is a fairly
4790 * typical producer-consumer queue. FW is the producer (of completed
4791 * commands) and the driver is the consumer.
4792 */
4793 context_sz = sizeof(u32);
4794 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4795
4796 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4797 reply_q_sz,
4798 &instance->reply_queue_h);
4799
4800 if (!instance->reply_queue) {
4801 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4802 goto fail_reply_queue;
4803 }
4804
4805 if (megasas_issue_init_mfi(instance))
4806 goto fail_fw_init;
4807
4808 if (megasas_get_ctrl_info(instance)) {
4809 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4810 "Fail from %s %d\n", instance->unique_id,
4811 __func__, __LINE__);
4812 goto fail_fw_init;
4813 }
4814
4815 instance->fw_support_ieee = 0;
4816 instance->fw_support_ieee =
4817 (instance->instancet->read_fw_status_reg(reg_set) &
4818 0x04000000);
4819
4820 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4821 instance->fw_support_ieee);
4822
4823 if (instance->fw_support_ieee)
4824 instance->flag_ieee = 1;
4825
4826 return 0;
4827
4828 fail_fw_init:
4829
4830 pci_free_consistent(instance->pdev, reply_q_sz,
4831 instance->reply_queue, instance->reply_queue_h);
4832 fail_reply_queue:
4833 megasas_free_cmds(instance);
4834
4835 fail_alloc_cmds:
4836 return 1;
4837 }
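/*
 * MFI reply queue sizing recap: the queue holds one 32-bit context per
 * outstanding command plus one extra slot, hence
 * reply_q_sz = sizeof(u32) * (max_fw_cmds + 1); the same size is
 * recomputed from max_mfi_cmds when it is freed in megasas_release_mfi().
 */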
4838
4839 /*
4840 * megasas_setup_irqs_ioapic - register legacy interrupts.
4841 * @instance: Adapter soft state
4842 *
4843 * Do not enable interrupts, only set up the ISRs.
4844 *
4845 * Return 0 on success.
4846 */
4847 static int
4848 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4849 {
4850 struct pci_dev *pdev;
4851
4852 pdev = instance->pdev;
4853 instance->irq_context[0].instance = instance;
4854 instance->irq_context[0].MSIxIndex = 0;
4855 if (request_irq(pci_irq_vector(pdev, 0),
4856 instance->instancet->service_isr, IRQF_SHARED,
4857 "megasas", &instance->irq_context[0])) {
4858 dev_err(&instance->pdev->dev,
4859 "Failed to register IRQ from %s %d\n",
4860 __func__, __LINE__);
4861 return -1;
4862 }
4863 return 0;
4864 }
4865
4866 /**
4867 * megasas_setup_irqs_msix - register MSI-x interrupts.
4868 * @instance: Adapter soft state
4869 * @is_probe: Driver probe check
4870 *
4871 * Do not enable interrupts, only set up the ISRs.
4872 *
4873 * Return 0 on success.
4874 */
4875 static int
4876 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4877 {
4878 int i, j;
4879 struct pci_dev *pdev;
4880
4881 pdev = instance->pdev;
4882
4883 /* Try MSI-x */
4884 for (i = 0; i < instance->msix_vectors; i++) {
4885 instance->irq_context[i].instance = instance;
4886 instance->irq_context[i].MSIxIndex = i;
4887 if (request_irq(pci_irq_vector(pdev, i),
4888 instance->instancet->service_isr, 0, "megasas",
4889 &instance->irq_context[i])) {
4890 dev_err(&instance->pdev->dev,
4891 "Failed to register IRQ for vector %d.\n", i);
4892 for (j = 0; j < i; j++)
4893 free_irq(pci_irq_vector(pdev, j),
4894 &instance->irq_context[j]);
4895 /* Retry irq register for IO_APIC*/
4896 instance->msix_vectors = 0;
4897 if (is_probe)
4898 return megasas_setup_irqs_ioapic(instance);
4899 else
4900 return -1;
4901 }
4902 }
4903 return 0;
4904 }
4905
4906 /*
4907 * megasas_destroy_irqs - unregister interrupts.
4908 * @instance: Adapter soft state
4909 * return: void
4910 */
4911 static void
4912 megasas_destroy_irqs(struct megasas_instance *instance) {
4913
4914 int i;
4915
4916 if (instance->msix_vectors)
4917 for (i = 0; i < instance->msix_vectors; i++) {
4918 free_irq(pci_irq_vector(instance->pdev, i),
4919 &instance->irq_context[i]);
4920 }
4921 else
4922 free_irq(pci_irq_vector(instance->pdev, 0),
4923 &instance->irq_context[0]);
4924 }
4925
4926 /**
4927 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
4928 * @instance: Adapter soft state
4929 *
4930 * Sets instance->use_seqnum_jbod_fp based on whether the PD sequence
4931 * number maps could be allocated and synced with the FW.
4932 */
4933 void
4934 megasas_setup_jbod_map(struct megasas_instance *instance)
4935 {
4936 int i;
4937 struct fusion_context *fusion = instance->ctrl_context;
4938 u32 pd_seq_map_sz;
4939
4940 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
4941 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
4942
4943 if (reset_devices || !fusion ||
4944 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
4945 dev_info(&instance->pdev->dev,
4946 "Jbod map is not supported %s %d\n",
4947 __func__, __LINE__);
4948 instance->use_seqnum_jbod_fp = false;
4949 return;
4950 }
4951
4952 if (fusion->pd_seq_sync[0])
4953 goto skip_alloc;
4954
4955 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
4956 fusion->pd_seq_sync[i] = dma_alloc_coherent
4957 (&instance->pdev->dev, pd_seq_map_sz,
4958 &fusion->pd_seq_phys[i], GFP_KERNEL);
4959 if (!fusion->pd_seq_sync[i]) {
4960 dev_err(&instance->pdev->dev,
4961 "Failed to allocate memory from %s %d\n",
4962 __func__, __LINE__);
4963 if (i == 1) {
4964 dma_free_coherent(&instance->pdev->dev,
4965 pd_seq_map_sz, fusion->pd_seq_sync[0],
4966 fusion->pd_seq_phys[0]);
4967 fusion->pd_seq_sync[0] = NULL;
4968 }
4969 instance->use_seqnum_jbod_fp = false;
4970 return;
4971 }
4972 }
4973
4974 skip_alloc:
4975 if (!megasas_sync_pd_seq_num(instance, false) &&
4976 !megasas_sync_pd_seq_num(instance, true))
4977 instance->use_seqnum_jbod_fp = true;
4978 else
4979 instance->use_seqnum_jbod_fp = false;
4980 }
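/*
 * The JBOD map is kept in JBOD_MAPS_COUNT DMA-coherent copies (the error
 * path above assumes two) that are allocated once and then reused, and
 * use_seqnum_jbod_fp is only enabled after both megasas_sync_pd_seq_num()
 * calls (pend = false, then true) succeed.
 */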
4981
4982 /**
4983 * megasas_init_fw - Initializes the FW
4984 * @instance: Adapter soft state
4985 *
4986 * This is the main function for initializing firmware
4987 */
4988
4989 static int megasas_init_fw(struct megasas_instance *instance)
4990 {
4991 u32 max_sectors_1;
4992 u32 max_sectors_2;
4993 u32 tmp_sectors, msix_enable, scratch_pad_2;
4994 resource_size_t base_addr;
4995 struct megasas_register_set __iomem *reg_set;
4996 struct megasas_ctrl_info *ctrl_info = NULL;
4997 unsigned long bar_list;
4998 int i, loop, fw_msix_count = 0;
4999 struct IOV_111 *iovPtr;
5000 struct fusion_context *fusion;
5001
5002 fusion = instance->ctrl_context;
5003
5004 /* Find first memory bar */
5005 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5006 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5007 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5008 "megasas: LSI")) {
5009 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5010 return -EBUSY;
5011 }
5012
5013 base_addr = pci_resource_start(instance->pdev, instance->bar);
5014 instance->reg_set = ioremap_nocache(base_addr, 8192);
5015
5016 if (!instance->reg_set) {
5017 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5018 goto fail_ioremap;
5019 }
5020
5021 reg_set = instance->reg_set;
5022
5023 switch (instance->pdev->device) {
5024 case PCI_DEVICE_ID_LSI_FUSION:
5025 case PCI_DEVICE_ID_LSI_PLASMA:
5026 case PCI_DEVICE_ID_LSI_INVADER:
5027 case PCI_DEVICE_ID_LSI_FURY:
5028 case PCI_DEVICE_ID_LSI_INTRUDER:
5029 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5030 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5031 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5032 instance->instancet = &megasas_instance_template_fusion;
5033 break;
5034 case PCI_DEVICE_ID_LSI_SAS1078R:
5035 case PCI_DEVICE_ID_LSI_SAS1078DE:
5036 instance->instancet = &megasas_instance_template_ppc;
5037 break;
5038 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5039 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5040 instance->instancet = &megasas_instance_template_gen2;
5041 break;
5042 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5043 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5044 instance->instancet = &megasas_instance_template_skinny;
5045 break;
5046 case PCI_DEVICE_ID_LSI_SAS1064R:
5047 case PCI_DEVICE_ID_DELL_PERC5:
5048 default:
5049 instance->instancet = &megasas_instance_template_xscale;
5050 break;
5051 }
5052
5053 if (megasas_transition_to_ready(instance, 0)) {
5054 atomic_set(&instance->fw_reset_no_pci_access, 1);
5055 instance->instancet->adp_reset
5056 (instance, instance->reg_set);
5057 atomic_set(&instance->fw_reset_no_pci_access, 0);
5058 dev_info(&instance->pdev->dev,
5059 "FW restarted successfully from %s!\n",
5060 __func__);
5061
5062 /* wait for about 30 seconds before retrying */
5063 ssleep(30);
5064
5065 if (megasas_transition_to_ready(instance, 0))
5066 goto fail_ready_state;
5067 }
5068
5069 /*
5070 * MSI-X host index 0 is common for all adapters.
5071 * It is used by all MPT based adapters.
5072 */
5073 instance->reply_post_host_index_addr[0] =
5074 (u32 __iomem *)((u8 __iomem *)instance->reg_set +
5075 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5076
5077 /* Check if MSI-X is supported while in ready state */
5078 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5079 0x4000000) >> 0x1a;
5080 if (msix_enable && !msix_disable) {
5081 int irq_flags = PCI_IRQ_MSIX;
5082
5083 scratch_pad_2 = readl
5084 (&instance->reg_set->outbound_scratch_pad_2);
5085 /* Check max MSI-X vectors */
5086 if (fusion) {
5087 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5088 instance->msix_vectors = (scratch_pad_2
5089 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5090 fw_msix_count = instance->msix_vectors;
5091 } else { /* Invader series supports more than 8 MSI-x vectors*/
5092 instance->msix_vectors = ((scratch_pad_2
5093 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5094 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5095 if (rdpq_enable)
5096 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5097 1 : 0;
5098 fw_msix_count = instance->msix_vectors;
5099 /* Save reply post host index addresses 1-15 to local memory.
5100 * Index 0 is already saved from reg offset
5101 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5102 */
5103 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5104 instance->reply_post_host_index_addr[loop] =
5105 (u32 __iomem *)
5106 ((u8 __iomem *)instance->reg_set +
5107 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5108 + (loop * 0x10));
5109 }
5110 }
5111 if (msix_vectors)
5112 instance->msix_vectors = min(msix_vectors,
5113 instance->msix_vectors);
5114 } else /* MFI adapters */
5115 instance->msix_vectors = 1;
5116 /* Don't bother allocating more MSI-X vectors than cpus */
5117 instance->msix_vectors = min(instance->msix_vectors,
5118 (unsigned int)num_online_cpus());
5119 if (smp_affinity_enable)
5120 irq_flags |= PCI_IRQ_AFFINITY;
5121 i = pci_alloc_irq_vectors(instance->pdev, 1,
5122 instance->msix_vectors, irq_flags);
5123 if (i > 0)
5124 instance->msix_vectors = i;
5125 else
5126 instance->msix_vectors = 0;
5127 }
5128 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5129 if (i < 0)
5130 goto fail_setup_irqs;
5131
5132 dev_info(&instance->pdev->dev,
5133 "firmware supports msix\t: (%d)", fw_msix_count);
5134 dev_info(&instance->pdev->dev,
5135 "current msix/online cpus\t: (%d/%d)\n",
5136 instance->msix_vectors, (unsigned int)num_online_cpus());
5137 dev_info(&instance->pdev->dev,
5138 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5139
5140 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5141 (unsigned long)instance);
5142
5143 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5144 GFP_KERNEL);
5145 if (instance->ctrl_info == NULL)
5146 goto fail_init_adapter;
5147
5148 /*
5149 * Below are the default values for legacy firmware
5150 * (non-Fusion based controllers).
5151 */
5152 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5153 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5154 /* Get operational params, sge flags, send init cmd to controller */
5155 if (instance->instancet->init_adapter(instance))
5156 goto fail_init_adapter;
5157
5158 if (instance->msix_vectors ?
5159 megasas_setup_irqs_msix(instance, 1) :
5160 megasas_setup_irqs_ioapic(instance))
5161 goto fail_init_adapter;
5162
5163 instance->instancet->enable_intr(instance);
5164
5165 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5166
5167 megasas_setup_jbod_map(instance);
5168
5169 /*
5170 * For passthrough: the following call retrieves the PD list.
5171 */
5172 memset(instance->pd_list, 0,
5173 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5174 if (megasas_get_pd_list(instance) < 0) {
5175 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5176 goto fail_get_pd_list;
5177 }
5178
5179 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5180 if (megasas_ld_list_query(instance,
5181 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5182 megasas_get_ld_list(instance);
5183
5184 /*
5185 * Compute the max allowed sectors per IO: The controller info has two
5186 * limits on max sectors. The driver should use the minimum of these two.
5187 *
5188 * 1 << stripe_sz_ops.min = max sectors per strip
5189 *
5190 * Note that older firmware versions (< FW ver 30) didn't report the
5191 * information needed to calculate max_sectors_1, so it always ended up as zero.
5192 */
5193 tmp_sectors = 0;
5194 ctrl_info = instance->ctrl_info;
5195
5196 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5197 le16_to_cpu(ctrl_info->max_strips_per_io);
5198 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5199
5200 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5201
5202 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5203 instance->passive = ctrl_info->cluster.passive;
5204 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5205 instance->UnevenSpanSupport =
5206 ctrl_info->adapterOperations2.supportUnevenSpans;
5207 if (instance->UnevenSpanSupport) {
5208 struct fusion_context *fusion = instance->ctrl_context;
5209 if (MR_ValidateMapInfo(instance))
5210 fusion->fast_path_io = 1;
5211 else
5212 fusion->fast_path_io = 0;
5213
5214 }
5215 if (ctrl_info->host_interface.SRIOV) {
5216 instance->requestorId = ctrl_info->iov.requestorId;
5217 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5218 if (!ctrl_info->adapterOperations2.activePassive)
5219 instance->PlasmaFW111 = 1;
5220
5221 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5222 instance->PlasmaFW111 ? "1.11" : "new");
5223
5224 if (instance->PlasmaFW111) {
5225 iovPtr = (struct IOV_111 *)
5226 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5227 instance->requestorId = iovPtr->requestorId;
5228 }
5229 }
5230 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5231 instance->requestorId);
5232 }
5233
5234 instance->crash_dump_fw_support =
5235 ctrl_info->adapterOperations3.supportCrashDump;
5236 instance->crash_dump_drv_support =
5237 (instance->crash_dump_fw_support &&
5238 instance->crash_dump_buf);
5239 if (instance->crash_dump_drv_support)
5240 megasas_set_crash_dump_params(instance,
5241 MR_CRASH_BUF_TURN_OFF);
5242
5243 else {
5244 if (instance->crash_dump_buf)
5245 pci_free_consistent(instance->pdev,
5246 CRASH_DMA_BUF_SIZE,
5247 instance->crash_dump_buf,
5248 instance->crash_dump_h);
5249 instance->crash_dump_buf = NULL;
5250 }
5251
5252
5253 dev_info(&instance->pdev->dev,
5254 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5255 le16_to_cpu(ctrl_info->pci.vendor_id),
5256 le16_to_cpu(ctrl_info->pci.device_id),
5257 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5258 le16_to_cpu(ctrl_info->pci.sub_device_id));
5259 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5260 instance->UnevenSpanSupport ? "yes" : "no");
5261 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5262 instance->crash_dump_drv_support ? "yes" : "no");
5263 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5264 instance->use_seqnum_jbod_fp ? "yes" : "no");
5265
5266
5267 instance->max_sectors_per_req = instance->max_num_sge *
5268 SGE_BUFFER_SIZE / 512;
5269 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5270 instance->max_sectors_per_req = tmp_sectors;
5271
5272 /* Check for valid throttlequeuedepth module parameter */
5273 if (throttlequeuedepth &&
5274 throttlequeuedepth <= instance->max_scsi_cmds)
5275 instance->throttlequeuedepth = throttlequeuedepth;
5276 else
5277 instance->throttlequeuedepth =
5278 MEGASAS_THROTTLE_QUEUE_DEPTH;
5279
5280 if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
5281 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5282
5283 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5284 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5285
5286 /* Launch SR-IOV heartbeat timer */
5287 if (instance->requestorId) {
5288 if (!megasas_sriov_start_heartbeat(instance, 1))
5289 megasas_start_timer(instance,
5290 &instance->sriov_heartbeat_timer,
5291 megasas_sriov_heartbeat_handler,
5292 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5293 else
5294 instance->skip_heartbeat_timer_del = 1;
5295 }
5296
5297 return 0;
5298
5299 fail_get_pd_list:
5300 instance->instancet->disable_intr(instance);
5301 fail_init_adapter:
5302 megasas_destroy_irqs(instance);
5303 fail_setup_irqs:
5304 if (instance->msix_vectors)
5305 pci_free_irq_vectors(instance->pdev);
5306 instance->msix_vectors = 0;
5307 fail_ready_state:
5308 kfree(instance->ctrl_info);
5309 instance->ctrl_info = NULL;
5310 iounmap(instance->reg_set);
5311
5312 fail_ioremap:
5313 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5314
5315 return -EINVAL;
5316 }
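/*
 * Initialization order recap for megasas_init_fw(): map the register BAR,
 * bring the FW to READY (with one adapter reset retry), set up MSI-X or
 * legacy vectors, run the template's init_adapter hook, register and enable
 * interrupts, set up the JBOD map, fetch the PD and LD lists, and finally
 * derive I/O limits, crash dump and SR-IOV settings from the controller
 * info. The error labels unwind roughly in reverse order.
 */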
5317
5318 /**
5319 * megasas_release_mfi - Reverses the FW initialization
5320 * @instance: Adapter soft state
5321 */
5322 static void megasas_release_mfi(struct megasas_instance *instance)
5323 {
5324 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5325
5326 if (instance->reply_queue)
5327 pci_free_consistent(instance->pdev, reply_q_sz,
5328 instance->reply_queue, instance->reply_queue_h);
5329
5330 megasas_free_cmds(instance);
5331
5332 iounmap(instance->reg_set);
5333
5334 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5335 }
5336
5337 /**
5338 * megasas_get_seq_num - Gets latest event sequence numbers
5339 * @instance: Adapter soft state
5340 * @eli: FW event log sequence numbers information
5341 *
5342 * FW maintains a log of all events in a non-volatile area. Upper layers would
5343 * usually find out the latest sequence number of the events, the seq number at
5344 * the boot etc. They would "read" all the events below the latest seq number
5345 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5346 * number), they would subscribe to AEN (asynchronous event notification) and
5347 * wait for the events to happen.
5348 */
5349 static int
5350 megasas_get_seq_num(struct megasas_instance *instance,
5351 struct megasas_evt_log_info *eli)
5352 {
5353 struct megasas_cmd *cmd;
5354 struct megasas_dcmd_frame *dcmd;
5355 struct megasas_evt_log_info *el_info;
5356 dma_addr_t el_info_h = 0;
5357
5358 cmd = megasas_get_cmd(instance);
5359
5360 if (!cmd) {
5361 return -ENOMEM;
5362 }
5363
5364 dcmd = &cmd->frame->dcmd;
5365 el_info = pci_alloc_consistent(instance->pdev,
5366 sizeof(struct megasas_evt_log_info),
5367 &el_info_h);
5368
5369 if (!el_info) {
5370 megasas_return_cmd(instance, cmd);
5371 return -ENOMEM;
5372 }
5373
5374 memset(el_info, 0, sizeof(*el_info));
5375 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5376
5377 dcmd->cmd = MFI_CMD_DCMD;
5378 dcmd->cmd_status = 0x0;
5379 dcmd->sge_count = 1;
5380 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5381 dcmd->timeout = 0;
5382 dcmd->pad_0 = 0;
5383 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5384 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5385 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5386 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5387
5388 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5389 DCMD_SUCCESS) {
5390 /*
5391 * Copy the data back into callers buffer
5392 */
5393 eli->newest_seq_num = el_info->newest_seq_num;
5394 eli->oldest_seq_num = el_info->oldest_seq_num;
5395 eli->clear_seq_num = el_info->clear_seq_num;
5396 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5397 eli->boot_seq_num = el_info->boot_seq_num;
5398 } else
5399 dev_err(&instance->pdev->dev, "DCMD failed "
5400 "from %s\n", __func__);
5401
5402 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5403 el_info, el_info_h);
5404
5405 megasas_return_cmd(instance, cmd);
5406
5407 return 0;
5408 }
5409
5410 /**
5411 * megasas_register_aen - Registers for asynchronous event notification
5412 * @instance: Adapter soft state
5413 * @seq_num: The starting sequence number
5414 * @class_locale_word: Class and locale of the events of interest
5415 *
5416 * This function subscribes for AEN for events beyond @seq_num. It requests
5417 * to be notified if and only if the event matches @class_locale_word.
5418 */
5419 static int
5420 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5421 u32 class_locale_word)
5422 {
5423 int ret_val;
5424 struct megasas_cmd *cmd;
5425 struct megasas_dcmd_frame *dcmd;
5426 union megasas_evt_class_locale curr_aen;
5427 union megasas_evt_class_locale prev_aen;
5428
5429 /*
5430 * If there is an AEN pending already (aen_cmd), check if the
5431 * class_locale of that pending AEN is inclusive of the new
5432 * AEN request we currently have. If it is, then we don't have
5433 * to do anything. In other words, whichever events the current
5434 * AEN request is subscribing to, have already been subscribed
5435 * to.
5436 *
5437 * If the old_cmd is _not_ inclusive, then we have to abort
5438 * that command, form a class_locale that is superset of both
5439 * old and current and re-issue to the FW
5440 */
5441
5442 curr_aen.word = class_locale_word;
5443
5444 if (instance->aen_cmd) {
5445
5446 prev_aen.word =
5447 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5448
5449 /*
5450 * A class whose enum value is smaller is inclusive of all
5451 * higher values. If a PROGRESS (= -1) was previously
5452 * registered, then a new registration requests for higher
5453 * classes need not be sent to FW. They are automatically
5454 * included.
5455 *
5456 * Locale numbers don't have such hierarchy. They are bitmap
5457 * values
5458 */
5459 if ((prev_aen.members.class <= curr_aen.members.class) &&
5460 !((prev_aen.members.locale & curr_aen.members.locale) ^
5461 curr_aen.members.locale)) {
5462 /*
5463 * Previously issued event registration includes
5464 * current request. Nothing to do.
5465 */
5466 return 0;
5467 } else {
5468 curr_aen.members.locale |= prev_aen.members.locale;
5469
5470 if (prev_aen.members.class < curr_aen.members.class)
5471 curr_aen.members.class = prev_aen.members.class;
5472
5473 instance->aen_cmd->abort_aen = 1;
5474 ret_val = megasas_issue_blocked_abort_cmd(instance,
5475 instance->
5476 aen_cmd, 30);
5477
5478 if (ret_val) {
5479 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5480 "previous AEN command\n");
5481 return ret_val;
5482 }
5483 }
5484 }
5485
5486 cmd = megasas_get_cmd(instance);
5487
5488 if (!cmd)
5489 return -ENOMEM;
5490
5491 dcmd = &cmd->frame->dcmd;
5492
5493 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5494
5495 /*
5496 * Prepare DCMD for aen registration
5497 */
5498 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5499
5500 dcmd->cmd = MFI_CMD_DCMD;
5501 dcmd->cmd_status = 0x0;
5502 dcmd->sge_count = 1;
5503 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5504 dcmd->timeout = 0;
5505 dcmd->pad_0 = 0;
5506 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5507 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5508 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5509 instance->last_seq_num = seq_num;
5510 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
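/*
 * mbox.w[0] carries the starting sequence number and mbox.w[1] the
 * class/locale filter word; a later registration reads w[1] back to
 * decide whether the pending AEN already covers its request.
 */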
5511 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5512 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5513
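/*
 * If an AEN command is already registered at this point, keep it and
 * return this freshly built frame to the pool instead of issuing a
 * second registration.
 */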
5514 if (instance->aen_cmd != NULL) {
5515 megasas_return_cmd(instance, cmd);
5516 return 0;
5517 }
5518
5519 /*
5520 * Store reference to the cmd used to register for AEN. When an
5521 * application wants us to register for AEN, we have to abort this
5522 * cmd and re-register with a new EVENT LOCALE supplied by that app
5523 */
5524 instance->aen_cmd = cmd;
5525
5526 /*
5527 * Issue the aen registration frame
5528 */
5529 instance->instancet->issue_dcmd(instance, cmd);
5530
5531 return 0;
5532 }
5533
5534 /**
5535 * megasas_start_aen - Subscribes to AEN during driver load time
5536 * @instance: Adapter soft state
5537 */
5538 static int megasas_start_aen(struct megasas_instance *instance)
5539 {
5540 struct megasas_evt_log_info eli;
5541 union megasas_evt_class_locale class_locale;
5542
5543 /*
5544 * Get the latest sequence number from FW
5545 */
5546 memset(&eli, 0, sizeof(eli));
5547
5548 if (megasas_get_seq_num(instance, &eli))
5549 return -1;
5550
5551 /*
5552 * Register AEN with FW for latest sequence number plus 1
5553 */
5554 class_locale.members.reserved = 0;
5555 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5556 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5557
5558 return megasas_register_aen(instance,
5559 le32_to_cpu(eli.newest_seq_num) + 1,
5560 class_locale.word);
5561 }
5562
5563 /**
5564 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5565 * @instance: Adapter soft state
5566 */
5567 static int megasas_io_attach(struct megasas_instance *instance)
5568 {
5569 struct Scsi_Host *host = instance->host;
5570
5571 /*
5572 * Export parameters required by SCSI mid-layer
5573 */
5574 host->unique_id = instance->unique_id;
5575 host->can_queue = instance->max_scsi_cmds;
5576 host->this_id = instance->init_id;
5577 host->sg_tablesize = instance->max_num_sge;
5578
5579 if (instance->fw_support_ieee)
5580 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5581
5582 /*
5583 * Check if the module parameter value for max_sectors can be used
5584 */
5585 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5586 instance->max_sectors_per_req = max_sectors;
5587 else {
5588 if (max_sectors) {
5589 if (((instance->pdev->device ==
5590 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5591 (instance->pdev->device ==
5592 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5593 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5594 instance->max_sectors_per_req = max_sectors;
5595 } else {
5596 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5597 "and <= %d (or < 1MB for GEN2 controller)\n",
5598 instance->max_sectors_per_req);
5599 }
5600 }
5601 }
5602
5603 host->max_sectors = instance->max_sectors_per_req;
5604 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5605 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5606 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5607 host->max_lun = MEGASAS_MAX_LUN;
5608 host->max_cmd_len = 16;
5609
5610 /*
5611 * Notify the mid-layer about the new controller
5612 */
5613 if (scsi_add_host(host, &instance->pdev->dev)) {
5614 dev_err(&instance->pdev->dev,
5615 "Failed to add host from %s %d\n",
5616 __func__, __LINE__);
5617 return -ENODEV;
5618 }
5619
5620 return 0;
5621 }
5622
5623 static int
5624 megasas_set_dma_mask(struct pci_dev *pdev)
5625 {
5626 /*
5627 * All our controllers are capable of performing 64-bit DMA
5628 */
5629 if (IS_DMA64) {
5630 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5631
5632 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5633 goto fail_set_dma_mask;
5634 }
5635 } else {
5636 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5637 goto fail_set_dma_mask;
5638 }
5639 /*
5640 * Ensure that all data structures are allocated in 32-bit
5641 * memory.
5642 */
5643 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5644 /* Try 32-bit DMA mask and 32-bit consistent DMA mask */
5645 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5646 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5647 dev_info(&pdev->dev, "set 32bit DMA mask "
5648 "and 32 bit consistent mask\n");
5649 else
5650 goto fail_set_dma_mask;
5651 }
5652
5653 return 0;
5654
5655 fail_set_dma_mask:
5656 return 1;
5657 }
5658
5659 /**
5660 * megasas_probe_one - PCI hotplug entry point
5661 * @pdev: PCI device structure
5662 * @id: PCI ids of supported hotplugged adapter
5663 */
5664 static int megasas_probe_one(struct pci_dev *pdev,
5665 const struct pci_device_id *id)
5666 {
5667 int rval, pos;
5668 struct Scsi_Host *host;
5669 struct megasas_instance *instance;
5670 u16 control = 0;
5671 struct fusion_context *fusion = NULL;
5672
5673 /* Reset MSI-X in the kdump kernel */
5674 if (reset_devices) {
5675 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5676 if (pos) {
5677 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5678 &control);
5679 if (control & PCI_MSIX_FLAGS_ENABLE) {
5680 dev_info(&pdev->dev, "resetting MSI-X\n");
5681 pci_write_config_word(pdev,
5682 pos + PCI_MSIX_FLAGS,
5683 control &
5684 ~PCI_MSIX_FLAGS_ENABLE);
5685 }
5686 }
5687 }
5688
5689 /*
5690 * PCI prepping: enable device, set bus mastering and DMA mask
5691 */
5692 rval = pci_enable_device_mem(pdev);
5693
5694 if (rval) {
5695 return rval;
5696 }
5697
5698 pci_set_master(pdev);
5699
5700 if (megasas_set_dma_mask(pdev))
5701 goto fail_set_dma_mask;
5702
5703 host = scsi_host_alloc(&megasas_template,
5704 sizeof(struct megasas_instance));
5705
5706 if (!host) {
5707 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
5708 goto fail_alloc_instance;
5709 }
5710
5711 instance = (struct megasas_instance *)host->hostdata;
5712 memset(instance, 0, sizeof(*instance));
5713 atomic_set(&instance->fw_reset_no_pci_access, 0);
5714 instance->pdev = pdev;
5715
5716 switch (instance->pdev->device) {
5717 case PCI_DEVICE_ID_LSI_FUSION:
5718 case PCI_DEVICE_ID_LSI_PLASMA:
5719 case PCI_DEVICE_ID_LSI_INVADER:
5720 case PCI_DEVICE_ID_LSI_FURY:
5721 case PCI_DEVICE_ID_LSI_INTRUDER:
5722 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5723 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5724 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5725 {
5726 instance->ctrl_context_pages =
5727 get_order(sizeof(struct fusion_context));
5728 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
5729 instance->ctrl_context_pages);
5730 if (!instance->ctrl_context) {
5731 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5732 "memory for Fusion context info\n");
5733 goto fail_alloc_dma_buf;
5734 }
5735 fusion = instance->ctrl_context;
5736 memset(fusion, 0,
5737 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5738 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
5739 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
5740 fusion->adapter_type = THUNDERBOLT_SERIES;
5741 else
5742 fusion->adapter_type = INVADER_SERIES;
5743 }
5744 break;
5745 default: /* For all other supported controllers */
5746
5747 instance->producer =
5748 pci_alloc_consistent(pdev, sizeof(u32),
5749 &instance->producer_h);
5750 instance->consumer =
5751 pci_alloc_consistent(pdev, sizeof(u32),
5752 &instance->consumer_h);
5753
5754 if (!instance->producer || !instance->consumer) {
5755 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5756 "memory for producer, consumer\n");
5757 goto fail_alloc_dma_buf;
5758 }
5759
5760 *instance->producer = 0;
5761 *instance->consumer = 0;
5762 break;
5763 }
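/*
 * At this point Fusion-class controllers own a zeroed fusion_context,
 * while the default (MFI) controllers have their producer and consumer
 * indices allocated in DMA-coherent memory and cleared.
 */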
5764
5765 /* Crash dump feature related initialisation */
5766 instance->drv_buf_index = 0;
5767 instance->drv_buf_alloc = 0;
5768 instance->crash_dump_fw_support = 0;
5769 instance->crash_dump_app_support = 0;
5770 instance->fw_crash_state = UNAVAILABLE;
5771 spin_lock_init(&instance->crashdump_lock);
5772 instance->crash_dump_buf = NULL;
5773
5774 megasas_poll_wait_aen = 0;
5775 instance->flag_ieee = 0;
5776 instance->ev = NULL;
5777 instance->issuepend_done = 1;
5778 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5779 instance->is_imr = 0;
5780
5781 instance->evt_detail = pci_alloc_consistent(pdev,
5782 sizeof(struct
5783 megasas_evt_detail),
5784 &instance->evt_detail_h);
5785
5786 if (!instance->evt_detail) {
5787 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
5788 "event detail structure\n");
5789 goto fail_alloc_dma_buf;
5790 }
5791
5792 if (!reset_devices) {
5793 instance->system_info_buf = pci_zalloc_consistent(pdev,
5794 sizeof(struct MR_DRV_SYSTEM_INFO),
5795 &instance->system_info_h);
5796 if (!instance->system_info_buf)
5797 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
5798
5799 instance->pd_info = pci_alloc_consistent(pdev,
5800 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
5801
5802 if (!instance->pd_info)
5803 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
5804
5805 instance->crash_dump_buf = pci_alloc_consistent(pdev,
5806 CRASH_DMA_BUF_SIZE,
5807 &instance->crash_dump_h);
5808 if (!instance->crash_dump_buf)
5809 dev_err(&pdev->dev, "Can't allocate Firmware "
5810 "crash dump DMA buffer\n");
5811 }
5812
5813 /*
5814 * Initialize locks and queues
5815 */
5816 INIT_LIST_HEAD(&instance->cmd_pool);
5817 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
5818
5819 atomic_set(&instance->fw_outstanding, 0);
5820
5821 init_waitqueue_head(&instance->int_cmd_wait_q);
5822 init_waitqueue_head(&instance->abort_cmd_wait_q);
5823
5824 spin_lock_init(&instance->mfi_pool_lock);
5825 spin_lock_init(&instance->hba_lock);
5826 spin_lock_init(&instance->completion_lock);
5827
5828 mutex_init(&instance->reset_mutex);
5829 mutex_init(&instance->hba_mutex);
5830
5831 /*
5832 * Initialize PCI related and misc parameters
5833 */
5834 instance->host = host;
5835 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
5836 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
5837 instance->ctrl_info = NULL;
5838
5839
5840 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5841 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
5842 instance->flag_ieee = 1;
5843
5844 megasas_dbg_lvl = 0;
5845 instance->flag = 0;
5846 instance->unload = 1;
5847 instance->last_time = 0;
5848 instance->disableOnlineCtrlReset = 1;
5849 instance->UnevenSpanSupport = 0;
5850
5851 if (instance->ctrl_context) {
5852 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
5853 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
5854 } else
5855 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
5856
5857 /*
5858 * Initialize MFI Firmware
5859 */
5860 if (megasas_init_fw(instance))
5861 goto fail_init_mfi;
5862
5863 if (instance->requestorId) {
5864 if (instance->PlasmaFW111) {
5865 instance->vf_affiliation_111 =
5866 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
5867 &instance->vf_affiliation_111_h);
5868 if (!instance->vf_affiliation_111)
5869 dev_warn(&pdev->dev, "Can't allocate "
5870 "memory for VF affiliation buffer\n");
5871 } else {
5872 instance->vf_affiliation =
5873 pci_alloc_consistent(pdev,
5874 (MAX_LOGICAL_DRIVES + 1) *
5875 sizeof(struct MR_LD_VF_AFFILIATION),
5876 &instance->vf_affiliation_h);
5877 if (!instance->vf_affiliation)
5878 dev_warn(&pdev->dev, "Can't allocate "
5879 "memory for VF affiliation buffer\n");
5880 }
5881 }
5882
5883 /*
5884 * Store instance in PCI softstate
5885 */
5886 pci_set_drvdata(pdev, instance);
5887
5888 /*
5889 * Add this controller to megasas_mgmt_info structure so that it
5890 * can be exported to management applications
5891 */
5892 megasas_mgmt_info.count++;
5893 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
5894 megasas_mgmt_info.max_index++;
5895
5896 /*
5897 * Register with SCSI mid-layer
5898 */
5899 if (megasas_io_attach(instance))
5900 goto fail_io_attach;
5901
5902 instance->unload = 0;
5903 /*
5904 * Trigger SCSI to scan our drives
5905 */
5906 scsi_scan_host(host);
5907
5908 /*
5909 * Initiate AEN (Asynchronous Event Notification)
5910 */
5911 if (megasas_start_aen(instance)) {
5912 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
5913 goto fail_start_aen;
5914 }
5915
5916 /* Get current SR-IOV LD/VF affiliation */
5917 if (instance->requestorId)
5918 megasas_get_ld_vf_affiliation(instance, 1);
5919
5920 return 0;
5921
5922 fail_start_aen:
5923 fail_io_attach:
5924 megasas_mgmt_info.count--;
5925 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
5926 megasas_mgmt_info.max_index--;
5927
5928 instance->instancet->disable_intr(instance);
5929 megasas_destroy_irqs(instance);
5930
5931 if (instance->ctrl_context)
5932 megasas_release_fusion(instance);
5933 else
5934 megasas_release_mfi(instance);
5935 if (instance->msix_vectors)
5936 pci_free_irq_vectors(instance->pdev);
5937 fail_init_mfi:
5938 fail_alloc_dma_buf:
5939 if (instance->evt_detail)
5940 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
5941 instance->evt_detail,
5942 instance->evt_detail_h);
5943
5944 if (instance->pd_info)
5945 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
5946 instance->pd_info,
5947 instance->pd_info_h);
5948 if (instance->producer)
5949 pci_free_consistent(pdev, sizeof(u32), instance->producer,
5950 instance->producer_h);
5951 if (instance->consumer)
5952 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
5953 instance->consumer_h);
5954 scsi_host_put(host);
5955
5956 fail_alloc_instance:
5957 fail_set_dma_mask:
5958 pci_disable_device(pdev);
5959
5960 return -ENODEV;
5961 }
5962
5963 /**
5964 * megasas_flush_cache - Requests FW to flush all its caches
5965 * @instance: Adapter soft state
5966 */
5967 static void megasas_flush_cache(struct megasas_instance *instance)
5968 {
5969 struct megasas_cmd *cmd;
5970 struct megasas_dcmd_frame *dcmd;
5971
5972 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
5973 return;
5974
5975 cmd = megasas_get_cmd(instance);
5976
5977 if (!cmd)
5978 return;
5979
5980 dcmd = &cmd->frame->dcmd;
5981
5982 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5983
5984 dcmd->cmd = MFI_CMD_DCMD;
5985 dcmd->cmd_status = 0x0;
5986 dcmd->sge_count = 0;
5987 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
5988 dcmd->timeout = 0;
5989 dcmd->pad_0 = 0;
5990 dcmd->data_xfer_len = 0;
5991 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
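/* Ask the firmware to flush both its own cache and the disk caches */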
5992 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
5993
5994 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
5995 != DCMD_SUCCESS) {
5996 dev_err(&instance->pdev->dev,
5997 "return from %s %d\n", __func__, __LINE__);
5998 return;
5999 }
6000
6001 megasas_return_cmd(instance, cmd);
6002 }
6003
6004 /**
6005 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6006 * @instance: Adapter soft state
6007 * @opcode: Shutdown/Hibernate
6008 */
6009 static void megasas_shutdown_controller(struct megasas_instance *instance,
6010 u32 opcode)
6011 {
6012 struct megasas_cmd *cmd;
6013 struct megasas_dcmd_frame *dcmd;
6014
6015 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6016 return;
6017
6018 cmd = megasas_get_cmd(instance);
6019
6020 if (!cmd)
6021 return;
6022
6023 if (instance->aen_cmd)
6024 megasas_issue_blocked_abort_cmd(instance,
6025 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6026 if (instance->map_update_cmd)
6027 megasas_issue_blocked_abort_cmd(instance,
6028 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6029 if (instance->jbod_seq_cmd)
6030 megasas_issue_blocked_abort_cmd(instance,
6031 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6032
6033 dcmd = &cmd->frame->dcmd;
6034
6035 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6036
6037 dcmd->cmd = MFI_CMD_DCMD;
6038 dcmd->cmd_status = 0x0;
6039 dcmd->sge_count = 0;
6040 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6041 dcmd->timeout = 0;
6042 dcmd->pad_0 = 0;
6043 dcmd->data_xfer_len = 0;
6044 dcmd->opcode = cpu_to_le32(opcode);
6045
6046 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6047 != DCMD_SUCCESS) {
6048 dev_err(&instance->pdev->dev,
6049 "return from %s %d\n", __func__, __LINE__);
6050 return;
6051 }
6052
6053 megasas_return_cmd(instance, cmd);
6054 }
6055
6056 #ifdef CONFIG_PM
6057 /**
6058 * megasas_suspend - driver suspend entry point
6059 * @pdev: PCI device structure
6060 * @state: PCI power state to suspend routine
6061 */
6062 static int
6063 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6064 {
6065 struct Scsi_Host *host;
6066 struct megasas_instance *instance;
6067
6068 instance = pci_get_drvdata(pdev);
6069 host = instance->host;
6070 instance->unload = 1;
6071
6072 /* Shutdown SR-IOV heartbeat timer */
6073 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6074 del_timer_sync(&instance->sriov_heartbeat_timer);
6075
6076 megasas_flush_cache(instance);
6077 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6078
6079 /* cancel the delayed work if this work is still in the queue */
6080 if (instance->ev != NULL) {
6081 struct megasas_aen_event *ev = instance->ev;
6082 cancel_delayed_work_sync(&ev->hotplug_work);
6083 instance->ev = NULL;
6084 }
6085
6086 tasklet_kill(&instance->isr_tasklet);
6087
6088 pci_set_drvdata(instance->pdev, instance);
6089 instance->instancet->disable_intr(instance);
6090
6091 megasas_destroy_irqs(instance);
6092
6093 if (instance->msix_vectors)
6094 pci_free_irq_vectors(instance->pdev);
6095
6096 pci_save_state(pdev);
6097 pci_disable_device(pdev);
6098
6099 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6100
6101 return 0;
6102 }
6103
6104 /**
6105 * megasas_resume - driver resume entry point
6106 * @pdev: PCI device structure
6107 */
6108 static int
6109 megasas_resume(struct pci_dev *pdev)
6110 {
6111 int rval;
6112 struct Scsi_Host *host;
6113 struct megasas_instance *instance;
6114 int irq_flags = PCI_IRQ_LEGACY;
6115
6116 instance = pci_get_drvdata(pdev);
6117 host = instance->host;
6118 pci_set_power_state(pdev, PCI_D0);
6119 pci_enable_wake(pdev, PCI_D0, 0);
6120 pci_restore_state(pdev);
6121
6122 /*
6123 * PCI prepping: enable device, set bus mastering and DMA mask
6124 */
6125 rval = pci_enable_device_mem(pdev);
6126
6127 if (rval) {
6128 dev_err(&pdev->dev, "Enable device failed\n");
6129 return rval;
6130 }
6131
6132 pci_set_master(pdev);
6133
6134 if (megasas_set_dma_mask(pdev))
6135 goto fail_set_dma_mask;
6136
6137 /*
6138 * Initialize MFI Firmware
6139 */
6140
6141 atomic_set(&instance->fw_outstanding, 0);
6142
6143 /*
6144 * We expect the FW state to be READY
6145 */
6146 if (megasas_transition_to_ready(instance, 0))
6147 goto fail_ready_state;
6148
6149 /* Now re-enable MSI-X */
6150 if (instance->msix_vectors) {
6151 irq_flags = PCI_IRQ_MSIX;
6152 if (smp_affinity_enable)
6153 irq_flags |= PCI_IRQ_AFFINITY;
6154 }
6155 rval = pci_alloc_irq_vectors(instance->pdev, 1,
6156 instance->msix_vectors ?
6157 instance->msix_vectors : 1, irq_flags);
6158 if (rval < 0)
6159 goto fail_reenable_msix;
6160
6161 if (instance->ctrl_context) {
6162 megasas_reset_reply_desc(instance);
6163 if (megasas_ioc_init_fusion(instance)) {
6164 megasas_free_cmds(instance);
6165 megasas_free_cmds_fusion(instance);
6166 goto fail_init_mfi;
6167 }
6168 if (!megasas_get_map_info(instance))
6169 megasas_sync_map_info(instance);
6170 } else {
6171 *instance->producer = 0;
6172 *instance->consumer = 0;
6173 if (megasas_issue_init_mfi(instance))
6174 goto fail_init_mfi;
6175 }
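/*
 * Fusion adapters have just re-run IOC INIT and refreshed the RAID map;
 * legacy MFI adapters reset their producer/consumer indices and
 * reissued INIT MFI.
 */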
6176
6177 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6178 (unsigned long)instance);
6179
6180 if (instance->msix_vectors ?
6181 megasas_setup_irqs_msix(instance, 0) :
6182 megasas_setup_irqs_ioapic(instance))
6183 goto fail_init_mfi;
6184
6185 /* Re-launch SR-IOV heartbeat timer */
6186 if (instance->requestorId) {
6187 if (!megasas_sriov_start_heartbeat(instance, 0))
6188 megasas_start_timer(instance,
6189 &instance->sriov_heartbeat_timer,
6190 megasas_sriov_heartbeat_handler,
6191 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6192 else {
6193 instance->skip_heartbeat_timer_del = 1;
6194 goto fail_init_mfi;
6195 }
6196 }
6197
6198 instance->instancet->enable_intr(instance);
6199 megasas_setup_jbod_map(instance);
6200 instance->unload = 0;
6201
6202 /*
6203 * Initiate AEN (Asynchronous Event Notification)
6204 */
6205 if (megasas_start_aen(instance))
6206 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6207
6208 return 0;
6209
6210 fail_init_mfi:
6211 if (instance->evt_detail)
6212 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6213 instance->evt_detail,
6214 instance->evt_detail_h);
6215
6216 if (instance->pd_info)
6217 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6218 instance->pd_info,
6219 instance->pd_info_h);
6220 if (instance->producer)
6221 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6222 instance->producer_h);
6223 if (instance->consumer)
6224 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6225 instance->consumer_h);
6226 scsi_host_put(host);
6227
6228 fail_set_dma_mask:
6229 fail_ready_state:
6230 fail_reenable_msix:
6231
6232 pci_disable_device(pdev);
6233
6234 return -ENODEV;
6235 }
6236 #else
6237 #define megasas_suspend NULL
6238 #define megasas_resume NULL
6239 #endif
6240
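/*
 * megasas_wait_for_adapter_operational - wait for an in-progress reset
 *
 * Polls the adapter recovery state once per second for up to twice
 * MEGASAS_RESET_WAIT_TIME seconds. Returns 0 once the HBA is operational
 * again, 1 on a fatal HW error or timeout.
 */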
6241 static inline int
6242 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6243 {
6244 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6245 int i;
6246
6247 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6248 return 1;
6249
6250 for (i = 0; i < wait_time; i++) {
6251 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
6252 break;
6253
6254 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6255 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6256
6257 msleep(1000);
6258 }
6259
6260 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6261 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6262 __func__);
6263 return 1;
6264 }
6265
6266 return 0;
6267 }
6268
6269 /**
6270 * megasas_detach_one - PCI hot-unplug entry point
6271 * @pdev: PCI device structure
6272 */
6273 static void megasas_detach_one(struct pci_dev *pdev)
6274 {
6275 int i;
6276 struct Scsi_Host *host;
6277 struct megasas_instance *instance;
6278 struct fusion_context *fusion;
6279 u32 pd_seq_map_sz;
6280
6281 instance = pci_get_drvdata(pdev);
6282 instance->unload = 1;
6283 host = instance->host;
6284 fusion = instance->ctrl_context;
6285
6286 /* Shutdown SR-IOV heartbeat timer */
6287 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6288 del_timer_sync(&instance->sriov_heartbeat_timer);
6289
6290 if (instance->fw_crash_state != UNAVAILABLE)
6291 megasas_free_host_crash_buffer(instance);
6292 scsi_remove_host(instance->host);
6293
6294 if (megasas_wait_for_adapter_operational(instance))
6295 goto skip_firing_dcmds;
6296
6297 megasas_flush_cache(instance);
6298 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6299
6300 skip_firing_dcmds:
6301 /* cancel the delayed work if this work is still in the queue */
6302 if (instance->ev != NULL) {
6303 struct megasas_aen_event *ev = instance->ev;
6304 cancel_delayed_work_sync(&ev->hotplug_work);
6305 instance->ev = NULL;
6306 }
6307
6308 /* cancel all wait events */
6309 wake_up_all(&instance->int_cmd_wait_q);
6310
6311 tasklet_kill(&instance->isr_tasklet);
6312
6313 /*
6314 * Take the instance off the instance array. Note that we will not
6315 * decrement the max_index. We let this array be a sparse array
6316 */
6317 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6318 if (megasas_mgmt_info.instance[i] == instance) {
6319 megasas_mgmt_info.count--;
6320 megasas_mgmt_info.instance[i] = NULL;
6321
6322 break;
6323 }
6324 }
6325
6326 instance->instancet->disable_intr(instance);
6327
6328 megasas_destroy_irqs(instance);
6329
6330 if (instance->msix_vectors)
6331 pci_free_irq_vectors(instance->pdev);
6332
6333 if (instance->ctrl_context) {
6334 megasas_release_fusion(instance);
6335 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6336 (sizeof(struct MR_PD_CFG_SEQ) *
6337 (MAX_PHYSICAL_DEVICES - 1));
6338 for (i = 0; i < 2 ; i++) {
6339 if (fusion->ld_map[i])
6340 dma_free_coherent(&instance->pdev->dev,
6341 fusion->max_map_sz,
6342 fusion->ld_map[i],
6343 fusion->ld_map_phys[i]);
6344 if (fusion->ld_drv_map[i])
6345 free_pages((ulong)fusion->ld_drv_map[i],
6346 fusion->drv_map_pages);
6347 if (fusion->pd_seq_sync[i])
6348 dma_free_coherent(&instance->pdev->dev,
6349 pd_seq_map_sz,
6350 fusion->pd_seq_sync[i],
6351 fusion->pd_seq_phys[i]);
6352 }
6353 free_pages((ulong)instance->ctrl_context,
6354 instance->ctrl_context_pages);
6355 } else {
6356 megasas_release_mfi(instance);
6357 pci_free_consistent(pdev, sizeof(u32),
6358 instance->producer,
6359 instance->producer_h);
6360 pci_free_consistent(pdev, sizeof(u32),
6361 instance->consumer,
6362 instance->consumer_h);
6363 }
6364
6365 kfree(instance->ctrl_info);
6366
6367 if (instance->evt_detail)
6368 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6369 instance->evt_detail, instance->evt_detail_h);
6370
6371 if (instance->pd_info)
6372 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6373 instance->pd_info,
6374 instance->pd_info_h);
6375 if (instance->vf_affiliation)
6376 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6377 sizeof(struct MR_LD_VF_AFFILIATION),
6378 instance->vf_affiliation,
6379 instance->vf_affiliation_h);
6380
6381 if (instance->vf_affiliation_111)
6382 pci_free_consistent(pdev,
6383 sizeof(struct MR_LD_VF_AFFILIATION_111),
6384 instance->vf_affiliation_111,
6385 instance->vf_affiliation_111_h);
6386
6387 if (instance->hb_host_mem)
6388 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6389 instance->hb_host_mem,
6390 instance->hb_host_mem_h);
6391
6392 if (instance->crash_dump_buf)
6393 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6394 instance->crash_dump_buf, instance->crash_dump_h);
6395
6396 if (instance->system_info_buf)
6397 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6398 instance->system_info_buf, instance->system_info_h);
6399
6400 scsi_host_put(host);
6401
6402 pci_disable_device(pdev);
6403 }
6404
6405 /**
6406 * megasas_shutdown - Shutdown entry point
6407 * @pdev: PCI device structure
6408 */
6409 static void megasas_shutdown(struct pci_dev *pdev)
6410 {
6411 struct megasas_instance *instance = pci_get_drvdata(pdev);
6412
6413 instance->unload = 1;
6414
6415 if (megasas_wait_for_adapter_operational(instance))
6416 goto skip_firing_dcmds;
6417
6418 megasas_flush_cache(instance);
6419 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6420
6421 skip_firing_dcmds:
6422 instance->instancet->disable_intr(instance);
6423 megasas_destroy_irqs(instance);
6424
6425 if (instance->msix_vectors)
6426 pci_free_irq_vectors(instance->pdev);
6427 }
6428
6429 /**
6430 * megasas_mgmt_open - char node "open" entry point
6431 */
6432 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6433 {
6434 /*
6435 * Allow only those users with admin rights
6436 */
6437 if (!capable(CAP_SYS_ADMIN))
6438 return -EACCES;
6439
6440 return 0;
6441 }
6442
6443 /**
6444 * megasas_mgmt_fasync - Async notifier registration from applications
6445 *
6446 * This function adds the calling process to a driver global queue. When an
6447 * event occurs, SIGIO will be sent to all processes in this queue.
6448 */
6449 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6450 {
6451 int rc;
6452
6453 mutex_lock(&megasas_async_queue_mutex);
6454
6455 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6456
6457 mutex_unlock(&megasas_async_queue_mutex);
6458
6459 if (rc >= 0) {
6460 /* For sanity check when we get ioctl */
6461 filep->private_data = filep;
6462 return 0;
6463 }
6464
6465 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6466
6467 return rc;
6468 }
6469
6470 /**
6471 * megasas_mgmt_poll - char node "poll" entry point
6472 */
6473 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6474 {
6475 unsigned int mask;
6476 unsigned long flags;
6477
6478 poll_wait(file, &megasas_poll_wait, wait);
6479 spin_lock_irqsave(&poll_aen_lock, flags);
6480 if (megasas_poll_wait_aen)
6481 mask = (POLLIN | POLLRDNORM);
6482 else
6483 mask = 0;
6484 megasas_poll_wait_aen = 0;
6485 spin_unlock_irqrestore(&poll_aen_lock, flags);
6486 return mask;
6487 }
6488
6489 /*
6490 * megasas_set_crash_dump_params_ioctl:
6491 * Send CRASH_DUMP_MODE DCMD to all controllers
6492 * @cmd: MFI command frame
6493 */
6494
6495 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6496 {
6497 struct megasas_instance *local_instance;
6498 int i, error = 0;
6499 int crash_support;
6500
6501 crash_support = cmd->frame->dcmd.mbox.w[0];
6502
6503 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6504 local_instance = megasas_mgmt_info.instance[i];
6505 if (local_instance && local_instance->crash_dump_drv_support) {
6506 if ((atomic_read(&local_instance->adprecovery) ==
6507 MEGASAS_HBA_OPERATIONAL) &&
6508 !megasas_set_crash_dump_params(local_instance,
6509 crash_support)) {
6510 local_instance->crash_dump_app_support =
6511 crash_support;
6512 dev_info(&local_instance->pdev->dev,
6513 "Application firmware crash "
6514 "dump mode set success\n");
6515 error = 0;
6516 } else {
6517 dev_info(&local_instance->pdev->dev,
6518 "Application firmware crash "
6519 "dump mode set failed\n");
6520 error = -1;
6521 }
6522 }
6523 }
6524 return error;
6525 }
6526
6527 /**
6528 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6529 * @instance: Adapter soft state
6530 * @user_ioc: User's ioctl packet, in user space
 * @ioc: Kernel copy of the ioctl packet
6531 */
6532 static int
6533 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6534 struct megasas_iocpacket __user * user_ioc,
6535 struct megasas_iocpacket *ioc)
6536 {
6537 struct megasas_sge32 *kern_sge32;
6538 struct megasas_cmd *cmd;
6539 void *kbuff_arr[MAX_IOCTL_SGE];
6540 dma_addr_t buf_handle = 0;
6541 int error = 0, i;
6542 void *sense = NULL;
6543 dma_addr_t sense_handle;
6544 unsigned long *sense_ptr;
6545
6546 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6547
6548 if (ioc->sge_count > MAX_IOCTL_SGE) {
6549 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6550 ioc->sge_count, MAX_IOCTL_SGE);
6551 return -EINVAL;
6552 }
6553
6554 cmd = megasas_get_cmd(instance);
6555 if (!cmd) {
6556 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6557 return -ENOMEM;
6558 }
6559
6560 /*
6561 * User's IOCTL packet has 2 frames (maximum). Copy those two
6562 * frames into our cmd's frames. cmd->frame's context will get
6563 * overwritten when we copy from the user's frames, so that value is
6564 * set again separately after the copy
6565 */
6566 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6567 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6568 cmd->frame->hdr.pad_0 = 0;
6569 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6570 MFI_FRAME_SGL64 |
6571 MFI_FRAME_SENSE64));
6572
6573 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6574 error = megasas_set_crash_dump_params_ioctl(cmd);
6575 megasas_return_cmd(instance, cmd);
6576 return error;
6577 }
6578
6579 /*
6580 * The management interface between applications and the fw uses
6581 * MFI frames. E.g., RAID configuration changes, LD property changes,
6582 * etc. are accomplished through different kinds of MFI frames. The
6583 * driver needs to care only about substituting user buffers with
6584 * kernel buffers in SGLs. The location of the SGL is embedded in the
6585 * struct iocpacket itself.
6586 */
6587 kern_sge32 = (struct megasas_sge32 *)
6588 ((unsigned long)cmd->frame + ioc->sgl_off);
6589
6590 /*
6591 * For each user buffer, create a mirror buffer and copy in
6592 */
6593 for (i = 0; i < ioc->sge_count; i++) {
6594 if (!ioc->sgl[i].iov_len)
6595 continue;
6596
6597 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6598 ioc->sgl[i].iov_len,
6599 &buf_handle, GFP_KERNEL);
6600 if (!kbuff_arr[i]) {
6601 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6602 "kernel SGL buffer for IOCTL\n");
6603 error = -ENOMEM;
6604 goto out;
6605 }
6606
6607 /*
6608 * We don't change the dma_coherent_mask, so
6609 * dma_alloc_coherent only returns 32-bit DMA addresses here
6610 */
6611 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6612 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6613
6614 /*
6615 * We created a kernel buffer corresponding to the
6616 * user buffer. Now copy in from the user buffer
6617 */
6618 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6619 (u32) (ioc->sgl[i].iov_len))) {
6620 error = -EFAULT;
6621 goto out;
6622 }
6623 }
6624
6625 if (ioc->sense_len) {
6626 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6627 &sense_handle, GFP_KERNEL);
6628 if (!sense) {
6629 error = -ENOMEM;
6630 goto out;
6631 }
6632
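/*
 * Patch the frame at sense_off so it points at the kernel sense
 * buffer just allocated; the user's original sense pointer is still
 * available in ioc->frame and is used later when copying the sense
 * data back out.
 */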
6633 sense_ptr =
6634 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6635 *sense_ptr = cpu_to_le32(sense_handle);
6636 }
6637
6638 /*
6639 * Set the sync_cmd flag so that the ISR knows not to complete this
6640 * cmd to the SCSI mid-layer
6641 */
6642 cmd->sync_cmd = 1;
6643 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6644 cmd->sync_cmd = 0;
6645 dev_err(&instance->pdev->dev,
6646 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6647 __func__, __LINE__, cmd->frame->dcmd.opcode,
6648 cmd->cmd_status_drv);
6649 return -EBUSY;
6650 }
6651
6652 cmd->sync_cmd = 0;
6653
6654 if (instance->unload == 1) {
6655 dev_info(&instance->pdev->dev, "Driver unload is in progress, "
6656 "don't submit data to application\n");
6657 goto out;
6658 }
6659 /*
6660 * copy out the kernel buffers to user buffers
6661 */
6662 for (i = 0; i < ioc->sge_count; i++) {
6663 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
6664 ioc->sgl[i].iov_len)) {
6665 error = -EFAULT;
6666 goto out;
6667 }
6668 }
6669
6670 /*
6671 * copy out the sense
6672 */
6673 if (ioc->sense_len) {
6674 /*
6675 * sense_ptr points to the location that has the user
6676 * sense buffer address
6677 */
6678 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
6679 ioc->sense_off);
6680
6681 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
6682 sense, ioc->sense_len)) {
6683 dev_err(&instance->pdev->dev, "Failed to copy out to user "
6684 "sense data\n");
6685 error = -EFAULT;
6686 goto out;
6687 }
6688 }
6689
6690 /*
6691 * copy the status codes returned by the fw
6692 */
6693 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
6694 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
6695 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
6696 error = -EFAULT;
6697 }
6698
6699 out:
6700 if (sense) {
6701 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
6702 sense, sense_handle);
6703 }
6704
6705 for (i = 0; i < ioc->sge_count; i++) {
6706 if (kbuff_arr[i]) {
6707 dma_free_coherent(&instance->pdev->dev,
6708 le32_to_cpu(kern_sge32[i].length),
6709 kbuff_arr[i],
6710 le32_to_cpu(kern_sge32[i].phys_addr));
6711 kbuff_arr[i] = NULL;
6712 }
6713 }
6714
6715 megasas_return_cmd(instance, cmd);
6716 return error;
6717 }
6718
6719 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6720 {
6721 struct megasas_iocpacket __user *user_ioc =
6722 (struct megasas_iocpacket __user *)arg;
6723 struct megasas_iocpacket *ioc;
6724 struct megasas_instance *instance;
6725 int error;
6726 int i;
6727 unsigned long flags;
6728 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6729
6730 ioc = memdup_user(user_ioc, sizeof(*ioc));
6731 if (IS_ERR(ioc))
6732 return PTR_ERR(ioc);
6733
6734 instance = megasas_lookup_instance(ioc->host_no);
6735 if (!instance) {
6736 error = -ENODEV;
6737 goto out_kfree_ioc;
6738 }
6739
6740 /* Adjust ioctl wait time for VF mode */
6741 if (instance->requestorId)
6742 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
6743
6744 /* Block ioctls in VF mode */
6745 if (instance->requestorId && !allow_vf_ioctls) {
6746 error = -ENODEV;
6747 goto out_kfree_ioc;
6748 }
6749
6750 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6751 dev_err(&instance->pdev->dev, "Controller in crit error\n");
6752 error = -ENODEV;
6753 goto out_kfree_ioc;
6754 }
6755
6756 if (instance->unload == 1) {
6757 error = -ENODEV;
6758 goto out_kfree_ioc;
6759 }
6760
6761 if (down_interruptible(&instance->ioctl_sem)) {
6762 error = -ERESTARTSYS;
6763 goto out_kfree_ioc;
6764 }
6765
6766 for (i = 0; i < wait_time; i++) {
6767
6768 spin_lock_irqsave(&instance->hba_lock, flags);
6769 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6770 spin_unlock_irqrestore(&instance->hba_lock, flags);
6771 break;
6772 }
6773 spin_unlock_irqrestore(&instance->hba_lock, flags);
6774
6775 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6776 dev_notice(&instance->pdev->dev, "waiting "
6777 "for controller reset to finish\n");
6778 }
6779
6780 msleep(1000);
6781 }
6782
6783 spin_lock_irqsave(&instance->hba_lock, flags);
6784 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6785 spin_unlock_irqrestore(&instance->hba_lock, flags);
6786
6787 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
6788 error = -ENODEV;
6789 goto out_up;
6790 }
6791 spin_unlock_irqrestore(&instance->hba_lock, flags);
6792
6793 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
6794 out_up:
6795 up(&instance->ioctl_sem);
6796
6797 out_kfree_ioc:
6798 kfree(ioc);
6799 return error;
6800 }
6801
6802 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6803 {
6804 struct megasas_instance *instance;
6805 struct megasas_aen aen;
6806 int error;
6807 int i;
6808 unsigned long flags;
6809 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6810
6811 if (file->private_data != file) {
6812 printk(KERN_DEBUG "megasas: fasync_helper was not "
6813 "called first\n");
6814 return -EINVAL;
6815 }
6816
6817 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
6818 return -EFAULT;
6819
6820 instance = megasas_lookup_instance(aen.host_no);
6821
6822 if (!instance)
6823 return -ENODEV;
6824
6825 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6826 return -ENODEV;
6827 }
6828
6829 if (instance->unload == 1) {
6830 return -ENODEV;
6831 }
6832
6833 for (i = 0; i < wait_time; i++) {
6834
6835 spin_lock_irqsave(&instance->hba_lock, flags);
6836 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6837 spin_unlock_irqrestore(&instance->hba_lock,
6838 flags);
6839 break;
6840 }
6841
6842 spin_unlock_irqrestore(&instance->hba_lock, flags);
6843
6844 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6845 dev_notice(&instance->pdev->dev, "waiting for "
6846 "controller reset to finish\n");
6847 }
6848
6849 msleep(1000);
6850 }
6851
6852 spin_lock_irqsave(&instance->hba_lock, flags);
6853 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6854 spin_unlock_irqrestore(&instance->hba_lock, flags);
6855 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
6856 return -ENODEV;
6857 }
6858 spin_unlock_irqrestore(&instance->hba_lock, flags);
6859
6860 mutex_lock(&instance->reset_mutex);
6861 error = megasas_register_aen(instance, aen.seq_num,
6862 aen.class_locale_word);
6863 mutex_unlock(&instance->reset_mutex);
6864 return error;
6865 }
6866
6867 /**
6868 * megasas_mgmt_ioctl - char node ioctl entry point
6869 */
6870 static long
6871 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6872 {
6873 switch (cmd) {
6874 case MEGASAS_IOC_FIRMWARE:
6875 return megasas_mgmt_ioctl_fw(file, arg);
6876
6877 case MEGASAS_IOC_GET_AEN:
6878 return megasas_mgmt_ioctl_aen(file, arg);
6879 }
6880
6881 return -ENOTTY;
6882 }
6883
6884 #ifdef CONFIG_COMPAT
6885 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
6886 {
6887 struct compat_megasas_iocpacket __user *cioc =
6888 (struct compat_megasas_iocpacket __user *)arg;
6889 struct megasas_iocpacket __user *ioc =
6890 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
6891 int i;
6892 int error = 0;
6893 compat_uptr_t ptr;
6894 u32 local_sense_off;
6895 u32 local_sense_len;
6896 u32 user_sense_off;
6897
6898 if (clear_user(ioc, sizeof(*ioc)))
6899 return -EFAULT;
6900
6901 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
6902 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
6903 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
6904 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
6905 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
6906 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
6907 return -EFAULT;
6908
6909 /*
6910 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
6911 * sense_len is not null, so prepare the 64bit value under
6912 * the same condition.
6913 */
6914 if (get_user(local_sense_off, &ioc->sense_off) ||
6915 get_user(local_sense_len, &ioc->sense_len) ||
6916 get_user(user_sense_off, &cioc->sense_off))
6917 return -EFAULT;
6918
6919 if (local_sense_len) {
6920 void __user **sense_ioc_ptr =
6921 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
6922 compat_uptr_t *sense_cioc_ptr =
6923 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
6924 if (get_user(ptr, sense_cioc_ptr) ||
6925 put_user(compat_ptr(ptr), sense_ioc_ptr))
6926 return -EFAULT;
6927 }
6928
6929 for (i = 0; i < MAX_IOCTL_SGE; i++) {
6930 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
6931 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
6932 copy_in_user(&ioc->sgl[i].iov_len,
6933 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
6934 return -EFAULT;
6935 }
6936
6937 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
6938
6939 if (copy_in_user(&cioc->frame.hdr.cmd_status,
6940 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
6941 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
6942 return -EFAULT;
6943 }
6944 return error;
6945 }
6946
6947 static long
6948 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
6949 unsigned long arg)
6950 {
6951 switch (cmd) {
6952 case MEGASAS_IOC_FIRMWARE32:
6953 return megasas_mgmt_compat_ioctl_fw(file, arg);
6954 case MEGASAS_IOC_GET_AEN:
6955 return megasas_mgmt_ioctl_aen(file, arg);
6956 }
6957
6958 return -ENOTTY;
6959 }
6960 #endif
6961
6962 /*
6963 * File operations structure for management interface
6964 */
6965 static const struct file_operations megasas_mgmt_fops = {
6966 .owner = THIS_MODULE,
6967 .open = megasas_mgmt_open,
6968 .fasync = megasas_mgmt_fasync,
6969 .unlocked_ioctl = megasas_mgmt_ioctl,
6970 .poll = megasas_mgmt_poll,
6971 #ifdef CONFIG_COMPAT
6972 .compat_ioctl = megasas_mgmt_compat_ioctl,
6973 #endif
6974 .llseek = noop_llseek,
6975 };
6976
6977 /*
6978 * PCI hotplug support registration structure
6979 */
6980 static struct pci_driver megasas_pci_driver = {
6981
6982 .name = "megaraid_sas",
6983 .id_table = megasas_pci_table,
6984 .probe = megasas_probe_one,
6985 .remove = megasas_detach_one,
6986 .suspend = megasas_suspend,
6987 .resume = megasas_resume,
6988 .shutdown = megasas_shutdown,
6989 };
6990
6991 /*
6992 * Sysfs driver attributes
6993 */
6994 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6995 {
6996 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
6997 MEGASAS_VERSION);
6998 }
6999
7000 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
7001
7002 static ssize_t
7003 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
7004 {
7005 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7006 MEGASAS_RELDATE);
7007 }
7008
7009 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
7010
7011 static ssize_t
7012 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
7013 {
7014 return sprintf(buf, "%u\n", support_poll_for_event);
7015 }
7016
7017 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
7018 megasas_sysfs_show_support_poll_for_event, NULL);
7019
7020 static ssize_t
7021 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
7022 {
7023 return sprintf(buf, "%u\n", support_device_change);
7024 }
7025
7026 static DRIVER_ATTR(support_device_change, S_IRUGO,
7027 megasas_sysfs_show_support_device_change, NULL);
7028
7029 static ssize_t
7030 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
7031 {
7032 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7033 }
7034
7035 static ssize_t
7036 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
7037 {
7038 int retval = count;
7039
7040 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7041 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7042 retval = -EINVAL;
7043 }
7044 return retval;
7045 }
7046
7047 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
7048 megasas_sysfs_set_dbg_lvl);
7049
7050 static void
7051 megasas_aen_polling(struct work_struct *work)
7052 {
7053 struct megasas_aen_event *ev =
7054 container_of(work, struct megasas_aen_event, hotplug_work.work);
7055 struct megasas_instance *instance = ev->instance;
7056 union megasas_evt_class_locale class_locale;
7057 struct Scsi_Host *host;
7058 struct scsi_device *sdev1;
7059 u16 pd_index = 0;
7060 u16 ld_index = 0;
7061 int i, j, doscan = 0;
7062 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7063 int error;
7064 u8 dcmd_ret = DCMD_SUCCESS;
7065
7066 if (!instance) {
7067 printk(KERN_ERR "invalid instance!\n");
7068 kfree(ev);
7069 return;
7070 }
7071
7072 /* Adjust event workqueue thread wait time for VF mode */
7073 if (instance->requestorId)
7074 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7075
7076 /* Don't run the event workqueue thread if OCR is running */
7077 mutex_lock(&instance->reset_mutex);
7078
7079 instance->ev = NULL;
7080 host = instance->host;
7081 if (instance->evt_detail) {
7082 megasas_decode_evt(instance);
7083
7084 switch (le32_to_cpu(instance->evt_detail->code)) {
7085
7086 case MR_EVT_PD_INSERTED:
7087 case MR_EVT_PD_REMOVED:
7088 dcmd_ret = megasas_get_pd_list(instance);
7089 if (dcmd_ret == DCMD_SUCCESS)
7090 doscan = SCAN_PD_CHANNEL;
7091 break;
7092
7093 case MR_EVT_LD_OFFLINE:
7094 case MR_EVT_CFG_CLEARED:
7095 case MR_EVT_LD_DELETED:
7096 case MR_EVT_LD_CREATED:
7097 if (!instance->requestorId ||
7098 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7099 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7100
7101 if (dcmd_ret == DCMD_SUCCESS)
7102 doscan = SCAN_VD_CHANNEL;
7103
7104 break;
7105
7106 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7107 case MR_EVT_FOREIGN_CFG_IMPORTED:
7108 case MR_EVT_LD_STATE_CHANGE:
7109 dcmd_ret = megasas_get_pd_list(instance);
7110
7111 if (dcmd_ret != DCMD_SUCCESS)
7112 break;
7113
7114 if (!instance->requestorId ||
7115 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7116 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7117
7118 if (dcmd_ret != DCMD_SUCCESS)
7119 break;
7120
7121 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7122 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7123 instance->host->host_no);
7124 break;
7125
7126 case MR_EVT_CTRL_PROP_CHANGED:
7127 dcmd_ret = megasas_get_ctrl_info(instance);
7128 break;
7129 default:
7130 doscan = 0;
7131 break;
7132 }
7133 } else {
7134 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7135 mutex_unlock(&instance->reset_mutex);
7136 kfree(ev);
7137 return;
7138 }
7139
7140 mutex_unlock(&instance->reset_mutex);
7141
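/*
 * Bring the SCSI device list in line with the firmware's view: walk
 * every PD and VD slot, adding devices that are now present and
 * removing the ones that have disappeared.
 */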
7142 if (doscan & SCAN_PD_CHANNEL) {
7143 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7144 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7145 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7146 sdev1 = scsi_device_lookup(host, i, j, 0);
7147 if (instance->pd_list[pd_index].driveState ==
7148 MR_PD_STATE_SYSTEM) {
7149 if (!sdev1)
7150 scsi_add_device(host, i, j, 0);
7151 else
7152 scsi_device_put(sdev1);
7153 } else {
7154 if (sdev1) {
7155 scsi_remove_device(sdev1);
7156 scsi_device_put(sdev1);
7157 }
7158 }
7159 }
7160 }
7161 }
7162
7163 if (doscan & SCAN_VD_CHANNEL) {
7164 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7165 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7166 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7167 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7168 if (instance->ld_ids[ld_index] != 0xff) {
7169 if (!sdev1)
7170 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7171 else
7172 scsi_device_put(sdev1);
7173 } else {
7174 if (sdev1) {
7175 scsi_remove_device(sdev1);
7176 scsi_device_put(sdev1);
7177 }
7178 }
7179 }
7180 }
7181 }
7182
7183 if (dcmd_ret == DCMD_SUCCESS)
7184 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7185 else
7186 seq_num = instance->last_seq_num;
7187
7188 /* Register AEN with FW for latest sequence number plus 1 */
7189 class_locale.members.reserved = 0;
7190 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7191 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7192
7193 if (instance->aen_cmd != NULL) {
7194 kfree(ev);
7195 return;
7196 }
7197
7198 mutex_lock(&instance->reset_mutex);
7199 error = megasas_register_aen(instance, seq_num,
7200 class_locale.word);
7201 if (error)
7202 dev_err(&instance->pdev->dev,
7203 "register aen failed error %x\n", error);
7204
7205 mutex_unlock(&instance->reset_mutex);
7206 kfree(ev);
7207 }
7208
7209 /**
7210 * megasas_init - Driver load entry point
7211 */
7212 static int __init megasas_init(void)
7213 {
7214 int rval;
7215
7216 /*
7217 * Booted in a kdump kernel, minimize the memory footprint by
7218 * disabling a few features
7219 */
7220 if (reset_devices) {
7221 msix_vectors = 1;
7222 rdpq_enable = 0;
7223 dual_qdepth_disable = 1;
7224 }
7225
7226 /*
7227 * Announce driver version and other information
7228 */
7229 pr_info("megasas: %s\n", MEGASAS_VERSION);
7230
7231 spin_lock_init(&poll_aen_lock);
7232
7233 support_poll_for_event = 2;
7234 support_device_change = 1;
7235
7236 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7237
7238 /*
7239 * Register character device node
7240 */
7241 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7242
7243 if (rval < 0) {
7244 printk(KERN_DEBUG "megasas: failed to open device node\n");
7245 return rval;
7246 }
7247
7248 megasas_mgmt_majorno = rval;
7249
7250 /*
7251 * Register ourselves as PCI hotplug module
7252 */
7253 rval = pci_register_driver(&megasas_pci_driver);
7254
7255 if (rval) {
7256 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7257 goto err_pcidrv;
7258 }
7259
7260 rval = driver_create_file(&megasas_pci_driver.driver,
7261 &driver_attr_version);
7262 if (rval)
7263 goto err_dcf_attr_ver;
7264
7265 rval = driver_create_file(&megasas_pci_driver.driver,
7266 &driver_attr_release_date);
7267 if (rval)
7268 goto err_dcf_rel_date;
7269
7270 rval = driver_create_file(&megasas_pci_driver.driver,
7271 &driver_attr_support_poll_for_event);
7272 if (rval)
7273 goto err_dcf_support_poll_for_event;
7274
7275 rval = driver_create_file(&megasas_pci_driver.driver,
7276 &driver_attr_dbg_lvl);
7277 if (rval)
7278 goto err_dcf_dbg_lvl;
7279 rval = driver_create_file(&megasas_pci_driver.driver,
7280 &driver_attr_support_device_change);
7281 if (rval)
7282 goto err_dcf_support_device_change;
7283
7284 return rval;
7285
7286 err_dcf_support_device_change:
7287 driver_remove_file(&megasas_pci_driver.driver,
7288 &driver_attr_dbg_lvl);
7289 err_dcf_dbg_lvl:
7290 driver_remove_file(&megasas_pci_driver.driver,
7291 &driver_attr_support_poll_for_event);
7292 err_dcf_support_poll_for_event:
7293 driver_remove_file(&megasas_pci_driver.driver,
7294 &driver_attr_release_date);
7295 err_dcf_rel_date:
7296 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7297 err_dcf_attr_ver:
7298 pci_unregister_driver(&megasas_pci_driver);
7299 err_pcidrv:
7300 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7301 return rval;
7302 }
7303
7304 /**
7305 * megasas_exit - Driver unload entry point
7306 */
7307 static void __exit megasas_exit(void)
7308 {
7309 driver_remove_file(&megasas_pci_driver.driver,
7310 &driver_attr_dbg_lvl);
7311 driver_remove_file(&megasas_pci_driver.driver,
7312 &driver_attr_support_poll_for_event);
7313 driver_remove_file(&megasas_pci_driver.driver,
7314 &driver_attr_support_device_change);
7315 driver_remove_file(&megasas_pci_driver.driver,
7316 &driver_attr_release_date);
7317 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7318
7319 pci_unregister_driver(&megasas_pci_driver);
7320 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7321 }
7322
7323 module_init(megasas_init);
7324 module_exit(megasas_exit);