scsi: megaraid_sas: Fix data integrity failure for JBOD (passthrough) devices
drivers/scsi/megaraid/megaraid_sas_base.c
1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <asm/uaccess.h>
46 #include <linux/fs.h>
47 #include <linux/compat.h>
48 #include <linux/blkdev.h>
49 #include <linux/mutex.h>
50 #include <linux/poll.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_tcq.h>
57 #include "megaraid_sas_fusion.h"
58 #include "megaraid_sas.h"
59
60 /*
61 * Number of sectors per IO command
62 * Will be set in megasas_init_mfi if user does not provide
63 */
64 static unsigned int max_sectors;
65 module_param_named(max_sectors, max_sectors, int, 0);
66 MODULE_PARM_DESC(max_sectors,
67 "Maximum number of sectors per IO command");
68
69 static int msix_disable;
70 module_param(msix_disable, int, S_IRUGO);
71 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
72
73 static unsigned int msix_vectors;
74 module_param(msix_vectors, int, S_IRUGO);
75 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
76
77 static int allow_vf_ioctls;
78 module_param(allow_vf_ioctls, int, S_IRUGO);
79 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
80
81 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
82 module_param(throttlequeuedepth, int, S_IRUGO);
83 MODULE_PARM_DESC(throttlequeuedepth,
84 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
85
86 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
87 module_param(resetwaittime, int, S_IRUGO);
88 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
89 "before resetting adapter. Default: 180");
90
91 int smp_affinity_enable = 1;
92 module_param(smp_affinity_enable, int, S_IRUGO);
93 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
94
95 int rdpq_enable = 1;
96 module_param(rdpq_enable, int, S_IRUGO);
97 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
98
99 unsigned int dual_qdepth_disable;
100 module_param(dual_qdepth_disable, int, S_IRUGO);
101 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
102
103 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
104 module_param(scmd_timeout, int, S_IRUGO);
105 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
106
107 MODULE_LICENSE("GPL");
108 MODULE_VERSION(MEGASAS_VERSION);
109 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
110 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
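/*
 * Editor's note: the parameters above are ordinary module parameters, so
 * they can be set at load time (for example "modprobe megaraid_sas
 * msix_disable=1 scmd_timeout=60") or, when the driver is built in, on the
 * kernel command line as "megaraid_sas.<param>=<value>". The values shown
 * here are purely illustrative.
 */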
111
112 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
113 static int megasas_get_pd_list(struct megasas_instance *instance);
114 static int megasas_ld_list_query(struct megasas_instance *instance,
115 u8 query_type);
116 static int megasas_issue_init_mfi(struct megasas_instance *instance);
117 static int megasas_register_aen(struct megasas_instance *instance,
118 u32 seq_num, u32 class_locale_word);
119 static int
120 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id);
121 /*
122 * PCI ID table for all supported controllers
123 */
124 static struct pci_device_id megasas_pci_table[] = {
125
126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
127 /* xscale IOP */
128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
129 /* ppc IOP */
130 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
131 /* ppc IOP */
132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
133 /* gen2*/
134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
135 /* gen2*/
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
137 /* skinny*/
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
139 /* skinny*/
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
141 /* xscale IOP, vega */
142 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
143 /* xscale IOP */
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
145 /* Fusion */
146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
147 /* Plasma */
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
149 /* Invader */
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
151 /* Fury */
152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
153 /* Intruder */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
155 /* Intruder 24 port*/
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
158 {}
159 };
160
161 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
162
163 static int megasas_mgmt_majorno;
164 struct megasas_mgmt_info megasas_mgmt_info;
165 static struct fasync_struct *megasas_async_queue;
166 static DEFINE_MUTEX(megasas_async_queue_mutex);
167
168 static int megasas_poll_wait_aen;
169 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
170 static u32 support_poll_for_event;
171 u32 megasas_dbg_lvl;
172 static u32 support_device_change;
173
174 /* define lock for aen poll */
175 spinlock_t poll_aen_lock;
176
177 void
178 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
179 u8 alt_status);
180 static u32
181 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
182 static int
183 megasas_adp_reset_gen2(struct megasas_instance *instance,
184 struct megasas_register_set __iomem *reg_set);
185 static irqreturn_t megasas_isr(int irq, void *devp);
186 static u32
187 megasas_init_adapter_mfi(struct megasas_instance *instance);
188 u32
189 megasas_build_and_issue_cmd(struct megasas_instance *instance,
190 struct scsi_cmnd *scmd);
191 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
192 int
193 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
194 int seconds);
195 void megasas_fusion_ocr_wq(struct work_struct *work);
196 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
197 int initial);
198
199 int
200 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
201 {
202 instance->instancet->fire_cmd(instance,
203 cmd->frame_phys_addr, 0, instance->reg_set);
204 return 0;
205 }
206
207 /**
208 * megasas_get_cmd - Get a command from the free pool
209 * @instance: Adapter soft state
210 *
211 * Returns a free command from the pool
212 */
213 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
214 *instance)
215 {
216 unsigned long flags;
217 struct megasas_cmd *cmd = NULL;
218
219 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
220
221 if (!list_empty(&instance->cmd_pool)) {
222 cmd = list_entry((&instance->cmd_pool)->next,
223 struct megasas_cmd, list);
224 list_del_init(&cmd->list);
225 } else {
226 dev_err(&instance->pdev->dev, "Command pool empty!\n");
227 }
228
229 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
230 return cmd;
231 }
232
233 /**
234 * megasas_return_cmd - Return a cmd to free command pool
235 * @instance: Adapter soft state
236 * @cmd: Command packet to be returned to free command pool
237 */
238 inline void
239 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
240 {
241 unsigned long flags;
242 u32 blk_tags;
243 struct megasas_cmd_fusion *cmd_fusion;
244 struct fusion_context *fusion = instance->ctrl_context;
245
246 /* This flag is used only for fusion adapter.
247 * Wait for Interrupt for Polled mode DCMD
248 */
249 if (cmd->flags & DRV_DCMD_POLLED_MODE)
250 return;
251
252 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
253
254 if (fusion) {
255 blk_tags = instance->max_scsi_cmds + cmd->index;
256 cmd_fusion = fusion->cmd_list[blk_tags];
257 megasas_return_cmd_fusion(instance, cmd_fusion);
258 }
259 cmd->scmd = NULL;
260 cmd->frame_count = 0;
261 cmd->flags = 0;
262 if (!fusion && reset_devices)
263 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
264 list_add(&cmd->list, (&instance->cmd_pool)->next);
265
266 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
267
268 }
269
270 static const char *
271 format_timestamp(uint32_t timestamp)
272 {
273 static char buffer[32];
274
275 if ((timestamp & 0xff000000) == 0xff000000)
276 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
277 0x00ffffff);
278 else
279 snprintf(buffer, sizeof(buffer), "%us", timestamp);
280 return buffer;
281 }
282
283 static const char *
284 format_class(int8_t class)
285 {
286 static char buffer[6];
287
288 switch (class) {
289 case MFI_EVT_CLASS_DEBUG:
290 return "debug";
291 case MFI_EVT_CLASS_PROGRESS:
292 return "progress";
293 case MFI_EVT_CLASS_INFO:
294 return "info";
295 case MFI_EVT_CLASS_WARNING:
296 return "WARN";
297 case MFI_EVT_CLASS_CRITICAL:
298 return "CRIT";
299 case MFI_EVT_CLASS_FATAL:
300 return "FATAL";
301 case MFI_EVT_CLASS_DEAD:
302 return "DEAD";
303 default:
304 snprintf(buffer, sizeof(buffer), "%d", class);
305 return buffer;
306 }
307 }
308
309 /**
310 * megasas_decode_evt: Decode FW AEN event and print critical event
311 * for information.
312 * @instance: Adapter soft state
313 */
314 static void
315 megasas_decode_evt(struct megasas_instance *instance)
316 {
317 struct megasas_evt_detail *evt_detail = instance->evt_detail;
318 union megasas_evt_class_locale class_locale;
319 class_locale.word = le32_to_cpu(evt_detail->cl.word);
320
321 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
322 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
323 le32_to_cpu(evt_detail->seq_num),
324 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
325 (class_locale.members.locale),
326 format_class(class_locale.members.class),
327 evt_detail->description);
328 }
329
330 /**
331 * The following functions are defined for xscale
332 * (deviceid : 1064R, PERC5) controllers
333 */
334
335 /**
336 * megasas_enable_intr_xscale - Enables interrupts
337 * @regs: MFI register set
338 */
339 static inline void
340 megasas_enable_intr_xscale(struct megasas_instance *instance)
341 {
342 struct megasas_register_set __iomem *regs;
343
344 regs = instance->reg_set;
345 writel(0, &(regs)->outbound_intr_mask);
346
347 /* Dummy readl to force pci flush */
348 readl(&regs->outbound_intr_mask);
349 }
350
351 /**
352 * megasas_disable_intr_xscale -Disables interrupt
353 * @regs: MFI register set
354 */
355 static inline void
356 megasas_disable_intr_xscale(struct megasas_instance *instance)
357 {
358 struct megasas_register_set __iomem *regs;
359 u32 mask = 0x1f;
360
361 regs = instance->reg_set;
362 writel(mask, &regs->outbound_intr_mask);
363 /* Dummy readl to force pci flush */
364 readl(&regs->outbound_intr_mask);
365 }
366
367 /**
368 * megasas_read_fw_status_reg_xscale - returns the current FW status value
369 * @regs: MFI register set
370 */
371 static u32
372 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
373 {
374 return readl(&(regs)->outbound_msg_0);
375 }
376 /**
377 * megasas_clear_intr_xscale - Check & clear interrupt
378 * @regs: MFI register set
379 */
380 static int
381 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
382 {
383 u32 status;
384 u32 mfiStatus = 0;
385
386 /*
387 * Check if it is our interrupt
388 */
389 status = readl(&regs->outbound_intr_status);
390
391 if (status & MFI_OB_INTR_STATUS_MASK)
392 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
393 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
394 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
395
396 /*
397 * Clear the interrupt by writing back the same value
398 */
399 if (mfiStatus)
400 writel(status, &regs->outbound_intr_status);
401
402 /* Dummy readl to force pci flush */
403 readl(&regs->outbound_intr_status);
404
405 return mfiStatus;
406 }
407
408 /**
409 * megasas_fire_cmd_xscale - Sends command to the FW
410 * @frame_phys_addr : Physical address of cmd
411 * @frame_count : Number of frames for the command
412 * @regs : MFI register set
413 */
414 static inline void
415 megasas_fire_cmd_xscale(struct megasas_instance *instance,
416 dma_addr_t frame_phys_addr,
417 u32 frame_count,
418 struct megasas_register_set __iomem *regs)
419 {
420 unsigned long flags;
421
422 spin_lock_irqsave(&instance->hba_lock, flags);
423 writel((frame_phys_addr >> 3)|(frame_count),
424 &(regs)->inbound_queue_port);
425 spin_unlock_irqrestore(&instance->hba_lock, flags);
426 }
427
428 /**
429 * megasas_adp_reset_xscale - For controller reset
430 * @regs: MFI register set
431 */
432 static int
433 megasas_adp_reset_xscale(struct megasas_instance *instance,
434 struct megasas_register_set __iomem *regs)
435 {
436 u32 i;
437 u32 pcidata;
438
439 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
440
441 for (i = 0; i < 3; i++)
442 msleep(1000); /* sleep for 3 secs */
443 pcidata = 0;
444 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
445 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
446 if (pcidata & 0x2) {
447 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
448 pcidata &= ~0x2;
449 pci_write_config_dword(instance->pdev,
450 MFI_1068_PCSR_OFFSET, pcidata);
451
452 for (i = 0; i < 2; i++)
453 msleep(1000); /* need to wait 2 secs again */
454
455 pcidata = 0;
456 pci_read_config_dword(instance->pdev,
457 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
458 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
459 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
460 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
461 pcidata = 0;
462 pci_write_config_dword(instance->pdev,
463 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
464 }
465 }
466 return 0;
467 }
468
469 /**
470 * megasas_check_reset_xscale - For controller reset check
471 * @regs: MFI register set
472 */
473 static int
474 megasas_check_reset_xscale(struct megasas_instance *instance,
475 struct megasas_register_set __iomem *regs)
476 {
477 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
478 (le32_to_cpu(*instance->consumer) ==
479 MEGASAS_ADPRESET_INPROG_SIGN))
480 return 1;
481 return 0;
482 }
483
484 static struct megasas_instance_template megasas_instance_template_xscale = {
485
486 .fire_cmd = megasas_fire_cmd_xscale,
487 .enable_intr = megasas_enable_intr_xscale,
488 .disable_intr = megasas_disable_intr_xscale,
489 .clear_intr = megasas_clear_intr_xscale,
490 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
491 .adp_reset = megasas_adp_reset_xscale,
492 .check_reset = megasas_check_reset_xscale,
493 .service_isr = megasas_isr,
494 .tasklet = megasas_complete_cmd_dpc,
495 .init_adapter = megasas_init_adapter_mfi,
496 .build_and_issue_cmd = megasas_build_and_issue_cmd,
497 .issue_dcmd = megasas_issue_dcmd,
498 };
499
500 /**
501 * This is the end of set of functions & definitions specific
502 * to xscale (deviceid : 1064R, PERC5) controllers
503 */
504
505 /**
506 * The following functions are defined for ppc (deviceid : 0x60)
507 * controllers
508 */
509
510 /**
511 * megasas_enable_intr_ppc - Enables interrupts
512 * @regs: MFI register set
513 */
514 static inline void
515 megasas_enable_intr_ppc(struct megasas_instance *instance)
516 {
517 struct megasas_register_set __iomem *regs;
518
519 regs = instance->reg_set;
520 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
521
522 writel(~0x80000000, &(regs)->outbound_intr_mask);
523
524 /* Dummy readl to force pci flush */
525 readl(&regs->outbound_intr_mask);
526 }
527
528 /**
529 * megasas_disable_intr_ppc - Disable interrupt
530 * @regs: MFI register set
531 */
532 static inline void
533 megasas_disable_intr_ppc(struct megasas_instance *instance)
534 {
535 struct megasas_register_set __iomem *regs;
536 u32 mask = 0xFFFFFFFF;
537
538 regs = instance->reg_set;
539 writel(mask, &regs->outbound_intr_mask);
540 /* Dummy readl to force pci flush */
541 readl(&regs->outbound_intr_mask);
542 }
543
544 /**
545 * megasas_read_fw_status_reg_ppc - returns the current FW status value
546 * @regs: MFI register set
547 */
548 static u32
549 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
550 {
551 return readl(&(regs)->outbound_scratch_pad);
552 }
553
554 /**
555 * megasas_clear_intr_ppc - Check & clear interrupt
556 * @regs: MFI register set
557 */
558 static int
559 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
560 {
561 u32 status, mfiStatus = 0;
562
563 /*
564 * Check if it is our interrupt
565 */
566 status = readl(&regs->outbound_intr_status);
567
568 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
569 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
570
571 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
572 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
573
574 /*
575 * Clear the interrupt by writing back the same value
576 */
577 writel(status, &regs->outbound_doorbell_clear);
578
579 /* Dummy readl to force pci flush */
580 readl(&regs->outbound_doorbell_clear);
581
582 return mfiStatus;
583 }
584
585 /**
586 * megasas_fire_cmd_ppc - Sends command to the FW
587 * @frame_phys_addr : Physical address of cmd
588 * @frame_count : Number of frames for the command
589 * @regs : MFI register set
590 */
591 static inline void
592 megasas_fire_cmd_ppc(struct megasas_instance *instance,
593 dma_addr_t frame_phys_addr,
594 u32 frame_count,
595 struct megasas_register_set __iomem *regs)
596 {
597 unsigned long flags;
598
599 spin_lock_irqsave(&instance->hba_lock, flags);
600 writel((frame_phys_addr | (frame_count<<1))|1,
601 &(regs)->inbound_queue_port);
602 spin_unlock_irqrestore(&instance->hba_lock, flags);
603 }
604
605 /**
606 * megasas_check_reset_ppc - For controller reset check
607 * @regs: MFI register set
608 */
609 static int
610 megasas_check_reset_ppc(struct megasas_instance *instance,
611 struct megasas_register_set __iomem *regs)
612 {
613 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
614 return 1;
615
616 return 0;
617 }
618
619 static struct megasas_instance_template megasas_instance_template_ppc = {
620
621 .fire_cmd = megasas_fire_cmd_ppc,
622 .enable_intr = megasas_enable_intr_ppc,
623 .disable_intr = megasas_disable_intr_ppc,
624 .clear_intr = megasas_clear_intr_ppc,
625 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
626 .adp_reset = megasas_adp_reset_xscale,
627 .check_reset = megasas_check_reset_ppc,
628 .service_isr = megasas_isr,
629 .tasklet = megasas_complete_cmd_dpc,
630 .init_adapter = megasas_init_adapter_mfi,
631 .build_and_issue_cmd = megasas_build_and_issue_cmd,
632 .issue_dcmd = megasas_issue_dcmd,
633 };
634
635 /**
636 * megasas_enable_intr_skinny - Enables interrupts
637 * @regs: MFI register set
638 */
639 static inline void
640 megasas_enable_intr_skinny(struct megasas_instance *instance)
641 {
642 struct megasas_register_set __iomem *regs;
643
644 regs = instance->reg_set;
645 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
646
647 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
648
649 /* Dummy readl to force pci flush */
650 readl(&regs->outbound_intr_mask);
651 }
652
653 /**
654 * megasas_disable_intr_skinny - Disables interrupt
655 * @regs: MFI register set
656 */
657 static inline void
658 megasas_disable_intr_skinny(struct megasas_instance *instance)
659 {
660 struct megasas_register_set __iomem *regs;
661 u32 mask = 0xFFFFFFFF;
662
663 regs = instance->reg_set;
664 writel(mask, &regs->outbound_intr_mask);
665 /* Dummy readl to force pci flush */
666 readl(&regs->outbound_intr_mask);
667 }
668
669 /**
670 * megasas_read_fw_status_reg_skinny - returns the current FW status value
671 * @regs: MFI register set
672 */
673 static u32
674 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
675 {
676 return readl(&(regs)->outbound_scratch_pad);
677 }
678
679 /**
680 * megasas_clear_intr_skinny - Check & clear interrupt
681 * @regs: MFI register set
682 */
683 static int
684 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
685 {
686 u32 status;
687 u32 mfiStatus = 0;
688
689 /*
690 * Check if it is our interrupt
691 */
692 status = readl(&regs->outbound_intr_status);
693
694 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
695 return 0;
696 }
697
698 /*
699 * Check if it is our interrupt
700 */
701 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
702 MFI_STATE_FAULT) {
703 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
704 } else
705 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
706
707 /*
708 * Clear the interrupt by writing back the same value
709 */
710 writel(status, &regs->outbound_intr_status);
711
712 /*
713 * dummy read to flush PCI
714 */
715 readl(&regs->outbound_intr_status);
716
717 return mfiStatus;
718 }
719
720 /**
721 * megasas_fire_cmd_skinny - Sends command to the FW
722 * @frame_phys_addr : Physical address of cmd
723 * @frame_count : Number of frames for the command
724 * @regs : MFI register set
725 */
726 static inline void
727 megasas_fire_cmd_skinny(struct megasas_instance *instance,
728 dma_addr_t frame_phys_addr,
729 u32 frame_count,
730 struct megasas_register_set __iomem *regs)
731 {
732 unsigned long flags;
733
734 spin_lock_irqsave(&instance->hba_lock, flags);
735 writel(upper_32_bits(frame_phys_addr),
736 &(regs)->inbound_high_queue_port);
737 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
738 &(regs)->inbound_low_queue_port);
739 mmiowb();
740 spin_unlock_irqrestore(&instance->hba_lock, flags);
741 }
742
743 /**
744 * megasas_check_reset_skinny - For controller reset check
745 * @regs: MFI register set
746 */
747 static int
748 megasas_check_reset_skinny(struct megasas_instance *instance,
749 struct megasas_register_set __iomem *regs)
750 {
751 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
752 return 1;
753
754 return 0;
755 }
756
757 static struct megasas_instance_template megasas_instance_template_skinny = {
758
759 .fire_cmd = megasas_fire_cmd_skinny,
760 .enable_intr = megasas_enable_intr_skinny,
761 .disable_intr = megasas_disable_intr_skinny,
762 .clear_intr = megasas_clear_intr_skinny,
763 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
764 .adp_reset = megasas_adp_reset_gen2,
765 .check_reset = megasas_check_reset_skinny,
766 .service_isr = megasas_isr,
767 .tasklet = megasas_complete_cmd_dpc,
768 .init_adapter = megasas_init_adapter_mfi,
769 .build_and_issue_cmd = megasas_build_and_issue_cmd,
770 .issue_dcmd = megasas_issue_dcmd,
771 };
772
773
774 /**
775 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
776 * controllers
777 */
778
779 /**
780 * megasas_enable_intr_gen2 - Enables interrupts
781 * @regs: MFI register set
782 */
783 static inline void
784 megasas_enable_intr_gen2(struct megasas_instance *instance)
785 {
786 struct megasas_register_set __iomem *regs;
787
788 regs = instance->reg_set;
789 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
790
791 /* write ~0x00000005 (0x4 | 0x1) to the intr mask */
792 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
793
794 /* Dummy readl to force pci flush */
795 readl(&regs->outbound_intr_mask);
796 }
797
798 /**
799 * megasas_disable_intr_gen2 - Disables interrupt
800 * @regs: MFI register set
801 */
802 static inline void
803 megasas_disable_intr_gen2(struct megasas_instance *instance)
804 {
805 struct megasas_register_set __iomem *regs;
806 u32 mask = 0xFFFFFFFF;
807
808 regs = instance->reg_set;
809 writel(mask, &regs->outbound_intr_mask);
810 /* Dummy readl to force pci flush */
811 readl(&regs->outbound_intr_mask);
812 }
813
814 /**
815 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
816 * @regs: MFI register set
817 */
818 static u32
819 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
820 {
821 return readl(&(regs)->outbound_scratch_pad);
822 }
823
824 /**
825 * megasas_clear_intr_gen2 - Check & clear interrupt
826 * @regs: MFI register set
827 */
828 static int
829 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
830 {
831 u32 status;
832 u32 mfiStatus = 0;
833
834 /*
835 * Check if it is our interrupt
836 */
837 status = readl(&regs->outbound_intr_status);
838
839 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
840 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
841 }
842 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
843 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
844 }
845
846 /*
847 * Clear the interrupt by writing back the same value
848 */
849 if (mfiStatus)
850 writel(status, &regs->outbound_doorbell_clear);
851
852 /* Dummy readl to force pci flush */
853 readl(&regs->outbound_intr_status);
854
855 return mfiStatus;
856 }
857 /**
858 * megasas_fire_cmd_gen2 - Sends command to the FW
859 * @frame_phys_addr : Physical address of cmd
860 * @frame_count : Number of frames for the command
861 * @regs : MFI register set
862 */
863 static inline void
864 megasas_fire_cmd_gen2(struct megasas_instance *instance,
865 dma_addr_t frame_phys_addr,
866 u32 frame_count,
867 struct megasas_register_set __iomem *regs)
868 {
869 unsigned long flags;
870
871 spin_lock_irqsave(&instance->hba_lock, flags);
872 writel((frame_phys_addr | (frame_count<<1))|1,
873 &(regs)->inbound_queue_port);
874 spin_unlock_irqrestore(&instance->hba_lock, flags);
875 }
876
877 /**
878 * megasas_adp_reset_gen2 - For controller reset
879 * @regs: MFI register set
880 */
881 static int
882 megasas_adp_reset_gen2(struct megasas_instance *instance,
883 struct megasas_register_set __iomem *reg_set)
884 {
885 u32 retry = 0 ;
886 u32 HostDiag;
887 u32 __iomem *seq_offset = &reg_set->seq_offset;
888 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
889
890 if (instance->instancet == &megasas_instance_template_skinny) {
891 seq_offset = &reg_set->fusion_seq_offset;
892 hostdiag_offset = &reg_set->fusion_host_diag;
893 }
894
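/*
 * Editor's note (assumption): the fixed values written to the sequence
 * register below appear to be the controller's diagnostic write-enable key;
 * only after this unlock sequence does the host diag register report
 * DIAG_WRITE_ENABLE so that DIAG_RESET_ADAPTER can be asserted further down.
 */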
895 writel(0, seq_offset);
896 writel(4, seq_offset);
897 writel(0xb, seq_offset);
898 writel(2, seq_offset);
899 writel(7, seq_offset);
900 writel(0xd, seq_offset);
901
902 msleep(1000);
903
904 HostDiag = (u32)readl(hostdiag_offset);
905
906 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
907 msleep(100);
908 HostDiag = (u32)readl(hostdiag_offset);
909 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
910 retry, HostDiag);
911
912 if (retry++ >= 100)
913 return 1;
914
915 }
916
917 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
918
919 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
920
921 ssleep(10);
922
923 HostDiag = (u32)readl(hostdiag_offset);
924 while (HostDiag & DIAG_RESET_ADAPTER) {
925 msleep(100);
926 HostDiag = (u32)readl(hostdiag_offset);
927 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
928 retry, HostDiag);
929
930 if (retry++ >= 1000)
931 return 1;
932
933 }
934 return 0;
935 }
936
937 /**
938 * megasas_check_reset_gen2 - For controller reset check
939 * @regs: MFI register set
940 */
941 static int
942 megasas_check_reset_gen2(struct megasas_instance *instance,
943 struct megasas_register_set __iomem *regs)
944 {
945 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
946 return 1;
947
948 return 0;
949 }
950
951 static struct megasas_instance_template megasas_instance_template_gen2 = {
952
953 .fire_cmd = megasas_fire_cmd_gen2,
954 .enable_intr = megasas_enable_intr_gen2,
955 .disable_intr = megasas_disable_intr_gen2,
956 .clear_intr = megasas_clear_intr_gen2,
957 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
958 .adp_reset = megasas_adp_reset_gen2,
959 .check_reset = megasas_check_reset_gen2,
960 .service_isr = megasas_isr,
961 .tasklet = megasas_complete_cmd_dpc,
962 .init_adapter = megasas_init_adapter_mfi,
963 .build_and_issue_cmd = megasas_build_and_issue_cmd,
964 .issue_dcmd = megasas_issue_dcmd,
965 };
966
967 /**
968 * This is the end of set of functions & definitions
969 * specific to gen2 (deviceid : 0x78, 0x79) controllers
970 */
971
972 /*
973 * Template added for TB (Fusion)
974 */
975 extern struct megasas_instance_template megasas_instance_template_fusion;
976
977 /**
978 * megasas_issue_polled - Issues a polling command
979 * @instance: Adapter soft state
980 * @cmd: Command packet to be issued
981 *
982 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
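 *
 * Editor's note (assumption): the MFI_FRAME_DONT_POST_IN_REPLY_QUEUE flag
 * appears to tell the firmware not to post a reply descriptor for this
 * frame; wait_and_poll() instead spins on cmd_status in the host-resident
 * frame until the firmware overwrites MFI_STAT_INVALID_STATUS with the real
 * completion status.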
983 */
984 int
985 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
986 {
987 struct megasas_header *frame_hdr = &cmd->frame->hdr;
988
989 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
990 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
991
992 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
993 (instance->instancet->issue_dcmd(instance, cmd))) {
994 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
995 __func__, __LINE__);
996 return DCMD_NOT_FIRED;
997 }
998
999 return wait_and_poll(instance, cmd, instance->requestorId ?
1000 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1001 }
1002
1003 /**
1004 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1005 * @instance: Adapter soft state
1006 * @cmd: Command to be issued
1007 * @timeout: Timeout in seconds
1008 *
1009 * This function waits on an event for the command to be returned from ISR.
1010 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1011 * Used to issue ioctl commands.
1012 */
1013 int
1014 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1015 struct megasas_cmd *cmd, int timeout)
1016 {
1017 int ret = 0;
1018 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1019
1020 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1021 (instance->instancet->issue_dcmd(instance, cmd))) {
1022 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1023 __func__, __LINE__);
1024 return DCMD_NOT_FIRED;
1025 }
1026
1027 if (timeout) {
1028 ret = wait_event_timeout(instance->int_cmd_wait_q,
1029 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1030 if (!ret) {
1031 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1032 __func__, __LINE__);
1033 return DCMD_TIMEOUT;
1034 }
1035 } else
1036 wait_event(instance->int_cmd_wait_q,
1037 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1038
1039 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1040 DCMD_SUCCESS : DCMD_FAILED;
1041 }
1042
1043 /**
1044 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1045 * @instance: Adapter soft state
1046 * @cmd_to_abort: Previously issued cmd to be aborted
1047 * @timeout: Timeout in seconds
1048 *
1049 * MFI firmware can abort previously issued AEN command (automatic event
1050 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1051 * cmd and waits for return status.
1052 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1053 */
1054 static int
1055 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1056 struct megasas_cmd *cmd_to_abort, int timeout)
1057 {
1058 struct megasas_cmd *cmd;
1059 struct megasas_abort_frame *abort_fr;
1060 int ret = 0;
1061
1062 cmd = megasas_get_cmd(instance);
1063
1064 if (!cmd)
1065 return -1;
1066
1067 abort_fr = &cmd->frame->abort;
1068
1069 /*
1070 * Prepare and issue the abort frame
1071 */
1072 abort_fr->cmd = MFI_CMD_ABORT;
1073 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1074 abort_fr->flags = cpu_to_le16(0);
1075 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1076 abort_fr->abort_mfi_phys_addr_lo =
1077 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1078 abort_fr->abort_mfi_phys_addr_hi =
1079 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1080
1081 cmd->sync_cmd = 1;
1082 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1083
1084 if ((atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) ||
1085 (instance->instancet->issue_dcmd(instance, cmd))) {
1086 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1087 __func__, __LINE__);
1088 return DCMD_NOT_FIRED;
1089 }
1090
1091 if (timeout) {
1092 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1093 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1094 if (!ret) {
1095 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1096 __func__, __LINE__);
1097 return DCMD_TIMEOUT;
1098 }
1099 } else
1100 wait_event(instance->abort_cmd_wait_q,
1101 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1102
1103 cmd->sync_cmd = 0;
1104
1105 megasas_return_cmd(instance, cmd);
1106 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1107 DCMD_SUCCESS : DCMD_FAILED;
1108 }
1109
1110 /**
1111 * megasas_make_sgl32 - Prepares 32-bit SGL
1112 * @instance: Adapter soft state
1113 * @scp: SCSI command from the mid-layer
1114 * @mfi_sgl: SGL to be filled in
1115 *
1116 * If successful, this function returns the number of SG elements. Otherwise,
1117 * it returns -1.
1118 */
1119 static int
1120 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1121 union megasas_sgl *mfi_sgl)
1122 {
1123 int i;
1124 int sge_count;
1125 struct scatterlist *os_sgl;
1126
1127 sge_count = scsi_dma_map(scp);
1128 BUG_ON(sge_count < 0);
1129
1130 if (sge_count) {
1131 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1132 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1133 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1134 }
1135 }
1136 return sge_count;
1137 }
1138
1139 /**
1140 * megasas_make_sgl64 - Prepares 64-bit SGL
1141 * @instance: Adapter soft state
1142 * @scp: SCSI command from the mid-layer
1143 * @mfi_sgl: SGL to be filled in
1144 *
1145 * If successful, this function returns the number of SG elements. Otherwise,
1146 * it returns -1.
1147 */
1148 static int
1149 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1150 union megasas_sgl *mfi_sgl)
1151 {
1152 int i;
1153 int sge_count;
1154 struct scatterlist *os_sgl;
1155
1156 sge_count = scsi_dma_map(scp);
1157 BUG_ON(sge_count < 0);
1158
1159 if (sge_count) {
1160 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1161 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1162 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1163 }
1164 }
1165 return sge_count;
1166 }
1167
1168 /**
1169 * megasas_make_sgl_skinny - Prepares IEEE SGL
1170 * @instance: Adapter soft state
1171 * @scp: SCSI command from the mid-layer
1172 * @mfi_sgl: SGL to be filled in
1173 *
1174 * If successful, this function returns the number of SG elements. Otherwise,
1175 * it returns -1.
1176 */
1177 static int
1178 megasas_make_sgl_skinny(struct megasas_instance *instance,
1179 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1180 {
1181 int i;
1182 int sge_count;
1183 struct scatterlist *os_sgl;
1184
1185 sge_count = scsi_dma_map(scp);
1186
1187 if (sge_count) {
1188 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1189 mfi_sgl->sge_skinny[i].length =
1190 cpu_to_le32(sg_dma_len(os_sgl));
1191 mfi_sgl->sge_skinny[i].phys_addr =
1192 cpu_to_le64(sg_dma_address(os_sgl));
1193 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1194 }
1195 }
1196 return sge_count;
1197 }
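/*
 * Editor's note: the three megasas_make_sgl*() helpers above differ only in
 * the SGE descriptor they emit (32-bit, 64-bit or IEEE/skinny); each maps
 * the scatterlist with scsi_dma_map() and copies every segment's DMA
 * address and length into the MFI SGL.
 */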
1198
1199 /**
1200 * megasas_get_frame_count - Computes the number of frames
1201 * @frame_type : type of frame- io or pthru frame
1202 * @sge_count : number of sg elements
1203 *
1204 * Returns the number of frames required for the number of SG elements (sge_count)
1205 */
1206
1207 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1208 u8 sge_count, u8 frame_type)
1209 {
1210 int num_cnt;
1211 int sge_bytes;
1212 u32 sge_sz;
1213 u32 frame_count = 0;
1214
1215 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1216 sizeof(struct megasas_sge32);
1217
1218 if (instance->flag_ieee) {
1219 sge_sz = sizeof(struct megasas_sge_skinny);
1220 }
1221
1222 /*
1223 * Main frame can contain 2 SGEs for 64-bit SGLs and
1224 * 3 SGEs for 32-bit SGLs for ldio &
1225 * 1 SGEs for 64-bit SGLs and
1226 * 2 SGEs for 32-bit SGLs for pthru frame
1227 */
1228 if (unlikely(frame_type == PTHRU_FRAME)) {
1229 if (instance->flag_ieee == 1) {
1230 num_cnt = sge_count - 1;
1231 } else if (IS_DMA64)
1232 num_cnt = sge_count - 1;
1233 else
1234 num_cnt = sge_count - 2;
1235 } else {
1236 if (instance->flag_ieee == 1) {
1237 num_cnt = sge_count - 1;
1238 } else if (IS_DMA64)
1239 num_cnt = sge_count - 2;
1240 else
1241 num_cnt = sge_count - 3;
1242 }
1243
1244 if (num_cnt > 0) {
1245 sge_bytes = sge_sz * num_cnt;
1246
1247 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1248 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1249 }
1250 /* Main frame */
1251 frame_count += 1;
1252
1253 if (frame_count > 7)
1254 frame_count = 8;
1255 return frame_count;
1256 }
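/*
 * Editor's worked example for megasas_get_frame_count() (illustrative only,
 * assuming MEGAMFI_FRAME_SIZE is 64 and an IEEE/skinny SGE is 16 bytes):
 * an LDIO with flag_ieee set and sge_count = 9 keeps one SGE in the main
 * frame, leaving num_cnt = 8 overflow SGEs, i.e. sge_bytes = 128 bytes,
 * which fit in two additional 64-byte frames; including the main frame the
 * command therefore consumes frame_count = 3.
 */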
1257
1258 /**
1259 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1260 * @instance: Adapter soft state
1261 * @scp: SCSI command
1262 * @cmd: Command to be prepared in
1263 *
1264 * This function prepares CDB commands. These are typically pass-through
1265 * commands to the devices.
1266 */
1267 static int
1268 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1269 struct megasas_cmd *cmd)
1270 {
1271 u32 is_logical;
1272 u32 device_id;
1273 u16 flags = 0;
1274 struct megasas_pthru_frame *pthru;
1275
1276 is_logical = MEGASAS_IS_LOGICAL(scp);
1277 device_id = MEGASAS_DEV_INDEX(scp);
1278 pthru = (struct megasas_pthru_frame *)cmd->frame;
1279
1280 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1281 flags = MFI_FRAME_DIR_WRITE;
1282 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1283 flags = MFI_FRAME_DIR_READ;
1284 else if (scp->sc_data_direction == PCI_DMA_NONE)
1285 flags = MFI_FRAME_DIR_NONE;
1286
1287 if (instance->flag_ieee == 1) {
1288 flags |= MFI_FRAME_IEEE;
1289 }
1290
1291 /*
1292 * Prepare the DCDB frame
1293 */
1294 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1295 pthru->cmd_status = 0x0;
1296 pthru->scsi_status = 0x0;
1297 pthru->target_id = device_id;
1298 pthru->lun = scp->device->lun;
1299 pthru->cdb_len = scp->cmd_len;
1300 pthru->timeout = 0;
1301 pthru->pad_0 = 0;
1302 pthru->flags = cpu_to_le16(flags);
1303 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1304
1305 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1306
1307 /*
1308 * If the command is for the tape device, set the
1309 * pthru timeout to the os layer timeout value.
1310 */
1311 if (scp->device->type == TYPE_TAPE) {
1312 if ((scp->request->timeout / HZ) > 0xFFFF)
1313 pthru->timeout = cpu_to_le16(0xFFFF);
1314 else
1315 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1316 }
1317
1318 /*
1319 * Construct SGL
1320 */
1321 if (instance->flag_ieee == 1) {
1322 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1323 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1324 &pthru->sgl);
1325 } else if (IS_DMA64) {
1326 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1327 pthru->sge_count = megasas_make_sgl64(instance, scp,
1328 &pthru->sgl);
1329 } else
1330 pthru->sge_count = megasas_make_sgl32(instance, scp,
1331 &pthru->sgl);
1332
1333 if (pthru->sge_count > instance->max_num_sge) {
1334 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1335 pthru->sge_count);
1336 return 0;
1337 }
1338
1339 /*
1340 * Sense info specific
1341 */
1342 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1343 pthru->sense_buf_phys_addr_hi =
1344 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1345 pthru->sense_buf_phys_addr_lo =
1346 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1347
1348 /*
1349 * Compute the total number of frames this command consumes. FW uses
1350 * this number to pull sufficient number of frames from host memory.
1351 */
1352 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1353 PTHRU_FRAME);
1354
1355 return cmd->frame_count;
1356 }
1357
1358 /**
1359 * megasas_build_ldio - Prepares IOs to logical devices
1360 * @instance: Adapter soft state
1361 * @scp: SCSI command
1362 * @cmd: Command to be prepared
1363 *
1364 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1365 */
1366 static int
1367 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1368 struct megasas_cmd *cmd)
1369 {
1370 u32 device_id;
1371 u8 sc = scp->cmnd[0];
1372 u16 flags = 0;
1373 struct megasas_io_frame *ldio;
1374
1375 device_id = MEGASAS_DEV_INDEX(scp);
1376 ldio = (struct megasas_io_frame *)cmd->frame;
1377
1378 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1379 flags = MFI_FRAME_DIR_WRITE;
1380 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1381 flags = MFI_FRAME_DIR_READ;
1382
1383 if (instance->flag_ieee == 1) {
1384 flags |= MFI_FRAME_IEEE;
1385 }
1386
1387 /*
1388 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1389 */
1390 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1391 ldio->cmd_status = 0x0;
1392 ldio->scsi_status = 0x0;
1393 ldio->target_id = device_id;
1394 ldio->timeout = 0;
1395 ldio->reserved_0 = 0;
1396 ldio->pad_0 = 0;
1397 ldio->flags = cpu_to_le16(flags);
1398 ldio->start_lba_hi = 0;
1399 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1400
1401 /*
1402 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1403 */
1404 if (scp->cmd_len == 6) {
1405 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1406 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1407 ((u32) scp->cmnd[2] << 8) |
1408 (u32) scp->cmnd[3]);
1409
1410 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1411 }
1412
1413 /*
1414 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1415 */
1416 else if (scp->cmd_len == 10) {
1417 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1418 ((u32) scp->cmnd[7] << 8));
1419 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1420 ((u32) scp->cmnd[3] << 16) |
1421 ((u32) scp->cmnd[4] << 8) |
1422 (u32) scp->cmnd[5]);
1423 }
1424
1425 /*
1426 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1427 */
1428 else if (scp->cmd_len == 12) {
1429 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1430 ((u32) scp->cmnd[7] << 16) |
1431 ((u32) scp->cmnd[8] << 8) |
1432 (u32) scp->cmnd[9]);
1433
1434 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1435 ((u32) scp->cmnd[3] << 16) |
1436 ((u32) scp->cmnd[4] << 8) |
1437 (u32) scp->cmnd[5]);
1438 }
1439
1440 /*
1441 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1442 */
1443 else if (scp->cmd_len == 16) {
1444 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1445 ((u32) scp->cmnd[11] << 16) |
1446 ((u32) scp->cmnd[12] << 8) |
1447 (u32) scp->cmnd[13]);
1448
1449 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1450 ((u32) scp->cmnd[7] << 16) |
1451 ((u32) scp->cmnd[8] << 8) |
1452 (u32) scp->cmnd[9]);
1453
1454 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1455 ((u32) scp->cmnd[3] << 16) |
1456 ((u32) scp->cmnd[4] << 8) |
1457 (u32) scp->cmnd[5]);
1458
1459 }
1460
1461 /*
1462 * Construct SGL
1463 */
1464 if (instance->flag_ieee) {
1465 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1466 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1467 &ldio->sgl);
1468 } else if (IS_DMA64) {
1469 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1470 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1471 } else
1472 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1473
1474 if (ldio->sge_count > instance->max_num_sge) {
1475 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1476 ldio->sge_count);
1477 return 0;
1478 }
1479
1480 /*
1481 * Sense info specific
1482 */
1483 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1484 ldio->sense_buf_phys_addr_hi = 0;
1485 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1486
1487 /*
1488 * Compute the total number of frames this command consumes. FW uses
1489 * this number to pull sufficient number of frames from host memory.
1490 */
1491 cmd->frame_count = megasas_get_frame_count(instance,
1492 ldio->sge_count, IO_FRAME);
1493
1494 return cmd->frame_count;
1495 }
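/*
 * Editor's worked example (illustrative only): for a READ(10) CDB of
 * 28 00 00 12 34 56 00 00 08 00, the 10-byte branch above takes bytes 2-5
 * as the big-endian start LBA and bytes 7-8 as the transfer length, so
 * start_lba_lo = 0x00123456, lba_count = 8 and start_lba_hi stays 0.
 */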
1496
1497 /**
1498 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1499 * and whether it's RW or non RW
1500 * @scmd: SCSI command
1501 *
1502 */
1503 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1504 {
1505 int ret;
1506
1507 switch (cmd->cmnd[0]) {
1508 case READ_10:
1509 case WRITE_10:
1510 case READ_12:
1511 case WRITE_12:
1512 case READ_6:
1513 case WRITE_6:
1514 case READ_16:
1515 case WRITE_16:
1516 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1517 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1518 break;
1519 default:
1520 ret = (MEGASAS_IS_LOGICAL(cmd)) ?
1521 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1522 }
1523 return ret;
1524 }
1525
1526 /**
1527 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1528 * in FW
1529 * @instance: Adapter soft state
1530 */
1531 static inline void
1532 megasas_dump_pending_frames(struct megasas_instance *instance)
1533 {
1534 struct megasas_cmd *cmd;
1535 int i,n;
1536 union megasas_sgl *mfi_sgl;
1537 struct megasas_io_frame *ldio;
1538 struct megasas_pthru_frame *pthru;
1539 u32 sgcount;
1540 u32 max_cmd = instance->max_fw_cmds;
1541
1542 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1543 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1544 if (IS_DMA64)
1545 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1546 else
1547 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1548
1549 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1550 for (i = 0; i < max_cmd; i++) {
1551 cmd = instance->cmd_list[i];
1552 if (!cmd->scmd)
1553 continue;
1554 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1555 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1556 ldio = (struct megasas_io_frame *)cmd->frame;
1557 mfi_sgl = &ldio->sgl;
1558 sgcount = ldio->sge_count;
1559 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1560 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1561 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1562 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1563 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1564 } else {
1565 pthru = (struct megasas_pthru_frame *) cmd->frame;
1566 mfi_sgl = &pthru->sgl;
1567 sgcount = pthru->sge_count;
1568 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1569 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1570 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1571 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1572 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1573 }
1574 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1575 for (n = 0; n < sgcount; n++) {
1576 if (IS_DMA64)
1577 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1578 le32_to_cpu(mfi_sgl->sge64[n].length),
1579 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1580 else
1581 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1582 le32_to_cpu(mfi_sgl->sge32[n].length),
1583 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1584 }
1585 }
1586 } /*for max_cmd*/
1587 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1588 for (i = 0; i < max_cmd; i++) {
1589
1590 cmd = instance->cmd_list[i];
1591
1592 if (cmd->sync_cmd == 1)
1593 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1594 }
1595 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1596 }
1597
1598 u32
1599 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1600 struct scsi_cmnd *scmd)
1601 {
1602 struct megasas_cmd *cmd;
1603 u32 frame_count;
1604
1605 cmd = megasas_get_cmd(instance);
1606 if (!cmd)
1607 return SCSI_MLQUEUE_HOST_BUSY;
1608
1609 /*
1610 * Logical drive command
1611 */
1612 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1613 frame_count = megasas_build_ldio(instance, scmd, cmd);
1614 else
1615 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1616
1617 if (!frame_count)
1618 goto out_return_cmd;
1619
1620 cmd->scmd = scmd;
1621 scmd->SCp.ptr = (char *)cmd;
1622
1623 /*
1624 * Issue the command to the FW
1625 */
1626 atomic_inc(&instance->fw_outstanding);
1627
1628 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1629 cmd->frame_count-1, instance->reg_set);
1630
1631 return 0;
1632 out_return_cmd:
1633 megasas_return_cmd(instance, cmd);
1634 return SCSI_MLQUEUE_HOST_BUSY;
1635 }
1636
1637
1638 /**
1639 * megasas_queue_command - Queue entry point
1640 * @shost: SCSI host to which the command is queued
1641 * @scmd: SCSI command to be queued
1642 */
1643 static int
1644 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1645 {
1646 struct megasas_instance *instance;
1647 struct MR_PRIV_DEVICE *mr_device_priv_data;
1648
1649 instance = (struct megasas_instance *)
1650 scmd->device->host->hostdata;
1651
1652 if (instance->unload == 1) {
1653 scmd->result = DID_NO_CONNECT << 16;
1654 scmd->scsi_done(scmd);
1655 return 0;
1656 }
1657
1658 if (instance->issuepend_done == 0)
1659 return SCSI_MLQUEUE_HOST_BUSY;
1660
1661
1662 /* Check for an mpio path and adjust behavior */
1663 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1664 if (megasas_check_mpio_paths(instance, scmd) ==
1665 (DID_RESET << 16)) {
1666 return SCSI_MLQUEUE_HOST_BUSY;
1667 } else {
1668 scmd->result = DID_NO_CONNECT << 16;
1669 scmd->scsi_done(scmd);
1670 return 0;
1671 }
1672 }
1673
1674 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1675 scmd->result = DID_NO_CONNECT << 16;
1676 scmd->scsi_done(scmd);
1677 return 0;
1678 }
1679
1680 mr_device_priv_data = scmd->device->hostdata;
1681 if (!mr_device_priv_data) {
1682 scmd->result = DID_NO_CONNECT << 16;
1683 scmd->scsi_done(scmd);
1684 return 0;
1685 }
1686
1687 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1688 return SCSI_MLQUEUE_HOST_BUSY;
1689
1690 if (mr_device_priv_data->tm_busy)
1691 return SCSI_MLQUEUE_DEVICE_BUSY;
1692
1693
1694 scmd->result = 0;
1695
1696 if (MEGASAS_IS_LOGICAL(scmd) &&
1697 (scmd->device->id >= instance->fw_supported_vd_count ||
1698 scmd->device->lun)) {
1699 scmd->result = DID_BAD_TARGET << 16;
1700 goto out_done;
1701 }
1702
1703 /*
1704 * FW takes care of flush cache on its own for Virtual Disk.
1705 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
1706 */
1707 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
1708 scmd->result = DID_OK << 16;
1709 goto out_done;
1710 }
1711
1712 return instance->instancet->build_and_issue_cmd(instance, scmd);
1713
1714 out_done:
1715 scmd->scsi_done(scmd);
1716 return 0;
1717 }
1718
1719 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1720 {
1721 int i;
1722
1723 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1724
1725 if ((megasas_mgmt_info.instance[i]) &&
1726 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1727 return megasas_mgmt_info.instance[i];
1728 }
1729
1730 return NULL;
1731 }
1732
1733 /*
1734 * megasas_update_sdev_properties - Update sdev structure based on controller's FW capabilities
1735 *
1736 * @sdev: OS provided scsi device
1737 *
1738 * Returns void
1739 */
1740 void megasas_update_sdev_properties(struct scsi_device *sdev)
1741 {
1742 u16 pd_index = 0;
1743 u32 device_id, ld;
1744 struct megasas_instance *instance;
1745 struct fusion_context *fusion;
1746 struct MR_PRIV_DEVICE *mr_device_priv_data;
1747 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1748 struct MR_LD_RAID *raid;
1749 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1750
1751 instance = megasas_lookup_instance(sdev->host->host_no);
1752 fusion = instance->ctrl_context;
1753 mr_device_priv_data = sdev->hostdata;
1754
1755 if (!fusion)
1756 return;
1757
1758 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1759 instance->use_seqnum_jbod_fp) {
1760 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1761 sdev->id;
1762 pd_sync = (void *)fusion->pd_seq_sync
1763 [(instance->pd_seq_map_id - 1) & 1];
1764 mr_device_priv_data->is_tm_capable =
1765 pd_sync->seq[pd_index].capability.tmCapable;
1766 } else {
1767 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1768 + sdev->id;
1769 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1770 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1771 raid = MR_LdRaidGet(ld, local_map_ptr);
1772
1773 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1774 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1775 mr_device_priv_data->is_tm_capable =
1776 raid->capability.tmCapable;
1777 }
1778 }
1779
1780 static void megasas_set_device_queue_depth(struct scsi_device *sdev)
1781 {
1782 u16 pd_index = 0;
1783 int ret = DCMD_FAILED;
1784 struct megasas_instance *instance;
1785
1786 instance = megasas_lookup_instance(sdev->host->host_no);
1787
1788 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1789 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1790
1791 if (instance->pd_info) {
1792 mutex_lock(&instance->hba_mutex);
1793 ret = megasas_get_pd_info(instance, pd_index);
1794 mutex_unlock(&instance->hba_mutex);
1795 }
1796
1797 if (ret != DCMD_SUCCESS)
1798 return;
1799
1800 if (instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
1801
1802 switch (instance->pd_list[pd_index].interface) {
1803 case SAS_PD:
1804 scsi_change_queue_depth(sdev, MEGASAS_SAS_QD);
1805 break;
1806
1807 case SATA_PD:
1808 scsi_change_queue_depth(sdev, MEGASAS_SATA_QD);
1809 break;
1810
1811 default:
1812 scsi_change_queue_depth(sdev, MEGASAS_DEFAULT_PD_QD);
1813 }
1814 }
1815 }
1816 }
1817
1818
1819 static int megasas_slave_configure(struct scsi_device *sdev)
1820 {
1821 u16 pd_index = 0;
1822 struct megasas_instance *instance;
1823
1824 instance = megasas_lookup_instance(sdev->host->host_no);
1825 if (instance->pd_list_not_supported) {
1826 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1827 sdev->type == TYPE_DISK) {
1828 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1829 sdev->id;
1830 if (instance->pd_list[pd_index].driveState !=
1831 MR_PD_STATE_SYSTEM)
1832 return -ENXIO;
1833 }
1834 }
1835 megasas_set_device_queue_depth(sdev);
1836 megasas_update_sdev_properties(sdev);
1837
1838 /*
1839 * The RAID firmware may require extended timeouts.
1840 */
1841 blk_queue_rq_timeout(sdev->request_queue,
1842 scmd_timeout * HZ);
1843
1844 return 0;
1845 }
1846
1847 static int megasas_slave_alloc(struct scsi_device *sdev)
1848 {
1849 u16 pd_index = 0;
1850 struct megasas_instance *instance ;
1851 struct MR_PRIV_DEVICE *mr_device_priv_data;
1852
1853 instance = megasas_lookup_instance(sdev->host->host_no);
1854 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1855 /*
1856 * Expose the device to the OS scan only for SYSTEM PDs
1857 */
1858 pd_index =
1859 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1860 sdev->id;
1861 if ((instance->pd_list_not_supported ||
1862 instance->pd_list[pd_index].driveState ==
1863 MR_PD_STATE_SYSTEM)) {
1864 goto scan_target;
1865 }
1866 return -ENXIO;
1867 }
1868
1869 scan_target:
1870 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1871 GFP_KERNEL);
1872 if (!mr_device_priv_data)
1873 return -ENOMEM;
1874 sdev->hostdata = mr_device_priv_data;
1875 return 0;
1876 }
1877
1878 static void megasas_slave_destroy(struct scsi_device *sdev)
1879 {
1880 kfree(sdev->hostdata);
1881 sdev->hostdata = NULL;
1882 }
1883
1884 /*
1885 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after the
1886 * adapter is killed
1887 * @instance: Adapter soft state
1888 *
1889 */
1890 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1891 {
1892 int i;
1893 struct megasas_cmd *cmd_mfi;
1894 struct megasas_cmd_fusion *cmd_fusion;
1895 struct fusion_context *fusion = instance->ctrl_context;
1896
1897 /* Find all outstanding ioctls */
1898 if (fusion) {
1899 for (i = 0; i < instance->max_fw_cmds; i++) {
1900 cmd_fusion = fusion->cmd_list[i];
1901 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1902 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1903 if (cmd_mfi->sync_cmd &&
1904 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1905 megasas_complete_cmd(instance,
1906 cmd_mfi, DID_OK);
1907 }
1908 }
1909 } else {
1910 for (i = 0; i < instance->max_fw_cmds; i++) {
1911 cmd_mfi = instance->cmd_list[i];
1912 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
1913 MFI_CMD_ABORT)
1914 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
1915 }
1916 }
1917 }
1918
1919
1920 void megaraid_sas_kill_hba(struct megasas_instance *instance)
1921 {
1922 /* Set critical error to block I/O & ioctls in case caller didn't */
1923 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
1924 /* Wait 1 second to ensure IO or ioctls in build have posted */
1925 msleep(1000);
1926 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1927 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1928 (instance->ctrl_context)) {
1929 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1930 /* Flush */
1931 readl(&instance->reg_set->doorbell);
1932 if (instance->requestorId && instance->peerIsPresent)
1933 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
1934 } else {
1935 writel(MFI_STOP_ADP,
1936 &instance->reg_set->inbound_doorbell);
1937 }
1938 /* Complete outstanding ioctls when adapter is killed */
1939 megasas_complete_outstanding_ioctls(instance);
1940 }
1941
1942 /**
1943 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
1944 * restored to max value
1945 * @instance: Adapter soft state
1946 *
1947 */
1948 void
1949 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
1950 {
1951 unsigned long flags;
1952
1953 if (instance->flag & MEGASAS_FW_BUSY
1954 && time_after(jiffies, instance->last_time + 5 * HZ)
1955 && atomic_read(&instance->fw_outstanding) <
1956 instance->throttlequeuedepth + 1) {
1957
1958 spin_lock_irqsave(instance->host->host_lock, flags);
1959 instance->flag &= ~MEGASAS_FW_BUSY;
1960
1961 instance->host->can_queue = instance->cur_can_queue;
1962 spin_unlock_irqrestore(instance->host->host_lock, flags);
1963 }
1964 }
1965
1966 /**
1967 * megasas_complete_cmd_dpc - Completes commands from the reply queue
1968 * @instance_addr: Address of adapter soft state
1969 *
1970 * Tasklet to complete cmds
1971 */
1972 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1973 {
1974 u32 producer;
1975 u32 consumer;
1976 u32 context;
1977 struct megasas_cmd *cmd;
1978 struct megasas_instance *instance =
1979 (struct megasas_instance *)instance_addr;
1980 unsigned long flags;
1981
1982 /* If we have already declared adapter dead, do not complete cmds */
1983 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
1984 return;
1985
1986 spin_lock_irqsave(&instance->completion_lock, flags);
1987
1988 producer = le32_to_cpu(*instance->producer);
1989 consumer = le32_to_cpu(*instance->consumer);
1990
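/*
 * Walk the reply ring from consumer to producer; each entry carries the
 * context (command index) of a completed MFI frame. The consumer counter
 * is then updated to match the producer once the ring has been drained.
 */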
1991 while (consumer != producer) {
1992 context = le32_to_cpu(instance->reply_queue[consumer]);
1993 if (context >= instance->max_fw_cmds) {
1994 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
1995 context);
1996 BUG();
1997 }
1998
1999 cmd = instance->cmd_list[context];
2000
2001 megasas_complete_cmd(instance, cmd, DID_OK);
2002
2003 consumer++;
2004 if (consumer == (instance->max_fw_cmds + 1)) {
2005 consumer = 0;
2006 }
2007 }
2008
2009 *instance->consumer = cpu_to_le32(producer);
2010
2011 spin_unlock_irqrestore(&instance->completion_lock, flags);
2012
2013 /*
2014 * Check if we can restore can_queue
2015 */
2016 megasas_check_and_restore_queue_depth(instance);
2017 }
2018
2019 /**
2020 * megasas_start_timer - Initializes a timer object
2021 * @instance: Adapter soft state
2022 * @timer: timer object to be initialized
2023 * @fn: timer function
2024 * @interval: time interval between timer function calls
2025 *
2026 */
2027 void megasas_start_timer(struct megasas_instance *instance,
2028 struct timer_list *timer,
2029 void *fn, unsigned long interval)
2030 {
2031 init_timer(timer);
2032 timer->expires = jiffies + interval;
2033 timer->data = (unsigned long)instance;
2034 timer->function = fn;
2035 add_timer(timer);
2036 }
2037
2038 static void
2039 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2040
2041 static void
2042 process_fw_state_change_wq(struct work_struct *work);
2043
2044 void megasas_do_ocr(struct megasas_instance *instance)
2045 {
2046 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2047 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2048 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2049 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2050 }
2051 instance->instancet->disable_intr(instance);
2052 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2053 instance->issuepend_done = 0;
2054
2055 atomic_set(&instance->fw_outstanding, 0);
2056 megasas_internal_reset_defer_cmds(instance);
2057 process_fw_state_change_wq(&instance->work_init);
2058 }
2059
2060 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2061 int initial)
2062 {
2063 struct megasas_cmd *cmd;
2064 struct megasas_dcmd_frame *dcmd;
2065 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2066 dma_addr_t new_affiliation_111_h;
2067 int ld, retval = 0;
2068 u8 thisVf;
2069
2070 cmd = megasas_get_cmd(instance);
2071
2072 if (!cmd) {
2073 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2074 "Failed to get cmd for scsi%d\n",
2075 instance->host->host_no);
2076 return -ENOMEM;
2077 }
2078
2079 dcmd = &cmd->frame->dcmd;
2080
2081 if (!instance->vf_affiliation_111) {
2082 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2083 "affiliation for scsi%d\n", instance->host->host_no);
2084 megasas_return_cmd(instance, cmd);
2085 return -ENOMEM;
2086 }
2087
2088 if (initial)
2089 memset(instance->vf_affiliation_111, 0,
2090 sizeof(struct MR_LD_VF_AFFILIATION_111));
2091 else {
2092 new_affiliation_111 =
2093 pci_alloc_consistent(instance->pdev,
2094 sizeof(struct MR_LD_VF_AFFILIATION_111),
2095 &new_affiliation_111_h);
2096 if (!new_affiliation_111) {
2097 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2098 "memory for new affiliation for scsi%d\n",
2099 instance->host->host_no);
2100 megasas_return_cmd(instance, cmd);
2101 return -ENOMEM;
2102 }
2103 memset(new_affiliation_111, 0,
2104 sizeof(struct MR_LD_VF_AFFILIATION_111));
2105 }
2106
2107 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2108
2109 dcmd->cmd = MFI_CMD_DCMD;
2110 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2111 dcmd->sge_count = 1;
2112 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2113 dcmd->timeout = 0;
2114 dcmd->pad_0 = 0;
2115 dcmd->data_xfer_len =
2116 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2117 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2118
2119 if (initial)
2120 dcmd->sgl.sge32[0].phys_addr =
2121 cpu_to_le32(instance->vf_affiliation_111_h);
2122 else
2123 dcmd->sgl.sge32[0].phys_addr =
2124 cpu_to_le32(new_affiliation_111_h);
2125
2126 dcmd->sgl.sge32[0].length = cpu_to_le32(
2127 sizeof(struct MR_LD_VF_AFFILIATION_111));
2128
2129 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2130 "scsi%d\n", instance->host->host_no);
2131
2132 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2133 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2134 " failed with status 0x%x for scsi%d\n",
2135 dcmd->cmd_status, instance->host->host_no);
2136 retval = 1; /* Do a scan if we couldn't get affiliation */
2137 goto out;
2138 }
2139
2140 if (!initial) {
2141 thisVf = new_affiliation_111->thisVf;
2142 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2143 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2144 new_affiliation_111->map[ld].policy[thisVf]) {
2145 dev_warn(&instance->pdev->dev, "SR-IOV: "
2146 "Got new LD/VF affiliation for scsi%d\n",
2147 instance->host->host_no);
2148 memcpy(instance->vf_affiliation_111,
2149 new_affiliation_111,
2150 sizeof(struct MR_LD_VF_AFFILIATION_111));
2151 retval = 1;
2152 goto out;
2153 }
2154 }
2155 out:
2156 if (new_affiliation_111) {
2157 pci_free_consistent(instance->pdev,
2158 sizeof(struct MR_LD_VF_AFFILIATION_111),
2159 new_affiliation_111,
2160 new_affiliation_111_h);
2161 }
2162
2163 megasas_return_cmd(instance, cmd);
2164
2165 return retval;
2166 }
2167
2168 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2169 int initial)
2170 {
2171 struct megasas_cmd *cmd;
2172 struct megasas_dcmd_frame *dcmd;
2173 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2174 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2175 dma_addr_t new_affiliation_h;
2176 int i, j, retval = 0, found = 0, doscan = 0;
2177 u8 thisVf;
2178
2179 cmd = megasas_get_cmd(instance);
2180
2181 if (!cmd) {
2182 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2183 "Failed to get cmd for scsi%d\n",
2184 instance->host->host_no);
2185 return -ENOMEM;
2186 }
2187
2188 dcmd = &cmd->frame->dcmd;
2189
2190 if (!instance->vf_affiliation) {
2191 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2192 "affiliation for scsi%d\n", instance->host->host_no);
2193 megasas_return_cmd(instance, cmd);
2194 return -ENOMEM;
2195 }
2196
2197 if (initial)
2198 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2199 sizeof(struct MR_LD_VF_AFFILIATION));
2200 else {
2201 new_affiliation =
2202 pci_alloc_consistent(instance->pdev,
2203 (MAX_LOGICAL_DRIVES + 1) *
2204 sizeof(struct MR_LD_VF_AFFILIATION),
2205 &new_affiliation_h);
2206 if (!new_affiliation) {
2207 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2208 "memory for new affiliation for scsi%d\n",
2209 instance->host->host_no);
2210 megasas_return_cmd(instance, cmd);
2211 return -ENOMEM;
2212 }
2213 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2214 sizeof(struct MR_LD_VF_AFFILIATION));
2215 }
2216
2217 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2218
2219 dcmd->cmd = MFI_CMD_DCMD;
2220 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2221 dcmd->sge_count = 1;
2222 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2223 dcmd->timeout = 0;
2224 dcmd->pad_0 = 0;
2225 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2226 sizeof(struct MR_LD_VF_AFFILIATION));
2227 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2228
2229 if (initial)
2230 dcmd->sgl.sge32[0].phys_addr =
2231 cpu_to_le32(instance->vf_affiliation_h);
2232 else
2233 dcmd->sgl.sge32[0].phys_addr =
2234 cpu_to_le32(new_affiliation_h);
2235
2236 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2237 sizeof(struct MR_LD_VF_AFFILIATION));
2238
2239 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2240 "scsi%d\n", instance->host->host_no);
2241
2242
2243 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2244 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2245 " failed with status 0x%x for scsi%d\n",
2246 dcmd->cmd_status, instance->host->host_no);
2247 retval = 1; /* Do a scan if we couldn't get affiliation */
2248 goto out;
2249 }
2250
2251 if (!initial) {
2252 if (!new_affiliation->ldCount) {
2253 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2254 "affiliation for passive path for scsi%d\n",
2255 instance->host->host_no);
2256 retval = 1;
2257 goto out;
2258 }
2259 newmap = new_affiliation->map;
2260 savedmap = instance->vf_affiliation->map;
2261 thisVf = new_affiliation->thisVf;
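/*
 * Compare the new and saved maps in both directions: a policy change for
 * this VF, or an LD visible in only one of the maps (and not hidden),
 * requires a rescan, signalled via doscan.
 */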
2262 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2263 found = 0;
2264 for (j = 0; j < instance->vf_affiliation->ldCount;
2265 j++) {
2266 if (newmap->ref.targetId ==
2267 savedmap->ref.targetId) {
2268 found = 1;
2269 if (newmap->policy[thisVf] !=
2270 savedmap->policy[thisVf]) {
2271 doscan = 1;
2272 goto out;
2273 }
2274 }
2275 savedmap = (struct MR_LD_VF_MAP *)
2276 ((unsigned char *)savedmap +
2277 savedmap->size);
2278 }
2279 if (!found && newmap->policy[thisVf] !=
2280 MR_LD_ACCESS_HIDDEN) {
2281 doscan = 1;
2282 goto out;
2283 }
2284 newmap = (struct MR_LD_VF_MAP *)
2285 ((unsigned char *)newmap + newmap->size);
2286 }
2287
2288 newmap = new_affiliation->map;
2289 savedmap = instance->vf_affiliation->map;
2290
2291 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2292 found = 0;
2293 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2294 if (savedmap->ref.targetId ==
2295 newmap->ref.targetId) {
2296 found = 1;
2297 if (savedmap->policy[thisVf] !=
2298 newmap->policy[thisVf]) {
2299 doscan = 1;
2300 goto out;
2301 }
2302 }
2303 newmap = (struct MR_LD_VF_MAP *)
2304 ((unsigned char *)newmap +
2305 newmap->size);
2306 }
2307 if (!found && savedmap->policy[thisVf] !=
2308 MR_LD_ACCESS_HIDDEN) {
2309 doscan = 1;
2310 goto out;
2311 }
2312 savedmap = (struct MR_LD_VF_MAP *)
2313 ((unsigned char *)savedmap +
2314 savedmap->size);
2315 }
2316 }
2317 out:
2318 if (doscan) {
2319 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2320 "affiliation for scsi%d\n", instance->host->host_no);
2321 memcpy(instance->vf_affiliation, new_affiliation,
2322 new_affiliation->size);
2323 retval = 1;
2324 }
2325
2326 if (new_affiliation)
2327 pci_free_consistent(instance->pdev,
2328 (MAX_LOGICAL_DRIVES + 1) *
2329 sizeof(struct MR_LD_VF_AFFILIATION),
2330 new_affiliation, new_affiliation_h);
2331 megasas_return_cmd(instance, cmd);
2332
2333 return retval;
2334 }
2335
2336 /* This function will get the current SR-IOV LD/VF affiliation */
2337 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2338 int initial)
2339 {
2340 int retval;
2341
2342 if (instance->PlasmaFW111)
2343 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2344 else
2345 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2346 return retval;
2347 }
2348
2349 /* This function will tell FW to start the SR-IOV heartbeat */
2350 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2351 int initial)
2352 {
2353 struct megasas_cmd *cmd;
2354 struct megasas_dcmd_frame *dcmd;
2355 int retval = 0;
2356
2357 cmd = megasas_get_cmd(instance);
2358
2359 if (!cmd) {
2360 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2361 "Failed to get cmd for scsi%d\n",
2362 instance->host->host_no);
2363 return -ENOMEM;
2364 }
2365
2366 dcmd = &cmd->frame->dcmd;
2367
2368 if (initial) {
2369 instance->hb_host_mem =
2370 pci_zalloc_consistent(instance->pdev,
2371 sizeof(struct MR_CTRL_HB_HOST_MEM),
2372 &instance->hb_host_mem_h);
2373 if (!instance->hb_host_mem) {
2374 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2375 " memory for heartbeat host memory for scsi%d\n",
2376 instance->host->host_no);
2377 retval = -ENOMEM;
2378 goto out;
2379 }
2380 }
2381
2382 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2383
2384 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2385 dcmd->cmd = MFI_CMD_DCMD;
2386 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2387 dcmd->sge_count = 1;
2388 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2389 dcmd->timeout = 0;
2390 dcmd->pad_0 = 0;
2391 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2392 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2393 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2394 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2395
2396 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2397 instance->host->host_no);
2398
2399 if (instance->ctrl_context && !instance->mask_interrupts)
2400 retval = megasas_issue_blocked_cmd(instance, cmd,
2401 MEGASAS_ROUTINE_WAIT_TIME_VF);
2402 else
2403 retval = megasas_issue_polled(instance, cmd);
2404
2405 if (retval) {
2406 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2407 "_MEM_ALLOC DCMD %s for scsi%d\n",
2408 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2409 "timed out" : "failed", instance->host->host_no);
2410 retval = 1;
2411 }
2412
2413 out:
2414 megasas_return_cmd(instance, cmd);
2415
2416 return retval;
2417 }
2418
2419 /* Handler for SR-IOV heartbeat */
2420 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2421 {
2422 struct megasas_instance *instance =
2423 (struct megasas_instance *)instance_addr;
2424
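/*
 * The FW advances fwCounter while it is alive. If it has not moved since
 * the driver last mirrored it into driverCounter, treat the heartbeat as
 * lost and schedule the recovery work instead of re-arming the timer.
 */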
2425 if (instance->hb_host_mem->HB.fwCounter !=
2426 instance->hb_host_mem->HB.driverCounter) {
2427 instance->hb_host_mem->HB.driverCounter =
2428 instance->hb_host_mem->HB.fwCounter;
2429 mod_timer(&instance->sriov_heartbeat_timer,
2430 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2431 } else {
2432 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2433 "completed for scsi%d\n", instance->host->host_no);
2434 schedule_work(&instance->work_init);
2435 }
2436 }
2437
2438 /**
2439 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2440 * @instance: Adapter soft state
2441 *
2442 * This function waits for up to resetwaittime seconds for the FW to
2443 * complete all its outstanding commands. If IOs are still pending after
2444 * that period and reset attempts fail, the controller is marked dead
2445 */
2446 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2447 {
2448 int i, sl, outstanding;
2449 u32 reset_index;
2450 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2451 unsigned long flags;
2452 struct list_head clist_local;
2453 struct megasas_cmd *reset_cmd;
2454 u32 fw_state;
2455
2456 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2457 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2458 __func__, __LINE__);
2459 return FAILED;
2460 }
2461
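/*
 * If an internal adapter reset is in progress (adapter not operational),
 * wait for it to finish and then complete or re-issue the commands that
 * were deferred onto the internal reset pending queue.
 */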
2462 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2463
2464 INIT_LIST_HEAD(&clist_local);
2465 spin_lock_irqsave(&instance->hba_lock, flags);
2466 list_splice_init(&instance->internal_reset_pending_q,
2467 &clist_local);
2468 spin_unlock_irqrestore(&instance->hba_lock, flags);
2469
2470 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2471 for (i = 0; i < wait_time; i++) {
2472 msleep(1000);
2473 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2474 break;
2475 }
2476
2477 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2478 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2479 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2480 return FAILED;
2481 }
2482
2483 reset_index = 0;
2484 while (!list_empty(&clist_local)) {
2485 reset_cmd = list_entry((&clist_local)->next,
2486 struct megasas_cmd, list);
2487 list_del_init(&reset_cmd->list);
2488 if (reset_cmd->scmd) {
2489 reset_cmd->scmd->result = DID_RESET << 16;
2490 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2491 reset_index, reset_cmd,
2492 reset_cmd->scmd->cmnd[0]);
2493
2494 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2495 megasas_return_cmd(instance, reset_cmd);
2496 } else if (reset_cmd->sync_cmd) {
2497 dev_notice(&instance->pdev->dev, "%p synch cmds"
2498 " on the reset queue\n",
2499 reset_cmd);
2500
2501 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2502 instance->instancet->fire_cmd(instance,
2503 reset_cmd->frame_phys_addr,
2504 0, instance->reg_set);
2505 } else {
2506 dev_notice(&instance->pdev->dev, "%p unexpected"
2507 "cmds lst\n",
2508 reset_cmd);
2509 }
2510 reset_index++;
2511 }
2512
2513 return SUCCESS;
2514 }
2515
2516 for (i = 0; i < resetwaittime; i++) {
2517 outstanding = atomic_read(&instance->fw_outstanding);
2518
2519 if (!outstanding)
2520 break;
2521
2522 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2523 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2524 "commands to complete\n",i,outstanding);
2525 /*
2526 * Call cmd completion routine. Cmd to be
2527 * completed directly without depending on isr.
2528 */
2529 megasas_complete_cmd_dpc((unsigned long)instance);
2530 }
2531
2532 msleep(1000);
2533 }
2534
2535 i = 0;
2536 outstanding = atomic_read(&instance->fw_outstanding);
2537 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2538
2539 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2540 goto no_outstanding;
2541
2542 if (instance->disableOnlineCtrlReset)
2543 goto kill_hba_and_failed;
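/*
 * FW is faulted or commands are still outstanding: attempt online
 * controller reset (OCR) a limited number of times, killing the HBA if
 * the retries are exhausted.
 */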
2544 do {
2545 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2546 dev_info(&instance->pdev->dev,
2547 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2548 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2549 if (i == 3)
2550 goto kill_hba_and_failed;
2551 megasas_do_ocr(instance);
2552
2553 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2554 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2555 __func__, __LINE__);
2556 return FAILED;
2557 }
2558 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2559 __func__, __LINE__);
2560
2561 for (sl = 0; sl < 10; sl++)
2562 msleep(500);
2563
2564 outstanding = atomic_read(&instance->fw_outstanding);
2565
2566 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2567 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2568 goto no_outstanding;
2569 }
2570 i++;
2571 } while (i <= 3);
2572
2573 no_outstanding:
2574
2575 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2576 __func__, __LINE__);
2577 return SUCCESS;
2578
2579 kill_hba_and_failed:
2580
2581 /* Reset not supported, kill adapter */
2582 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2583 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2584 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2585 atomic_read(&instance->fw_outstanding));
2586 megasas_dump_pending_frames(instance);
2587 megaraid_sas_kill_hba(instance);
2588
2589 return FAILED;
2590 }
2591
2592 /**
2593 * megasas_generic_reset - Generic reset routine
2594 * @scmd: Mid-layer SCSI command
2595 *
2596 * This routine implements a generic reset handler for device, bus and host
2597 * reset requests. Device, bus and host specific reset handlers can use this
2598 * function after they do their specific tasks.
2599 */
2600 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2601 {
2602 int ret_val;
2603 struct megasas_instance *instance;
2604
2605 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2606
2607 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2608 scmd->cmnd[0], scmd->retries);
2609
2610 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2611 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2612 return FAILED;
2613 }
2614
2615 ret_val = megasas_wait_for_outstanding(instance);
2616 if (ret_val == SUCCESS)
2617 dev_notice(&instance->pdev->dev, "reset successful\n");
2618 else
2619 dev_err(&instance->pdev->dev, "failed to do reset\n");
2620
2621 return ret_val;
2622 }
2623
2624 /**
2625 * megasas_reset_timer - quiesce the adapter if required
2626 * @scmd: scsi cmnd
2627 *
2628 * Sets the FW busy flag and reduces the host->can_queue if the
2629 * cmd has not been completed within the timeout period.
2630 */
2631 static enum
2632 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2633 {
2634 struct megasas_instance *instance;
2635 unsigned long flags;
2636
2637 if (time_after(jiffies, scmd->jiffies_at_alloc +
2638 (scmd_timeout * 2) * HZ)) {
2639 return BLK_EH_NOT_HANDLED;
2640 }
2641
2642 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2643 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2644 /* FW not yet marked busy: throttle IO and note the time */
2645 spin_lock_irqsave(instance->host->host_lock, flags);
2646
2647 instance->host->can_queue = instance->throttlequeuedepth;
2648 instance->last_time = jiffies;
2649 instance->flag |= MEGASAS_FW_BUSY;
2650
2651 spin_unlock_irqrestore(instance->host->host_lock, flags);
2652 }
2653 return BLK_EH_RESET_TIMER;
2654 }
2655
2656 /**
2657 * megasas_reset_bus_host - Bus & host reset handler entry point
2658 */
2659 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2660 {
2661 int ret;
2662 struct megasas_instance *instance;
2663
2664 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2665
2666 /*
2667 * First wait for all commands to complete
2668 */
2669 if (instance->ctrl_context)
2670 ret = megasas_reset_fusion(scmd->device->host, 1);
2671 else
2672 ret = megasas_generic_reset(scmd);
2673
2674 return ret;
2675 }
2676
2677 /**
2678 * megasas_task_abort - Issues task abort request to firmware
2679 * (supported only for fusion adapters)
2680 * @scmd: SCSI command pointer
2681 */
2682 static int megasas_task_abort(struct scsi_cmnd *scmd)
2683 {
2684 int ret;
2685 struct megasas_instance *instance;
2686
2687 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2688
2689 if (instance->ctrl_context)
2690 ret = megasas_task_abort_fusion(scmd);
2691 else {
2692 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2693 ret = FAILED;
2694 }
2695
2696 return ret;
2697 }
2698
2699 /**
2700 * megasas_reset_target: Issues target reset request to firmware
2701 * (supported only for fusion adapters)
2702 * @scmd: SCSI command pointer
2703 */
2704 static int megasas_reset_target(struct scsi_cmnd *scmd)
2705 {
2706 int ret;
2707 struct megasas_instance *instance;
2708
2709 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2710
2711 if (instance->ctrl_context)
2712 ret = megasas_reset_target_fusion(scmd);
2713 else {
2714 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2715 ret = FAILED;
2716 }
2717
2718 return ret;
2719 }
2720
2721 /**
2722 * megasas_bios_param - Returns disk geometry for a disk
2723 * @sdev: device handle
2724 * @bdev: block device
2725 * @capacity: drive capacity
2726 * @geom: geometry parameters
2727 */
2728 static int
2729 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2730 sector_t capacity, int geom[])
2731 {
2732 int heads;
2733 int sectors;
2734 sector_t cylinders;
2735 unsigned long tmp;
2736
2737 /* Default heads (64) & sectors (32) */
2738 heads = 64;
2739 sectors = 32;
2740
2741 tmp = heads * sectors;
2742 cylinders = capacity;
2743
2744 sector_div(cylinders, tmp);
2745
2746 /*
2747 * Handle extended translation size for logical drives > 1Gb
2748 */
2749
2750 if (capacity >= 0x200000) {
2751 heads = 255;
2752 sectors = 63;
2753 tmp = heads*sectors;
2754 cylinders = capacity;
2755 sector_div(cylinders, tmp);
2756 }
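/*
 * Worked example (illustrative figures only): a drive of 2147483648
 * 512-byte sectors (1 TiB) exceeds the 0x200000 threshold, so it reports
 * 255 heads, 63 sectors/track and 2147483648 / (255 * 63) ~= 133674
 * cylinders.
 */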
2757
2758 geom[0] = heads;
2759 geom[1] = sectors;
2760 geom[2] = cylinders;
2761
2762 return 0;
2763 }
2764
2765 static void megasas_aen_polling(struct work_struct *work);
2766
2767 /**
2768 * megasas_service_aen - Processes an event notification
2769 * @instance: Adapter soft state
2770 * @cmd: AEN command completed by the ISR
2771 *
2772 * For AEN, driver sends a command down to FW that is held by the FW till an
2773 * event occurs. When an event of interest occurs, FW completes the command
2774 * that it was previously holding.
2775 *
2776 * This routine sends a SIGIO signal to processes that have registered with the
2777 * driver for AEN.
2778 */
2779 static void
2780 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2781 {
2782 unsigned long flags;
2783
2784 /*
2785 * Don't signal the app if this is just an abort of a previously registered AEN
2786 */
2787 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2788 spin_lock_irqsave(&poll_aen_lock, flags);
2789 megasas_poll_wait_aen = 1;
2790 spin_unlock_irqrestore(&poll_aen_lock, flags);
2791 wake_up(&megasas_poll_wait);
2792 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2793 }
2794 else
2795 cmd->abort_aen = 0;
2796
2797 instance->aen_cmd = NULL;
2798
2799 megasas_return_cmd(instance, cmd);
2800
2801 if ((instance->unload == 0) &&
2802 ((instance->issuepend_done == 1))) {
2803 struct megasas_aen_event *ev;
2804
2805 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2806 if (!ev) {
2807 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2808 } else {
2809 ev->instance = instance;
2810 instance->ev = ev;
2811 INIT_DELAYED_WORK(&ev->hotplug_work,
2812 megasas_aen_polling);
2813 schedule_delayed_work(&ev->hotplug_work, 0);
2814 }
2815 }
2816 }
2817
2818 static ssize_t
2819 megasas_fw_crash_buffer_store(struct device *cdev,
2820 struct device_attribute *attr, const char *buf, size_t count)
2821 {
2822 struct Scsi_Host *shost = class_to_shost(cdev);
2823 struct megasas_instance *instance =
2824 (struct megasas_instance *) shost->hostdata;
2825 int val = 0;
2826 unsigned long flags;
2827
2828 if (kstrtoint(buf, 0, &val) != 0)
2829 return -EINVAL;
2830
2831 spin_lock_irqsave(&instance->crashdump_lock, flags);
2832 instance->fw_crash_buffer_offset = val;
2833 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2834 return strlen(buf);
2835 }
2836
2837 static ssize_t
2838 megasas_fw_crash_buffer_show(struct device *cdev,
2839 struct device_attribute *attr, char *buf)
2840 {
2841 struct Scsi_Host *shost = class_to_shost(cdev);
2842 struct megasas_instance *instance =
2843 (struct megasas_instance *) shost->hostdata;
2844 u32 size;
2845 unsigned long buff_addr;
2846 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2847 unsigned long src_addr;
2848 unsigned long flags;
2849 u32 buff_offset;
2850
2851 spin_lock_irqsave(&instance->crashdump_lock, flags);
2852 buff_offset = instance->fw_crash_buffer_offset;
2853 if (!instance->crash_dump_buf &&
2854 !((instance->fw_crash_state == AVAILABLE) ||
2855 (instance->fw_crash_state == COPYING))) {
2856 dev_err(&instance->pdev->dev,
2857 "Firmware crash dump is not available\n");
2858 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2859 return -EINVAL;
2860 }
2861
2862 buff_addr = (unsigned long) buf;
2863
2864 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2865 dev_err(&instance->pdev->dev,
2866 "Firmware crash dump offset is out of range\n");
2867 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2868 return 0;
2869 }
2870
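/*
 * Copy at most one page (less a byte) from the DMA chunk that contains
 * the requested offset. Userspace selects the read position beforehand by
 * writing an offset to the fw_crash_buffer attribute and reads the dump
 * out piecewise.
 */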
2871 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2872 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2873
2874 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2875 (buff_offset % dmachunk);
2876 memcpy(buf, (void *)src_addr, size);
2877 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2878
2879 return size;
2880 }
2881
2882 static ssize_t
2883 megasas_fw_crash_buffer_size_show(struct device *cdev,
2884 struct device_attribute *attr, char *buf)
2885 {
2886 struct Scsi_Host *shost = class_to_shost(cdev);
2887 struct megasas_instance *instance =
2888 (struct megasas_instance *) shost->hostdata;
2889
2890 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
2891 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
2892 }
2893
2894 static ssize_t
2895 megasas_fw_crash_state_store(struct device *cdev,
2896 struct device_attribute *attr, const char *buf, size_t count)
2897 {
2898 struct Scsi_Host *shost = class_to_shost(cdev);
2899 struct megasas_instance *instance =
2900 (struct megasas_instance *) shost->hostdata;
2901 int val = 0;
2902 unsigned long flags;
2903
2904 if (kstrtoint(buf, 0, &val) != 0)
2905 return -EINVAL;
2906
2907 if (val <= AVAILABLE || val > COPY_ERROR) {
2908 dev_err(&instance->pdev->dev, "application updates invalid "
2909 "firmware crash state\n");
2910 return -EINVAL;
2911 }
2912
2913 instance->fw_crash_state = val;
2914
2915 if ((val == COPIED) || (val == COPY_ERROR)) {
2916 spin_lock_irqsave(&instance->crashdump_lock, flags);
2917 megasas_free_host_crash_buffer(instance);
2918 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2919 if (val == COPY_ERROR)
2920 dev_info(&instance->pdev->dev, "application failed to "
2921 "copy Firmware crash dump\n");
2922 else
2923 dev_info(&instance->pdev->dev, "Firmware crash dump "
2924 "copied successfully\n");
2925 }
2926 return strlen(buf);
2927 }
2928
2929 static ssize_t
2930 megasas_fw_crash_state_show(struct device *cdev,
2931 struct device_attribute *attr, char *buf)
2932 {
2933 struct Scsi_Host *shost = class_to_shost(cdev);
2934 struct megasas_instance *instance =
2935 (struct megasas_instance *) shost->hostdata;
2936
2937 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
2938 }
2939
2940 static ssize_t
2941 megasas_page_size_show(struct device *cdev,
2942 struct device_attribute *attr, char *buf)
2943 {
2944 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
2945 }
2946
2947 static ssize_t
2948 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
2949 char *buf)
2950 {
2951 struct Scsi_Host *shost = class_to_shost(cdev);
2952 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
2953
2954 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
2955 }
2956
2957 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
2958 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
2959 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
2960 megasas_fw_crash_buffer_size_show, NULL);
2961 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
2962 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
2963 static DEVICE_ATTR(page_size, S_IRUGO,
2964 megasas_page_size_show, NULL);
2965 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
2966 megasas_ldio_outstanding_show, NULL);
2967
2968 struct device_attribute *megaraid_host_attrs[] = {
2969 &dev_attr_fw_crash_buffer_size,
2970 &dev_attr_fw_crash_buffer,
2971 &dev_attr_fw_crash_state,
2972 &dev_attr_page_size,
2973 &dev_attr_ldio_outstanding,
2974 NULL,
2975 };
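/*
 * These attributes show up under the Scsi_Host class device (typically
 * /sys/class/scsi_host/host<N>/). A hypothetical dump collector would read
 * fw_crash_buffer_size, then repeatedly write an offset to fw_crash_buffer
 * and read the chunk back, and finally write COPIED (or COPY_ERROR) to
 * fw_crash_state so the host crash buffer is freed.
 */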
2976
2977 /*
2978 * Scsi host template for megaraid_sas driver
2979 */
2980 static struct scsi_host_template megasas_template = {
2981
2982 .module = THIS_MODULE,
2983 .name = "Avago SAS based MegaRAID driver",
2984 .proc_name = "megaraid_sas",
2985 .slave_configure = megasas_slave_configure,
2986 .slave_alloc = megasas_slave_alloc,
2987 .slave_destroy = megasas_slave_destroy,
2988 .queuecommand = megasas_queue_command,
2989 .eh_target_reset_handler = megasas_reset_target,
2990 .eh_abort_handler = megasas_task_abort,
2991 .eh_host_reset_handler = megasas_reset_bus_host,
2992 .eh_timed_out = megasas_reset_timer,
2993 .shost_attrs = megaraid_host_attrs,
2994 .bios_param = megasas_bios_param,
2995 .use_clustering = ENABLE_CLUSTERING,
2996 .change_queue_depth = scsi_change_queue_depth,
2997 .no_write_same = 1,
2998 };
2999
3000 /**
3001 * megasas_complete_int_cmd - Completes an internal command
3002 * @instance: Adapter soft state
3003 * @cmd: Command to be completed
3004 *
3005 * The megasas_issue_blocked_cmd() function waits for a command to complete
3006 * after it issues a command. This function wakes up that waiting routine by
3007 * calling wake_up() on the wait queue.
3008 */
3009 static void
3010 megasas_complete_int_cmd(struct megasas_instance *instance,
3011 struct megasas_cmd *cmd)
3012 {
3013 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3014 wake_up(&instance->int_cmd_wait_q);
3015 }
3016
3017 /**
3018 * megasas_complete_abort - Completes aborting a command
3019 * @instance: Adapter soft state
3020 * @cmd: Cmd that was issued to abort another cmd
3021 *
3022 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3023 * after it issues an abort on a previously issued command. This function
3024 * wakes up all functions waiting on the same wait queue.
3025 */
3026 static void
3027 megasas_complete_abort(struct megasas_instance *instance,
3028 struct megasas_cmd *cmd)
3029 {
3030 if (cmd->sync_cmd) {
3031 cmd->sync_cmd = 0;
3032 cmd->cmd_status_drv = 0;
3033 wake_up(&instance->abort_cmd_wait_q);
3034 }
3035 }
3036
3037 /**
3038 * megasas_complete_cmd - Completes a command
3039 * @instance: Adapter soft state
3040 * @cmd: Command to be completed
3041 * @alt_status: If non-zero, use this value as status to
3042 * SCSI mid-layer instead of the value returned
3043 * by the FW. This should be used if caller wants
3044 * an alternate status (as in the case of aborted
3045 * commands)
3046 */
3047 void
3048 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3049 u8 alt_status)
3050 {
3051 int exception = 0;
3052 struct megasas_header *hdr = &cmd->frame->hdr;
3053 unsigned long flags;
3054 struct fusion_context *fusion = instance->ctrl_context;
3055 u32 opcode, status;
3056
3057 /* flag for the retry reset */
3058 cmd->retry_for_fw_reset = 0;
3059
3060 if (cmd->scmd)
3061 cmd->scmd->SCp.ptr = NULL;
3062
3063 switch (hdr->cmd) {
3064 case MFI_CMD_INVALID:
3065 /* Some older 1068 controller FW may keep a pended
3066 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3067 when booting the kdump kernel. Ignore this command to
3068 prevent a kernel panic on shutdown of the kdump kernel. */
3069 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3070 "completed\n");
3071 dev_warn(&instance->pdev->dev, "If you have a controller "
3072 "other than PERC5, please upgrade your firmware\n");
3073 break;
3074 case MFI_CMD_PD_SCSI_IO:
3075 case MFI_CMD_LD_SCSI_IO:
3076
3077 /*
3078 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3079 * issued either through an IO path or an IOCTL path. If it
3080 * was via IOCTL, we will send it to internal completion.
3081 */
3082 if (cmd->sync_cmd) {
3083 cmd->sync_cmd = 0;
3084 megasas_complete_int_cmd(instance, cmd);
3085 break;
3086 }
3087
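/* fall through - non-ioctl PD/LD SCSI IO is completed like LD read/write */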
3088 case MFI_CMD_LD_READ:
3089 case MFI_CMD_LD_WRITE:
3090
3091 if (alt_status) {
3092 cmd->scmd->result = alt_status << 16;
3093 exception = 1;
3094 }
3095
3096 if (exception) {
3097
3098 atomic_dec(&instance->fw_outstanding);
3099
3100 scsi_dma_unmap(cmd->scmd);
3101 cmd->scmd->scsi_done(cmd->scmd);
3102 megasas_return_cmd(instance, cmd);
3103
3104 break;
3105 }
3106
3107 switch (hdr->cmd_status) {
3108
3109 case MFI_STAT_OK:
3110 cmd->scmd->result = DID_OK << 16;
3111 break;
3112
3113 case MFI_STAT_SCSI_IO_FAILED:
3114 case MFI_STAT_LD_INIT_IN_PROGRESS:
3115 cmd->scmd->result =
3116 (DID_ERROR << 16) | hdr->scsi_status;
3117 break;
3118
3119 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3120
3121 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3122
3123 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3124 memset(cmd->scmd->sense_buffer, 0,
3125 SCSI_SENSE_BUFFERSIZE);
3126 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3127 hdr->sense_len);
3128
3129 cmd->scmd->result |= DRIVER_SENSE << 24;
3130 }
3131
3132 break;
3133
3134 case MFI_STAT_LD_OFFLINE:
3135 case MFI_STAT_DEVICE_NOT_FOUND:
3136 cmd->scmd->result = DID_BAD_TARGET << 16;
3137 break;
3138
3139 default:
3140 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3141 hdr->cmd_status);
3142 cmd->scmd->result = DID_ERROR << 16;
3143 break;
3144 }
3145
3146 atomic_dec(&instance->fw_outstanding);
3147
3148 scsi_dma_unmap(cmd->scmd);
3149 cmd->scmd->scsi_done(cmd->scmd);
3150 megasas_return_cmd(instance, cmd);
3151
3152 break;
3153
3154 case MFI_CMD_SMP:
3155 case MFI_CMD_STP:
3156 case MFI_CMD_DCMD:
3157 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3158 /* Check for LD map update */
3159 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3160 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3161 fusion->fast_path_io = 0;
3162 spin_lock_irqsave(instance->host->host_lock, flags);
3163 instance->map_update_cmd = NULL;
3164 if (cmd->frame->hdr.cmd_status != 0) {
3165 if (cmd->frame->hdr.cmd_status !=
3166 MFI_STAT_NOT_FOUND)
3167 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3168 cmd->frame->hdr.cmd_status);
3169 else {
3170 megasas_return_cmd(instance, cmd);
3171 spin_unlock_irqrestore(
3172 instance->host->host_lock,
3173 flags);
3174 break;
3175 }
3176 } else
3177 instance->map_id++;
3178 megasas_return_cmd(instance, cmd);
3179
3180 /*
3181 * Set fast path IO to ZERO.
3182 * Validate Map will set proper value.
3183 * Meanwhile all IOs will go as LD IO.
3184 */
3185 if (MR_ValidateMapInfo(instance))
3186 fusion->fast_path_io = 1;
3187 else
3188 fusion->fast_path_io = 0;
3189 megasas_sync_map_info(instance);
3190 spin_unlock_irqrestore(instance->host->host_lock,
3191 flags);
3192 break;
3193 }
3194 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3195 opcode == MR_DCMD_CTRL_EVENT_GET) {
3196 spin_lock_irqsave(&poll_aen_lock, flags);
3197 megasas_poll_wait_aen = 0;
3198 spin_unlock_irqrestore(&poll_aen_lock, flags);
3199 }
3200
3201 /* FW has an updated PD sequence */
3202 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3203 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3204
3205 spin_lock_irqsave(instance->host->host_lock, flags);
3206 status = cmd->frame->hdr.cmd_status;
3207 instance->jbod_seq_cmd = NULL;
3208 megasas_return_cmd(instance, cmd);
3209
3210 if (status == MFI_STAT_OK) {
3211 instance->pd_seq_map_id++;
3212 /* Re-register a pd sync seq num cmd */
3213 if (megasas_sync_pd_seq_num(instance, true))
3214 instance->use_seqnum_jbod_fp = false;
3215 } else
3216 instance->use_seqnum_jbod_fp = false;
3217
3218 spin_unlock_irqrestore(instance->host->host_lock, flags);
3219 break;
3220 }
3221
3222 /*
3223 * See if we got an event notification
3224 */
3225 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3226 megasas_service_aen(instance, cmd);
3227 else
3228 megasas_complete_int_cmd(instance, cmd);
3229
3230 break;
3231
3232 case MFI_CMD_ABORT:
3233 /*
3234 * Cmd issued to abort another cmd returned
3235 */
3236 megasas_complete_abort(instance, cmd);
3237 break;
3238
3239 default:
3240 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3241 hdr->cmd);
3242 break;
3243 }
3244 }
3245
3246 /**
3247 * megasas_issue_pending_cmds_again - issue all pending cmds
3248 * in FW again because of the fw reset
3249 * @instance: Adapter soft state
3250 */
3251 static inline void
3252 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3253 {
3254 struct megasas_cmd *cmd;
3255 struct list_head clist_local;
3256 union megasas_evt_class_locale class_locale;
3257 unsigned long flags;
3258 u32 seq_num;
3259
3260 INIT_LIST_HEAD(&clist_local);
3261 spin_lock_irqsave(&instance->hba_lock, flags);
3262 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3263 spin_unlock_irqrestore(&instance->hba_lock, flags);
3264
3265 while (!list_empty(&clist_local)) {
3266 cmd = list_entry((&clist_local)->next,
3267 struct megasas_cmd, list);
3268 list_del_init(&cmd->list);
3269
3270 if (cmd->sync_cmd || cmd->scmd) {
3271 dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3272 "detected to be pending while HBA reset\n",
3273 cmd, cmd->scmd, cmd->sync_cmd);
3274
3275 cmd->retry_for_fw_reset++;
3276
3277 if (cmd->retry_for_fw_reset == 3) {
3278 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3279 "was tried multiple times during reset."
3280 "Shutting down the HBA\n",
3281 cmd, cmd->scmd, cmd->sync_cmd);
3282 instance->instancet->disable_intr(instance);
3283 atomic_set(&instance->fw_reset_no_pci_access, 1);
3284 megaraid_sas_kill_hba(instance);
3285 return;
3286 }
3287 }
3288
3289 if (cmd->sync_cmd == 1) {
3290 if (cmd->scmd) {
3291 dev_notice(&instance->pdev->dev, "unexpected"
3292 "cmd attached to internal command!\n");
3293 }
3294 dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3295 " on the internal reset queue,"
3296 " issue it again.\n", cmd);
3297 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3298 instance->instancet->fire_cmd(instance,
3299 cmd->frame_phys_addr,
3300 0, instance->reg_set);
3301 } else if (cmd->scmd) {
3302 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3303 "detected on the internal queue, issue again.\n",
3304 cmd, cmd->scmd->cmnd[0]);
3305
3306 atomic_inc(&instance->fw_outstanding);
3307 instance->instancet->fire_cmd(instance,
3308 cmd->frame_phys_addr,
3309 cmd->frame_count-1, instance->reg_set);
3310 } else {
3311 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3312 "internal reset defer list while re-issue!!\n",
3313 cmd);
3314 }
3315 }
3316
3317 if (instance->aen_cmd) {
3318 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3319 megasas_return_cmd(instance, instance->aen_cmd);
3320
3321 instance->aen_cmd = NULL;
3322 }
3323
3324 /*
3325 * Initiate AEN (Asynchronous Event Notification)
3326 */
3327 seq_num = instance->last_seq_num;
3328 class_locale.members.reserved = 0;
3329 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3330 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3331
3332 megasas_register_aen(instance, seq_num, class_locale.word);
3333 }
3334
3335 /**
3336 * Move the internal reset pending commands to a deferred queue.
3337 *
3338 * We move the commands pending at internal reset time to a
3339 * pending queue. This queue would be flushed after successful
3340 * completion of the internal reset sequence. If the internal reset
3341 * does not complete in time, the kernel reset handler will flush
3342 * these commands.
3343 **/
3344 static void
3345 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3346 {
3347 struct megasas_cmd *cmd;
3348 int i;
3349 u32 max_cmd = instance->max_fw_cmds;
3350 u32 defer_index;
3351 unsigned long flags;
3352
3353 defer_index = 0;
3354 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3355 for (i = 0; i < max_cmd; i++) {
3356 cmd = instance->cmd_list[i];
3357 if (cmd->sync_cmd == 1 || cmd->scmd) {
3358 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3359 "on the defer queue as internal\n",
3360 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3361
3362 if (!list_empty(&cmd->list)) {
3363 dev_notice(&instance->pdev->dev, "ERROR while"
3364 " moving this cmd:%p, %d %p, it was"
3365 "discovered on some list?\n",
3366 cmd, cmd->sync_cmd, cmd->scmd);
3367
3368 list_del_init(&cmd->list);
3369 }
3370 defer_index++;
3371 list_add_tail(&cmd->list,
3372 &instance->internal_reset_pending_q);
3373 }
3374 }
3375 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3376 }
3377
3378
3379 static void
3380 process_fw_state_change_wq(struct work_struct *work)
3381 {
3382 struct megasas_instance *instance =
3383 container_of(work, struct megasas_instance, work_init);
3384 u32 wait;
3385 unsigned long flags;
3386
3387 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3388 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3389 atomic_read(&instance->adprecovery));
3390 return;
3391 }
3392
3393 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3394 dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3395 "state, restarting it...\n");
3396
3397 instance->instancet->disable_intr(instance);
3398 atomic_set(&instance->fw_outstanding, 0);
3399
3400 atomic_set(&instance->fw_reset_no_pci_access, 1);
3401 instance->instancet->adp_reset(instance, instance->reg_set);
3402 atomic_set(&instance->fw_reset_no_pci_access, 0);
3403
3404 dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3405 "initiating next stage...\n");
3406
3407 dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3408 "state 2 starting...\n");
3409
3410 /* wait about 30 seconds before starting the second init */
3411 for (wait = 0; wait < 30; wait++) {
3412 msleep(1000);
3413 }
3414
3415 if (megasas_transition_to_ready(instance, 1)) {
3416 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3417
3418 atomic_set(&instance->fw_reset_no_pci_access, 1);
3419 megaraid_sas_kill_hba(instance);
3420 return;
3421 }
3422
3423 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3424 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3425 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3426 ) {
3427 *instance->consumer = *instance->producer;
3428 } else {
3429 *instance->consumer = 0;
3430 *instance->producer = 0;
3431 }
3432
3433 megasas_issue_init_mfi(instance);
3434
3435 spin_lock_irqsave(&instance->hba_lock, flags);
3436 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3437 spin_unlock_irqrestore(&instance->hba_lock, flags);
3438 instance->instancet->enable_intr(instance);
3439
3440 megasas_issue_pending_cmds_again(instance);
3441 instance->issuepend_done = 1;
3442 }
3443 }
3444
3445 /**
3446 * megasas_deplete_reply_queue - Processes all completed commands
3447 * @instance: Adapter soft state
3448 * @alt_status: Alternate status to be returned to
3449 * SCSI mid-layer instead of the status
3450 * returned by the FW
3451 * Note: this must be called with hba lock held
3452 */
3453 static int
3454 megasas_deplete_reply_queue(struct megasas_instance *instance,
3455 u8 alt_status)
3456 {
3457 u32 mfiStatus;
3458 u32 fw_state;
3459
3460 if ((mfiStatus = instance->instancet->check_reset(instance,
3461 instance->reg_set)) == 1) {
3462 return IRQ_HANDLED;
3463 }
3464
3465 if ((mfiStatus = instance->instancet->clear_intr(
3466 instance->reg_set)
3467 ) == 0) {
3468 /* Hardware may not set outbound_intr_status in MSI-X mode */
3469 if (!instance->msix_vectors)
3470 return IRQ_NONE;
3471 }
3472
3473 instance->mfiStatus = mfiStatus;
3474
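/*
 * A firmware state-change interrupt with the FW in FAULT (and online
 * controller reset not disabled) defers pending commands and schedules the
 * adapter reset work; everything else is handled by the completion tasklet
 * below.
 */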
3475 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3476 fw_state = instance->instancet->read_fw_status_reg(
3477 instance->reg_set) & MFI_STATE_MASK;
3478
3479 if (fw_state != MFI_STATE_FAULT) {
3480 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3481 fw_state);
3482 }
3483
3484 if ((fw_state == MFI_STATE_FAULT) &&
3485 (instance->disableOnlineCtrlReset == 0)) {
3486 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3487
3488 if ((instance->pdev->device ==
3489 PCI_DEVICE_ID_LSI_SAS1064R) ||
3490 (instance->pdev->device ==
3491 PCI_DEVICE_ID_DELL_PERC5) ||
3492 (instance->pdev->device ==
3493 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3494
3495 *instance->consumer =
3496 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3497 }
3498
3499
3500 instance->instancet->disable_intr(instance);
3501 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3502 instance->issuepend_done = 0;
3503
3504 atomic_set(&instance->fw_outstanding, 0);
3505 megasas_internal_reset_defer_cmds(instance);
3506
3507 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3508 fw_state, atomic_read(&instance->adprecovery));
3509
3510 schedule_work(&instance->work_init);
3511 return IRQ_HANDLED;
3512
3513 } else {
3514 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3515 fw_state, instance->disableOnlineCtrlReset);
3516 }
3517 }
3518
3519 tasklet_schedule(&instance->isr_tasklet);
3520 return IRQ_HANDLED;
3521 }
3522 /**
3523 * megasas_isr - isr entry point
3524 */
3525 static irqreturn_t megasas_isr(int irq, void *devp)
3526 {
3527 struct megasas_irq_context *irq_context = devp;
3528 struct megasas_instance *instance = irq_context->instance;
3529 unsigned long flags;
3530 irqreturn_t rc;
3531
3532 if (atomic_read(&instance->fw_reset_no_pci_access))
3533 return IRQ_HANDLED;
3534
3535 spin_lock_irqsave(&instance->hba_lock, flags);
3536 rc = megasas_deplete_reply_queue(instance, DID_OK);
3537 spin_unlock_irqrestore(&instance->hba_lock, flags);
3538
3539 return rc;
3540 }
3541
3542 /**
3543 * megasas_transition_to_ready - Move the FW to READY state
3544 * @instance: Adapter soft state
3545 *
3546 * During initialization, the FW can potentially be in any one of
3547 * several possible states. If the FW is in an operational or
3548 * waiting-for-handshake state, the driver must take steps to bring it
3549 * to the ready state. Otherwise, it has to wait for the ready state.
3550 */
3551 int
3552 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3553 {
3554 int i;
3555 u8 max_wait;
3556 u32 fw_state;
3557 u32 cur_state;
3558 u32 abs_state, curr_abs_state;
3559
3560 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3561 fw_state = abs_state & MFI_STATE_MASK;
3562
3563 if (fw_state != MFI_STATE_READY)
3564 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3565 " state\n");
3566
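/*
 * Each state handled below nudges the FW where needed (doorbell writes)
 * and picks a per-state timeout; the loop then polls until the absolute
 * FW state changes or the timeout expires.
 */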
3567 while (fw_state != MFI_STATE_READY) {
3568
3569 switch (fw_state) {
3570
3571 case MFI_STATE_FAULT:
3572 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3573 if (ocr) {
3574 max_wait = MEGASAS_RESET_WAIT_TIME;
3575 cur_state = MFI_STATE_FAULT;
3576 break;
3577 } else
3578 return -ENODEV;
3579
3580 case MFI_STATE_WAIT_HANDSHAKE:
3581 /*
3582 * Set the CLR bit in inbound doorbell
3583 */
3584 if ((instance->pdev->device ==
3585 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3586 (instance->pdev->device ==
3587 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3588 (instance->ctrl_context))
3589 writel(
3590 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3591 &instance->reg_set->doorbell);
3592 else
3593 writel(
3594 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3595 &instance->reg_set->inbound_doorbell);
3596
3597 max_wait = MEGASAS_RESET_WAIT_TIME;
3598 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3599 break;
3600
3601 case MFI_STATE_BOOT_MESSAGE_PENDING:
3602 if ((instance->pdev->device ==
3603 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3604 (instance->pdev->device ==
3605 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3606 (instance->ctrl_context))
3607 writel(MFI_INIT_HOTPLUG,
3608 &instance->reg_set->doorbell);
3609 else
3610 writel(MFI_INIT_HOTPLUG,
3611 &instance->reg_set->inbound_doorbell);
3612
3613 max_wait = MEGASAS_RESET_WAIT_TIME;
3614 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3615 break;
3616
3617 case MFI_STATE_OPERATIONAL:
3618 /*
3619 * Bring it to READY state; assuming max wait 10 secs
3620 */
3621 instance->instancet->disable_intr(instance);
3622 if ((instance->pdev->device ==
3623 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3624 (instance->pdev->device ==
3625 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3626 (instance->ctrl_context)) {
3627 writel(MFI_RESET_FLAGS,
3628 &instance->reg_set->doorbell);
3629
3630 if (instance->ctrl_context) {
3631 for (i = 0; i < (10 * 1000); i += 20) {
3632 if (readl(
3633 &instance->
3634 reg_set->
3635 doorbell) & 1)
3636 msleep(20);
3637 else
3638 break;
3639 }
3640 }
3641 } else
3642 writel(MFI_RESET_FLAGS,
3643 &instance->reg_set->inbound_doorbell);
3644
3645 max_wait = MEGASAS_RESET_WAIT_TIME;
3646 cur_state = MFI_STATE_OPERATIONAL;
3647 break;
3648
3649 case MFI_STATE_UNDEFINED:
3650 /*
3651 * This state should not last for more than 2 seconds
3652 */
3653 max_wait = MEGASAS_RESET_WAIT_TIME;
3654 cur_state = MFI_STATE_UNDEFINED;
3655 break;
3656
3657 case MFI_STATE_BB_INIT:
3658 max_wait = MEGASAS_RESET_WAIT_TIME;
3659 cur_state = MFI_STATE_BB_INIT;
3660 break;
3661
3662 case MFI_STATE_FW_INIT:
3663 max_wait = MEGASAS_RESET_WAIT_TIME;
3664 cur_state = MFI_STATE_FW_INIT;
3665 break;
3666
3667 case MFI_STATE_FW_INIT_2:
3668 max_wait = MEGASAS_RESET_WAIT_TIME;
3669 cur_state = MFI_STATE_FW_INIT_2;
3670 break;
3671
3672 case MFI_STATE_DEVICE_SCAN:
3673 max_wait = MEGASAS_RESET_WAIT_TIME;
3674 cur_state = MFI_STATE_DEVICE_SCAN;
3675 break;
3676
3677 case MFI_STATE_FLUSH_CACHE:
3678 max_wait = MEGASAS_RESET_WAIT_TIME;
3679 cur_state = MFI_STATE_FLUSH_CACHE;
3680 break;
3681
3682 default:
3683 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3684 fw_state);
3685 return -ENODEV;
3686 }
3687
3688 /*
3689 * The cur_state should not last for more than max_wait secs
3690 */
3691 for (i = 0; i < (max_wait * 1000); i++) {
3692 curr_abs_state = instance->instancet->
3693 read_fw_status_reg(instance->reg_set);
3694
3695 if (abs_state == curr_abs_state) {
3696 msleep(1);
3697 } else
3698 break;
3699 }
3700
3701 /*
3702 * Return error if fw_state hasn't changed after max_wait
3703 */
3704 if (curr_abs_state == abs_state) {
3705 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3706 "in %d secs\n", fw_state, max_wait);
3707 return -ENODEV;
3708 }
3709
3710 abs_state = curr_abs_state;
3711 fw_state = curr_abs_state & MFI_STATE_MASK;
3712 }
3713 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3714
3715 return 0;
3716 }
3717
3718 /**
3719 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3720 * @instance: Adapter soft state
3721 */
3722 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3723 {
3724 int i;
3725 u32 max_cmd = instance->max_mfi_cmds;
3726 struct megasas_cmd *cmd;
3727
3728 if (!instance->frame_dma_pool)
3729 return;
3730
3731 /*
3732 * Return all frames to pool
3733 */
3734 for (i = 0; i < max_cmd; i++) {
3735
3736 cmd = instance->cmd_list[i];
3737
3738 if (cmd->frame)
3739 pci_pool_free(instance->frame_dma_pool, cmd->frame,
3740 cmd->frame_phys_addr);
3741
3742 if (cmd->sense)
3743 pci_pool_free(instance->sense_dma_pool, cmd->sense,
3744 cmd->sense_phys_addr);
3745 }
3746
3747 /*
3748 * Now destroy the pool itself
3749 */
3750 pci_pool_destroy(instance->frame_dma_pool);
3751 pci_pool_destroy(instance->sense_dma_pool);
3752
3753 instance->frame_dma_pool = NULL;
3754 instance->sense_dma_pool = NULL;
3755 }
3756
3757 /**
3758 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3759 * @instance: Adapter soft state
3760 *
3761 * Each command packet has an embedded DMA memory buffer that is used to
3762 * hold the MFI frame and the SG list that immediately follows the frame.
3763 * This function creates those DMA memory buffers for each command packet
3764 * using the PCI pool facility.
3765 */
3766 static int megasas_create_frame_pool(struct megasas_instance *instance)
3767 {
3768 int i;
3769 u32 max_cmd;
3770 u32 sge_sz;
3771 u32 total_sz;
3772 u32 frame_count;
3773 struct megasas_cmd *cmd;
3774
3775 max_cmd = instance->max_mfi_cmds;
3776
3777 /*
3778 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3779 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3780 */
3781 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3782 sizeof(struct megasas_sge32);
3783
3784 if (instance->flag_ieee)
3785 sge_sz = sizeof(struct megasas_sge_skinny);
3786
3787 /*
3788 * For MFI controllers.
3789 * max_num_sge = 60
3790 * max_sge_sz = 16 bytes (sizeof(struct megasas_sge_skinny))
3791 * Total 960 bytes (15 MFI frames of 64 bytes each)
3792 *
3793 * Fusion adapters require only 3 extra frames.
3794 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3795 * max_sge_sz = 12 bytes (sizeof(struct megasas_sge64))
3796 * Total 192 bytes (3 MFI frames of 64 bytes each)
3797 */
3798 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3799 total_sz = MEGAMFI_FRAME_SIZE * frame_count;
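/*
 * Worked example (illustrative only, assuming MEGAMFI_FRAME_SIZE is the
 * 64-byte frame size described in the sizing comment above):
 * Fusion:  frame_count = 3 + 1 = 4,   total_sz = 64 * 4  = 256 bytes per cmd
 * MFI:     frame_count = 15 + 1 = 16, total_sz = 64 * 16 = 1024 bytes per cmd
 * The "+ 1" accounts for the base MFI frame itself; the extra frames hold SGEs.
 */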
3800 /*
3801 * Use DMA pool facility provided by PCI layer
3802 */
3803 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
3804 instance->pdev, total_sz, 256, 0);
3805
3806 if (!instance->frame_dma_pool) {
3807 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3808 return -ENOMEM;
3809 }
3810
3811 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
3812 instance->pdev, 128, 4, 0);
3813
3814 if (!instance->sense_dma_pool) {
3815 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3816
3817 pci_pool_destroy(instance->frame_dma_pool);
3818 instance->frame_dma_pool = NULL;
3819
3820 return -ENOMEM;
3821 }
3822
3823 /*
3824 * Allocate and attach a frame to each of the commands in cmd_list.
3825 * By using cmd->index as the context instead of &cmd, we can
3826 * always use a 32-bit context regardless of the architecture.
3827 */
3828 for (i = 0; i < max_cmd; i++) {
3829
3830 cmd = instance->cmd_list[i];
3831
3832 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
3833 GFP_KERNEL, &cmd->frame_phys_addr);
3834
3835 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
3836 GFP_KERNEL, &cmd->sense_phys_addr);
3837
3838 /*
3839 * megasas_teardown_frame_pool() takes care of freeing
3840 * whatever has been allocated
3841 */
3842 if (!cmd->frame || !cmd->sense) {
3843 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
3844 megasas_teardown_frame_pool(instance);
3845 return -ENOMEM;
3846 }
3847
3848 memset(cmd->frame, 0, total_sz);
3849 cmd->frame->io.context = cpu_to_le32(cmd->index);
3850 cmd->frame->io.pad_0 = 0;
3851 if (!instance->ctrl_context && reset_devices)
3852 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3853 }
3854
3855 return 0;
3856 }
3857
3858 /**
3859 * megasas_free_cmds - Free all the cmds in the free cmd pool
3860 * @instance: Adapter soft state
3861 */
3862 void megasas_free_cmds(struct megasas_instance *instance)
3863 {
3864 int i;
3865
3866 /* First free the MFI frame pool */
3867 megasas_teardown_frame_pool(instance);
3868
3869 /* Free all the commands in the cmd_list */
3870 for (i = 0; i < instance->max_mfi_cmds; i++)
3871
3872 kfree(instance->cmd_list[i]);
3873
3874 /* Free the cmd_list buffer itself */
3875 kfree(instance->cmd_list);
3876 instance->cmd_list = NULL;
3877
3878 INIT_LIST_HEAD(&instance->cmd_pool);
3879 }
3880
3881 /**
3882 * megasas_alloc_cmds - Allocates the command packets
3883 * @instance: Adapter soft state
3884 *
3885 * Each command that is issued to the FW, whether an IO command from the OS or
3886 * an internal command like an IOCTL, is wrapped in a local data structure called
3887 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
3888 * the FW.
3889 *
3890 * Each frame has a 32-bit field called context (tag). This context is used
3891 * to get back the megasas_cmd from the frame when a frame gets completed in
3892 * the ISR. Typically the address of the megasas_cmd itself would be used as
3893 * the context. But we wanted to keep the differences between 32 and 64 bit
3894 * systems to the minimum. We always use 32-bit integers for the context. In
3895 * this driver, the 32 bit values are the indices into an array cmd_list.
3896 * This array is used only to look up the megasas_cmd given the context. The
3897 * free commands themselves are maintained in a linked list called cmd_pool.
3898 */
3899 int megasas_alloc_cmds(struct megasas_instance *instance)
3900 {
3901 int i;
3902 int j;
3903 u32 max_cmd;
3904 struct megasas_cmd *cmd;
3905 struct fusion_context *fusion;
3906
3907 fusion = instance->ctrl_context;
3908 max_cmd = instance->max_mfi_cmds;
3909
3910 /*
3911 * instance->cmd_list is an array of struct megasas_cmd pointers.
3912 * Allocate the dynamic array first and then allocate individual
3913 * commands.
3914 */
3915 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
3916
3917 if (!instance->cmd_list) {
3918 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
3919 return -ENOMEM;
3920 }
3921
3922 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
3923
3924 for (i = 0; i < max_cmd; i++) {
3925 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
3926 GFP_KERNEL);
3927
3928 if (!instance->cmd_list[i]) {
3929
3930 for (j = 0; j < i; j++)
3931 kfree(instance->cmd_list[j]);
3932
3933 kfree(instance->cmd_list);
3934 instance->cmd_list = NULL;
3935
3936 return -ENOMEM;
3937 }
3938 }
3939
3940 for (i = 0; i < max_cmd; i++) {
3941 cmd = instance->cmd_list[i];
3942 memset(cmd, 0, sizeof(struct megasas_cmd));
3943 cmd->index = i;
3944 cmd->scmd = NULL;
3945 cmd->instance = instance;
3946
3947 list_add_tail(&cmd->list, &instance->cmd_pool);
3948 }
3949
3950 /*
3951 * Create a frame pool and assign one frame to each cmd
3952 */
3953 if (megasas_create_frame_pool(instance)) {
3954 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
3955 megasas_free_cmds(instance);
3956 }
3957
3958 return 0;
3959 }
3960
3961 /*
3962 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
3963 * @instance: Adapter soft state
3964 *
3965 * Returns KILL_ADAPTER for MFI (non-Fusion) adapters, IGNORE_TIMEOUT if driver
3966 * load/unload is in progress or FW is already under OCR, and INITIATE_OCR otherwise.
3967 */
3968 inline int
3969 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
3970
3971 if (!instance->ctrl_context)
3972 return KILL_ADAPTER;
3973 else if (instance->unload ||
3974 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
3975 return IGNORE_TIMEOUT;
3976 else
3977 return INITIATE_OCR;
3978 }
3979
3980 static int
3981 megasas_get_pd_info(struct megasas_instance *instance, u16 device_id)
3982 {
3983 int ret;
3984 struct megasas_cmd *cmd;
3985 struct megasas_dcmd_frame *dcmd;
3986
3987 cmd = megasas_get_cmd(instance);
3988
3989 if (!cmd) {
3990 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
3991 return -ENOMEM;
3992 }
3993
3994 dcmd = &cmd->frame->dcmd;
3995
3996 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
3997 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3998
3999 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4000 dcmd->cmd = MFI_CMD_DCMD;
4001 dcmd->cmd_status = 0xFF;
4002 dcmd->sge_count = 1;
4003 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4004 dcmd->timeout = 0;
4005 dcmd->pad_0 = 0;
4006 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4007 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4008 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4009 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
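/*
 * Typical DCMD pattern used throughout this file: input parameters go in
 * the frame's mailbox (here the target device_id), and a single 32-bit SGE
 * points at a DMA-coherent buffer (pd_info) that FW fills with the reply.
 */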
4010
4011 if (instance->ctrl_context && !instance->mask_interrupts)
4012 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4013 else
4014 ret = megasas_issue_polled(instance, cmd);
4015
4016 switch (ret) {
4017 case DCMD_SUCCESS:
4018 instance->pd_list[device_id].interface =
4019 instance->pd_info->state.ddf.pdType.intf;
4020 break;
4021
4022 case DCMD_TIMEOUT:
4023
4024 switch (dcmd_timeout_ocr_possible(instance)) {
4025 case INITIATE_OCR:
4026 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4027 megasas_reset_fusion(instance->host,
4028 MFI_IO_TIMEOUT_OCR);
4029 break;
4030 case KILL_ADAPTER:
4031 megaraid_sas_kill_hba(instance);
4032 break;
4033 case IGNORE_TIMEOUT:
4034 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4035 __func__, __LINE__);
4036 break;
4037 }
4038
4039 break;
4040 }
4041
4042 if (ret != DCMD_TIMEOUT)
4043 megasas_return_cmd(instance, cmd);
4044
4045 return ret;
4046 }
4047 /*
4048 * megasas_get_pd_list - Returns FW's pd_list structure
4049 * @instance: Adapter soft state
4050 *
4051 * Issues an internal command (DCMD) to get the FW's controller PD
4052 * list structure. This information is mainly used to find out the
4053 * physical drives that are exposed to the host as SYSTEM (JBOD)
4054 * devices.
4055 */
4056 static int
4057 megasas_get_pd_list(struct megasas_instance *instance)
4058 {
4059 int ret = 0, pd_index = 0;
4060 struct megasas_cmd *cmd;
4061 struct megasas_dcmd_frame *dcmd;
4062 struct MR_PD_LIST *ci;
4063 struct MR_PD_ADDRESS *pd_addr;
4064 dma_addr_t ci_h = 0;
4065
4066 if (instance->pd_list_not_supported) {
4067 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4068 "not supported by firmware\n");
4069 return ret;
4070 }
4071
4072 cmd = megasas_get_cmd(instance);
4073
4074 if (!cmd) {
4075 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4076 return -ENOMEM;
4077 }
4078
4079 dcmd = &cmd->frame->dcmd;
4080
4081 ci = pci_alloc_consistent(instance->pdev,
4082 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4083
4084 if (!ci) {
4085 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4086 megasas_return_cmd(instance, cmd);
4087 return -ENOMEM;
4088 }
4089
4090 memset(ci, 0, sizeof(*ci));
4091 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4092
4093 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4094 dcmd->mbox.b[1] = 0;
4095 dcmd->cmd = MFI_CMD_DCMD;
4096 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4097 dcmd->sge_count = 1;
4098 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4099 dcmd->timeout = 0;
4100 dcmd->pad_0 = 0;
4101 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4102 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4103 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4104 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4105
4106 if (instance->ctrl_context && !instance->mask_interrupts)
4107 ret = megasas_issue_blocked_cmd(instance, cmd,
4108 MFI_IO_TIMEOUT_SECS);
4109 else
4110 ret = megasas_issue_polled(instance, cmd);
4111
4112 switch (ret) {
4113 case DCMD_FAILED:
4114 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4115 "failed/not supported by firmware\n");
4116
4117 if (instance->ctrl_context)
4118 megaraid_sas_kill_hba(instance);
4119 else
4120 instance->pd_list_not_supported = 1;
4121 break;
4122 case DCMD_TIMEOUT:
4123
4124 switch (dcmd_timeout_ocr_possible(instance)) {
4125 case INITIATE_OCR:
4126 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4127 /*
4128 * DCMD timed out from the AEN path.
4129 * The AEN path already holds reset_mutex to avoid PCI access
4130 * while OCR is in progress, so drop it around the reset.
4131 */
4132 mutex_unlock(&instance->reset_mutex);
4133 megasas_reset_fusion(instance->host,
4134 MFI_IO_TIMEOUT_OCR);
4135 mutex_lock(&instance->reset_mutex);
4136 break;
4137 case KILL_ADAPTER:
4138 megaraid_sas_kill_hba(instance);
4139 break;
4140 case IGNORE_TIMEOUT:
4141 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4142 __func__, __LINE__);
4143 break;
4144 }
4145
4146 break;
4147
4148 case DCMD_SUCCESS:
4149 pd_addr = ci->addr;
4150
4151 if ((le32_to_cpu(ci->count) >
4152 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4153 break;
4154
4155 memset(instance->local_pd_list, 0,
4156 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4157
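/*
 * Rebuild the driver's PD list: entries are indexed by the FW device ID
 * (target ID), not by the loop index, and marked MR_PD_STATE_SYSTEM.
 */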
4158 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4159 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4160 le16_to_cpu(pd_addr->deviceId);
4161 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4162 pd_addr->scsiDevType;
4163 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4164 MR_PD_STATE_SYSTEM;
4165 pd_addr++;
4166 }
4167
4168 memcpy(instance->pd_list, instance->local_pd_list,
4169 sizeof(instance->pd_list));
4170 break;
4171
4172 }
4173
4174 pci_free_consistent(instance->pdev,
4175 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4176 ci, ci_h);
4177
4178 if (ret != DCMD_TIMEOUT)
4179 megasas_return_cmd(instance, cmd);
4180
4181 return ret;
4182 }
4183
4184 /*
4185 * megasas_get_ld_list - Returns FW's ld_list structure
4186 * @instance: Adapter soft state
4187 *
4188 * Issues an internal command (DCMD) to get the FW's controller LD
4189 * list structure. This information is mainly used to find out the
4190 * logical drives (LDs) that are currently configured on the
4191 * controller.
4192 */
4193 static int
4194 megasas_get_ld_list(struct megasas_instance *instance)
4195 {
4196 int ret = 0, ld_index = 0, ids = 0;
4197 struct megasas_cmd *cmd;
4198 struct megasas_dcmd_frame *dcmd;
4199 struct MR_LD_LIST *ci;
4200 dma_addr_t ci_h = 0;
4201 u32 ld_count;
4202
4203 cmd = megasas_get_cmd(instance);
4204
4205 if (!cmd) {
4206 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4207 return -ENOMEM;
4208 }
4209
4210 dcmd = &cmd->frame->dcmd;
4211
4212 ci = pci_alloc_consistent(instance->pdev,
4213 sizeof(struct MR_LD_LIST),
4214 &ci_h);
4215
4216 if (!ci) {
4217 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4218 megasas_return_cmd(instance, cmd);
4219 return -ENOMEM;
4220 }
4221
4222 memset(ci, 0, sizeof(*ci));
4223 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4224
4225 if (instance->supportmax256vd)
4226 dcmd->mbox.b[0] = 1;
4227 dcmd->cmd = MFI_CMD_DCMD;
4228 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4229 dcmd->sge_count = 1;
4230 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4231 dcmd->timeout = 0;
4232 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4233 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4234 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4235 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4236 dcmd->pad_0 = 0;
4237
4238 if (instance->ctrl_context && !instance->mask_interrupts)
4239 ret = megasas_issue_blocked_cmd(instance, cmd,
4240 MFI_IO_TIMEOUT_SECS);
4241 else
4242 ret = megasas_issue_polled(instance, cmd);
4243
4244 ld_count = le32_to_cpu(ci->ldCount);
4245
4246 switch (ret) {
4247 case DCMD_FAILED:
4248 megaraid_sas_kill_hba(instance);
4249 break;
4250 case DCMD_TIMEOUT:
4251
4252 switch (dcmd_timeout_ocr_possible(instance)) {
4253 case INITIATE_OCR:
4254 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4255 /*
4256 * DCMD failed from AEN path.
4257 * AEN path already hold reset_mutex to avoid PCI access
4258 * while OCR is in progress.
4259 */
4260 mutex_unlock(&instance->reset_mutex);
4261 megasas_reset_fusion(instance->host,
4262 MFI_IO_TIMEOUT_OCR);
4263 mutex_lock(&instance->reset_mutex);
4264 break;
4265 case KILL_ADAPTER:
4266 megaraid_sas_kill_hba(instance);
4267 break;
4268 case IGNORE_TIMEOUT:
4269 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4270 __func__, __LINE__);
4271 break;
4272 }
4273
4274 break;
4275
4276 case DCMD_SUCCESS:
4277 if (ld_count > instance->fw_supported_vd_count)
4278 break;
4279
4280 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4281
4282 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4283 if (ci->ldList[ld_index].state != 0) {
4284 ids = ci->ldList[ld_index].ref.targetId;
4285 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4286 }
4287 }
4288
4289 break;
4290 }
4291
4292 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4293
4294 if (ret != DCMD_TIMEOUT)
4295 megasas_return_cmd(instance, cmd);
4296
4297 return ret;
4298 }
4299
4300 /**
4301 * megasas_ld_list_query - Returns FW's LD target ID list
4302 * @instance: Adapter soft state
4303 * @query_type: Query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4304 *
4305 * Issues an internal command (DCMD) to query the FW for the target IDs
4306 * of the logical drives matching @query_type. Falls back to
4307 * megasas_get_ld_list() if the DCMD is not supported by the firmware.
4308 */
4309 static int
4310 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4311 {
4312 int ret = 0, ld_index = 0, ids = 0;
4313 struct megasas_cmd *cmd;
4314 struct megasas_dcmd_frame *dcmd;
4315 struct MR_LD_TARGETID_LIST *ci;
4316 dma_addr_t ci_h = 0;
4317 u32 tgtid_count;
4318
4319 cmd = megasas_get_cmd(instance);
4320
4321 if (!cmd) {
4322 dev_warn(&instance->pdev->dev,
4323 "megasas_ld_list_query: Failed to get cmd\n");
4324 return -ENOMEM;
4325 }
4326
4327 dcmd = &cmd->frame->dcmd;
4328
4329 ci = pci_alloc_consistent(instance->pdev,
4330 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4331
4332 if (!ci) {
4333 dev_warn(&instance->pdev->dev,
4334 "Failed to alloc mem for ld_list_query\n");
4335 megasas_return_cmd(instance, cmd);
4336 return -ENOMEM;
4337 }
4338
4339 memset(ci, 0, sizeof(*ci));
4340 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4341
4342 dcmd->mbox.b[0] = query_type;
4343 if (instance->supportmax256vd)
4344 dcmd->mbox.b[2] = 1;
4345
4346 dcmd->cmd = MFI_CMD_DCMD;
4347 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4348 dcmd->sge_count = 1;
4349 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4350 dcmd->timeout = 0;
4351 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4352 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4353 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4354 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4355 dcmd->pad_0 = 0;
4356
4357 if (instance->ctrl_context && !instance->mask_interrupts)
4358 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4359 else
4360 ret = megasas_issue_polled(instance, cmd);
4361
4362 switch (ret) {
4363 case DCMD_FAILED:
4364 dev_info(&instance->pdev->dev,
4365 "DCMD not supported by firmware - %s %d\n",
4366 __func__, __LINE__);
4367 ret = megasas_get_ld_list(instance);
4368 break;
4369 case DCMD_TIMEOUT:
4370 switch (dcmd_timeout_ocr_possible(instance)) {
4371 case INITIATE_OCR:
4372 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4373 /*
4374 * DCMD failed from AEN path.
4375 * AEN path already hold reset_mutex to avoid PCI access
4376 * while OCR is in progress.
4377 */
4378 mutex_unlock(&instance->reset_mutex);
4379 megasas_reset_fusion(instance->host,
4380 MFI_IO_TIMEOUT_OCR);
4381 mutex_lock(&instance->reset_mutex);
4382 break;
4383 case KILL_ADAPTER:
4384 megaraid_sas_kill_hba(instance);
4385 break;
4386 case IGNORE_TIMEOUT:
4387 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4388 __func__, __LINE__);
4389 break;
4390 }
4391
4392 break;
4393 case DCMD_SUCCESS:
4394 tgtid_count = le32_to_cpu(ci->count);
4395
4396 if ((tgtid_count > (instance->fw_supported_vd_count)))
4397 break;
4398
4399 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4400 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4401 ids = ci->targetId[ld_index];
4402 instance->ld_ids[ids] = ci->targetId[ld_index];
4403 }
4404
4405 break;
4406 }
4407
4408 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4409 ci, ci_h);
4410
4411 if (ret != DCMD_TIMEOUT)
4412 megasas_return_cmd(instance, cmd);
4413
4414 return ret;
4415 }
4416
4417 /*
4418 * megasas_update_ext_vd_details - Update details w.r.t. Extended VD support
4419 * @instance: Controller's soft state
4420 */
4421 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4422 {
4423 struct fusion_context *fusion;
4424 u32 old_map_sz;
4425 u32 new_map_sz;
4426
4427 fusion = instance->ctrl_context;
4428 /* For MFI based controllers there is nothing to update */
4429 if (!fusion)
4430 return;
4431
4432 instance->supportmax256vd =
4433 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4434 /* Below is an additional check to address a future FW enhancement */
4435 if (instance->ctrl_info->max_lds > 64)
4436 instance->supportmax256vd = 1;
4437
4438 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4439 * MEGASAS_MAX_DEV_PER_CHANNEL;
4440 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4441 * MEGASAS_MAX_DEV_PER_CHANNEL;
4442 if (instance->supportmax256vd) {
4443 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4444 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4445 } else {
4446 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4447 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4448 }
4449
4450 dev_info(&instance->pdev->dev,
4451 "firmware type\t: %s\n",
4452 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4453 "Legacy(64 VD) firmware");
4454
4455 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4456 (sizeof(struct MR_LD_SPAN_MAP) *
4457 (instance->fw_supported_vd_count - 1));
4458 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4459 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
4460 (sizeof(struct MR_LD_SPAN_MAP) *
4461 (instance->drv_supported_vd_count - 1));
4462
4463 fusion->max_map_sz = max(old_map_sz, new_map_sz);
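/*
 * max_map_sz is the larger of the legacy and extended RAID map sizes, so
 * the driver's buffer can hold whichever format this FW returns;
 * current_map_sz below selects the size actually in use.
 */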
4464
4465
4466 if (instance->supportmax256vd)
4467 fusion->current_map_sz = new_map_sz;
4468 else
4469 fusion->current_map_sz = old_map_sz;
4470 }
4471
4472 /**
4473 * megasas_get_ctrl_info - Returns FW's controller structure
4474 * @instance: Adapter soft state
4475 *
4476 * Issues an internal command (DCMD) to get the FW's controller structure.
4477 * This information is mainly used to find out the maximum IO transfer per
4478 * command supported by the FW.
4479 */
4480 int
4481 megasas_get_ctrl_info(struct megasas_instance *instance)
4482 {
4483 int ret = 0;
4484 struct megasas_cmd *cmd;
4485 struct megasas_dcmd_frame *dcmd;
4486 struct megasas_ctrl_info *ci;
4487 struct megasas_ctrl_info *ctrl_info;
4488 dma_addr_t ci_h = 0;
4489
4490 ctrl_info = instance->ctrl_info;
4491
4492 cmd = megasas_get_cmd(instance);
4493
4494 if (!cmd) {
4495 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4496 return -ENOMEM;
4497 }
4498
4499 dcmd = &cmd->frame->dcmd;
4500
4501 ci = pci_alloc_consistent(instance->pdev,
4502 sizeof(struct megasas_ctrl_info), &ci_h);
4503
4504 if (!ci) {
4505 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4506 megasas_return_cmd(instance, cmd);
4507 return -ENOMEM;
4508 }
4509
4510 memset(ci, 0, sizeof(*ci));
4511 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4512
4513 dcmd->cmd = MFI_CMD_DCMD;
4514 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4515 dcmd->sge_count = 1;
4516 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4517 dcmd->timeout = 0;
4518 dcmd->pad_0 = 0;
4519 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4520 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4521 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4522 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4523 dcmd->mbox.b[0] = 1;
4524
4525 if (instance->ctrl_context && !instance->mask_interrupts)
4526 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4527 else
4528 ret = megasas_issue_polled(instance, cmd);
4529
4530 switch (ret) {
4531 case DCMD_SUCCESS:
4532 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4533 /* Save required controller information in
4534 * CPU endianness format.
4535 */
4536 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4537 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4538 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4539
4540 /* Update the latest Ext VD info.
4541 * From Init path, store current firmware details.
4542 * From OCR path, detect any firmware property changes,
4543 * in case of a firmware upgrade without a system reboot.
4544 */
4545 megasas_update_ext_vd_details(instance);
4546 instance->use_seqnum_jbod_fp =
4547 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4548
4549 /* Check whether controller is iMR or MR */
4550 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4551 dev_info(&instance->pdev->dev,
4552 "controller type\t: %s(%dMB)\n",
4553 instance->is_imr ? "iMR" : "MR",
4554 le16_to_cpu(ctrl_info->memory_size));
4555
4556 instance->disableOnlineCtrlReset =
4557 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4558 instance->secure_jbod_support =
4559 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4560 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4561 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4562 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4563 instance->secure_jbod_support ? "Yes" : "No");
4564 break;
4565
4566 case DCMD_TIMEOUT:
4567 switch (dcmd_timeout_ocr_possible(instance)) {
4568 case INITIATE_OCR:
4569 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4570 megasas_reset_fusion(instance->host,
4571 MFI_IO_TIMEOUT_OCR);
4572 break;
4573 case KILL_ADAPTER:
4574 megaraid_sas_kill_hba(instance);
4575 break;
4576 case IGNORE_TIMEOUT:
4577 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4578 __func__, __LINE__);
4579 break;
4580 }
4581 case DCMD_FAILED:
4582 megaraid_sas_kill_hba(instance);
4583 break;
4584
4585 }
4586
4587 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4588 ci, ci_h);
4589
4590 megasas_return_cmd(instance, cmd);
4591
4592
4593 return ret;
4594 }
4595
4596 /*
4597 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4598 * to firmware
4599 *
4600 * @instance: Adapter soft state
4601 * @crash_buf_state: tell FW to turn ON/OFF crash dump feature
4602 *                   MR_CRASH_BUF_TURN_OFF = 0
4603 *                   MR_CRASH_BUF_TURN_ON = 1
4604 * Return: 0 on success, non-zero on failure.
4605 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4606 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4607 * that driver supports crash dump feature. This DCMD will be sent only if
4608 * crash dump feature is supported by the FW.
4609 *
4610 */
4611 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4612 u8 crash_buf_state)
4613 {
4614 int ret = 0;
4615 struct megasas_cmd *cmd;
4616 struct megasas_dcmd_frame *dcmd;
4617
4618 cmd = megasas_get_cmd(instance);
4619
4620 if (!cmd) {
4621 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4622 return -ENOMEM;
4623 }
4624
4625
4626 dcmd = &cmd->frame->dcmd;
4627
4628 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4629 dcmd->mbox.b[0] = crash_buf_state;
4630 dcmd->cmd = MFI_CMD_DCMD;
4631 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4632 dcmd->sge_count = 1;
4633 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4634 dcmd->timeout = 0;
4635 dcmd->pad_0 = 0;
4636 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4637 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4638 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4639 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4640
4641 if (instance->ctrl_context && !instance->mask_interrupts)
4642 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4643 else
4644 ret = megasas_issue_polled(instance, cmd);
4645
4646 if (ret == DCMD_TIMEOUT) {
4647 switch (dcmd_timeout_ocr_possible(instance)) {
4648 case INITIATE_OCR:
4649 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4650 megasas_reset_fusion(instance->host,
4651 MFI_IO_TIMEOUT_OCR);
4652 break;
4653 case KILL_ADAPTER:
4654 megaraid_sas_kill_hba(instance);
4655 break;
4656 case IGNORE_TIMEOUT:
4657 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4658 __func__, __LINE__);
4659 break;
4660 }
4661 } else
4662 megasas_return_cmd(instance, cmd);
4663
4664 return ret;
4665 }
4666
4667 /**
4668 * megasas_issue_init_mfi - Initializes the FW
4669 * @instance: Adapter soft state
4670 *
4671 * Issues the INIT MFI cmd
4672 */
4673 static int
4674 megasas_issue_init_mfi(struct megasas_instance *instance)
4675 {
4676 __le32 context;
4677 struct megasas_cmd *cmd;
4678 struct megasas_init_frame *init_frame;
4679 struct megasas_init_queue_info *initq_info;
4680 dma_addr_t init_frame_h;
4681 dma_addr_t initq_info_h;
4682
4683 /*
4684 * Prepare an init frame. Note that the init frame points to the queue
4685 * info structure. Each frame has its SGL allocated after the first 64
4686 * bytes. For this frame - since we don't need any SGL - we use the
4687 * SGL's space as the queue info structure.
4688 *
4689 * We will not get a NULL command below. We just created the pool.
4690 */
4691 cmd = megasas_get_cmd(instance);
4692
4693 init_frame = (struct megasas_init_frame *)cmd->frame;
4694 initq_info = (struct megasas_init_queue_info *)
4695 ((unsigned long)init_frame + 64);
4696
4697 init_frame_h = cmd->frame_phys_addr;
4698 initq_info_h = init_frame_h + 64;
4699
4700 context = init_frame->context;
4701 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4702 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4703 init_frame->context = context;
4704
4705 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4706 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4707
4708 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4709 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4710
4711 init_frame->cmd = MFI_CMD_INIT;
4712 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4713 init_frame->queue_info_new_phys_addr_lo =
4714 cpu_to_le32(lower_32_bits(initq_info_h));
4715 init_frame->queue_info_new_phys_addr_hi =
4716 cpu_to_le32(upper_32_bits(initq_info_h));
4717
4718 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4719
4720 /*
4721 * disable the intr before firing the init frame to FW
4722 */
4723 instance->instancet->disable_intr(instance);
4724
4725 /*
4726 * Issue the init frame in polled mode
4727 */
4728
4729 if (megasas_issue_polled(instance, cmd)) {
4730 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4731 megasas_return_cmd(instance, cmd);
4732 goto fail_fw_init;
4733 }
4734
4735 megasas_return_cmd(instance, cmd);
4736
4737 return 0;
4738
4739 fail_fw_init:
4740 return -EINVAL;
4741 }
4742
4743 static u32
4744 megasas_init_adapter_mfi(struct megasas_instance *instance)
4745 {
4746 struct megasas_register_set __iomem *reg_set;
4747 u32 context_sz;
4748 u32 reply_q_sz;
4749
4750 reg_set = instance->reg_set;
4751
4752 /*
4753 * Get various operational parameters from status register
4754 */
4755 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4756 /*
4757 * Reduce the max supported cmds by 1. This is to ensure that the
4758 * reply_q_sz (1 more than the max cmd that driver may send)
4759 * does not exceed max cmds that the FW can support
4760 */
4761 instance->max_fw_cmds = instance->max_fw_cmds-1;
4762 instance->max_mfi_cmds = instance->max_fw_cmds;
4763 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4764 0x10;
4765 /*
4766 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4767 * are reserved for IOCTL + driver's internal DCMDs.
4768 */
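/*
 * For all other MFI adapters, MEGASAS_INT_CMDS frames are reserved for
 * internal DCMDs and ioctls, and the ioctl semaphore limits concurrent
 * ioctls to MEGASAS_MFI_IOCTL_CMDS.
 */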
4769 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4770 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4771 instance->max_scsi_cmds = (instance->max_fw_cmds -
4772 MEGASAS_SKINNY_INT_CMDS);
4773 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4774 } else {
4775 instance->max_scsi_cmds = (instance->max_fw_cmds -
4776 MEGASAS_INT_CMDS);
4777 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4778 }
4779
4780 instance->cur_can_queue = instance->max_scsi_cmds;
4781 /*
4782 * Create a pool of commands
4783 */
4784 if (megasas_alloc_cmds(instance))
4785 goto fail_alloc_cmds;
4786
4787 /*
4788 * Allocate memory for reply queue. Length of reply queue should
4789 * be _one_ more than the maximum commands handled by the firmware.
4790 *
4791 * Note: When FW completes commands, it places the corresponding context
4792 * values in this circular reply queue. This circular queue is a fairly
4793 * typical producer-consumer queue. FW is the producer (of completed
4794 * commands) and the driver is the consumer.
4795 */
4796 context_sz = sizeof(u32);
4797 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
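/*
 * For example (illustrative numbers only): if FW supports 1008 commands,
 * max_fw_cmds becomes 1007 after the -1 above, and the reply queue needs
 * 4 * (1007 + 1) = 4032 bytes.
 */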
4798
4799 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4800 reply_q_sz,
4801 &instance->reply_queue_h);
4802
4803 if (!instance->reply_queue) {
4804 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4805 goto fail_reply_queue;
4806 }
4807
4808 if (megasas_issue_init_mfi(instance))
4809 goto fail_fw_init;
4810
4811 if (megasas_get_ctrl_info(instance)) {
4812 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4813 "Fail from %s %d\n", instance->unique_id,
4814 __func__, __LINE__);
4815 goto fail_fw_init;
4816 }
4817
4818 instance->fw_support_ieee = 0;
4819 instance->fw_support_ieee =
4820 (instance->instancet->read_fw_status_reg(reg_set) &
4821 0x04000000);
4822
4823 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4824 instance->fw_support_ieee);
4825
4826 if (instance->fw_support_ieee)
4827 instance->flag_ieee = 1;
4828
4829 return 0;
4830
4831 fail_fw_init:
4832
4833 pci_free_consistent(instance->pdev, reply_q_sz,
4834 instance->reply_queue, instance->reply_queue_h);
4835 fail_reply_queue:
4836 megasas_free_cmds(instance);
4837
4838 fail_alloc_cmds:
4839 return 1;
4840 }
4841
4842 /*
4843 * megasas_setup_irqs_ioapic - register legacy (IO-APIC) interrupts.
4844 * @instance: Adapter soft state
4845 *
4846 * Do not enable interrupt, only setup ISRs.
4847 *
4848 * Return 0 on success.
4849 */
4850 static int
4851 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4852 {
4853 struct pci_dev *pdev;
4854
4855 pdev = instance->pdev;
4856 instance->irq_context[0].instance = instance;
4857 instance->irq_context[0].MSIxIndex = 0;
4858 if (request_irq(pdev->irq, instance->instancet->service_isr,
4859 IRQF_SHARED, "megasas", &instance->irq_context[0])) {
4860 dev_err(&instance->pdev->dev,
4861 "Failed to register IRQ from %s %d\n",
4862 __func__, __LINE__);
4863 return -1;
4864 }
4865 return 0;
4866 }
4867
4868 /**
4869 * megasas_setup_irqs_msix - register MSI-x interrupts.
4870 * @instance: Adapter soft state
4871 * @is_probe: Driver probe check
4872 *
4873 * Do not enable interrupt, only setup ISRs.
4874 *
4875 * Return 0 on success.
4876 */
4877 static int
4878 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
4879 {
4880 int i, j, cpu;
4881 struct pci_dev *pdev;
4882
4883 pdev = instance->pdev;
4884
4885 /* Try MSI-x */
4886 cpu = cpumask_first(cpu_online_mask);
4887 for (i = 0; i < instance->msix_vectors; i++) {
4888 instance->irq_context[i].instance = instance;
4889 instance->irq_context[i].MSIxIndex = i;
4890 if (request_irq(instance->msixentry[i].vector,
4891 instance->instancet->service_isr, 0, "megasas",
4892 &instance->irq_context[i])) {
4893 dev_err(&instance->pdev->dev,
4894 "Failed to register IRQ for vector %d.\n", i);
4895 for (j = 0; j < i; j++) {
4896 if (smp_affinity_enable)
4897 irq_set_affinity_hint(
4898 instance->msixentry[j].vector, NULL);
4899 free_irq(instance->msixentry[j].vector,
4900 &instance->irq_context[j]);
4901 }
4902 /* Retry irq register for IO_APIC*/
4903 instance->msix_vectors = 0;
4904 if (is_probe)
4905 return megasas_setup_irqs_ioapic(instance);
4906 else
4907 return -1;
4908 }
4909 if (smp_affinity_enable) {
4910 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4911 get_cpu_mask(cpu)))
4912 dev_err(&instance->pdev->dev,
4913 "Failed to set affinity hint"
4914 " for cpu %d\n", cpu);
4915 cpu = cpumask_next(cpu, cpu_online_mask);
4916 }
4917 }
4918 return 0;
4919 }
4920
4921 /*
4922 * megasas_destroy_irqs- unregister interrupts.
4923 * @instance: Adapter soft state
4924 * return: void
4925 */
4926 static void
4927 megasas_destroy_irqs(struct megasas_instance *instance) {
4928
4929 int i;
4930
4931 if (instance->msix_vectors)
4932 for (i = 0; i < instance->msix_vectors; i++) {
4933 if (smp_affinity_enable)
4934 irq_set_affinity_hint(
4935 instance->msixentry[i].vector, NULL);
4936 free_irq(instance->msixentry[i].vector,
4937 &instance->irq_context[i]);
4938 }
4939 else
4940 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4941 }
4942
4943 /**
4944 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
4945 * @instance: Adapter soft state
4946 *
4947 * The map holds per-PD sequence numbers obtained from FW and is used to
4948 * decide whether JBOD fast path (use_seqnum_jbod_fp) can be enabled.
4949 */
4950 void
4951 megasas_setup_jbod_map(struct megasas_instance *instance)
4952 {
4953 int i;
4954 struct fusion_context *fusion = instance->ctrl_context;
4955 u32 pd_seq_map_sz;
4956
4957 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
4958 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
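/*
 * The size accounts for the one MR_PD_CFG_SEQ entry embedded in
 * MR_PD_CFG_SEQ_NUM_SYNC plus (MAX_PHYSICAL_DEVICES - 1) additional
 * entries, one per possible physical device.
 */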
4959
4960 if (reset_devices || !fusion ||
4961 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
4962 dev_info(&instance->pdev->dev,
4963 "Jbod map is not supported %s %d\n",
4964 __func__, __LINE__);
4965 instance->use_seqnum_jbod_fp = false;
4966 return;
4967 }
4968
4969 if (fusion->pd_seq_sync[0])
4970 goto skip_alloc;
4971
4972 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
4973 fusion->pd_seq_sync[i] = dma_alloc_coherent
4974 (&instance->pdev->dev, pd_seq_map_sz,
4975 &fusion->pd_seq_phys[i], GFP_KERNEL);
4976 if (!fusion->pd_seq_sync[i]) {
4977 dev_err(&instance->pdev->dev,
4978 "Failed to allocate memory from %s %d\n",
4979 __func__, __LINE__);
4980 if (i == 1) {
4981 dma_free_coherent(&instance->pdev->dev,
4982 pd_seq_map_sz, fusion->pd_seq_sync[0],
4983 fusion->pd_seq_phys[0]);
4984 fusion->pd_seq_sync[0] = NULL;
4985 }
4986 instance->use_seqnum_jbod_fp = false;
4987 return;
4988 }
4989 }
4990
4991 skip_alloc:
4992 if (!megasas_sync_pd_seq_num(instance, false) &&
4993 !megasas_sync_pd_seq_num(instance, true))
4994 instance->use_seqnum_jbod_fp = true;
4995 else
4996 instance->use_seqnum_jbod_fp = false;
4997 }
4998
4999 /**
5000 * megasas_init_fw - Initializes the FW
5001 * @instance: Adapter soft state
5002 *
5003 * This is the main function for initializing firmware
5004 */
5005
5006 static int megasas_init_fw(struct megasas_instance *instance)
5007 {
5008 u32 max_sectors_1;
5009 u32 max_sectors_2;
5010 u32 tmp_sectors, msix_enable, scratch_pad_2;
5011 resource_size_t base_addr;
5012 struct megasas_register_set __iomem *reg_set;
5013 struct megasas_ctrl_info *ctrl_info = NULL;
5014 unsigned long bar_list;
5015 int i, loop, fw_msix_count = 0;
5016 struct IOV_111 *iovPtr;
5017 struct fusion_context *fusion;
5018
5019 fusion = instance->ctrl_context;
5020
5021 /* Find first memory bar */
5022 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5023 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5024 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5025 "megasas: LSI")) {
5026 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5027 return -EBUSY;
5028 }
5029
5030 base_addr = pci_resource_start(instance->pdev, instance->bar);
5031 instance->reg_set = ioremap_nocache(base_addr, 8192);
5032
5033 if (!instance->reg_set) {
5034 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5035 goto fail_ioremap;
5036 }
5037
5038 reg_set = instance->reg_set;
5039
5040 switch (instance->pdev->device) {
5041 case PCI_DEVICE_ID_LSI_FUSION:
5042 case PCI_DEVICE_ID_LSI_PLASMA:
5043 case PCI_DEVICE_ID_LSI_INVADER:
5044 case PCI_DEVICE_ID_LSI_FURY:
5045 case PCI_DEVICE_ID_LSI_INTRUDER:
5046 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5047 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5048 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5049 instance->instancet = &megasas_instance_template_fusion;
5050 break;
5051 case PCI_DEVICE_ID_LSI_SAS1078R:
5052 case PCI_DEVICE_ID_LSI_SAS1078DE:
5053 instance->instancet = &megasas_instance_template_ppc;
5054 break;
5055 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5056 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5057 instance->instancet = &megasas_instance_template_gen2;
5058 break;
5059 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5060 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5061 instance->instancet = &megasas_instance_template_skinny;
5062 break;
5063 case PCI_DEVICE_ID_LSI_SAS1064R:
5064 case PCI_DEVICE_ID_DELL_PERC5:
5065 default:
5066 instance->instancet = &megasas_instance_template_xscale;
5067 break;
5068 }
5069
5070 if (megasas_transition_to_ready(instance, 0)) {
5071 atomic_set(&instance->fw_reset_no_pci_access, 1);
5072 instance->instancet->adp_reset
5073 (instance, instance->reg_set);
5074 atomic_set(&instance->fw_reset_no_pci_access, 0);
5075 dev_info(&instance->pdev->dev,
5076 "FW restarted successfully from %s!\n",
5077 __func__);
5078
5079 /* wait for about 30 seconds before retrying */
5080 ssleep(30);
5081
5082 if (megasas_transition_to_ready(instance, 0))
5083 goto fail_ready_state;
5084 }
5085
5086 /*
5087 * MSI-X host index 0 is common for all adapters.
5088 * It is used for all MPT based adapters.
5089 */
5090 instance->reply_post_host_index_addr[0] =
5091 (u32 __iomem *)((u8 __iomem *)instance->reg_set +
5092 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5093
5094 /* Check if MSI-X is supported while in ready state */
5095 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5096 0x4000000) >> 0x1a;
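/* Bit 26 (0x4000000) of the scratch pad status register indicates MSI-X support. */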
5097 if (msix_enable && !msix_disable) {
5098 scratch_pad_2 = readl
5099 (&instance->reg_set->outbound_scratch_pad_2);
5100 /* Check max MSI-X vectors */
5101 if (fusion) {
5102 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5103 instance->msix_vectors = (scratch_pad_2
5104 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5105 fw_msix_count = instance->msix_vectors;
5106 } else { /* Invader series supports more than 8 MSI-x vectors*/
5107 instance->msix_vectors = ((scratch_pad_2
5108 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5109 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5110 if (rdpq_enable)
5111 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5112 1 : 0;
5113 fw_msix_count = instance->msix_vectors;
5114 /* Save reply post index addresses 1-15 to local memory
5115 * Index 0 is already saved from reg offset
5116 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5117 */
5118 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5119 instance->reply_post_host_index_addr[loop] =
5120 (u32 __iomem *)
5121 ((u8 __iomem *)instance->reg_set +
5122 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5123 + (loop * 0x10));
5124 }
5125 }
5126 if (msix_vectors)
5127 instance->msix_vectors = min(msix_vectors,
5128 instance->msix_vectors);
5129 } else /* MFI adapters */
5130 instance->msix_vectors = 1;
5131 /* Don't bother allocating more MSI-X vectors than cpus */
5132 instance->msix_vectors = min(instance->msix_vectors,
5133 (unsigned int)num_online_cpus());
5134 for (i = 0; i < instance->msix_vectors; i++)
5135 instance->msixentry[i].entry = i;
5136 i = pci_enable_msix_range(instance->pdev, instance->msixentry,
5137 1, instance->msix_vectors);
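/*
 * pci_enable_msix_range() returns the number of vectors actually granted
 * (anywhere between 1 and the requested count) or a negative errno; on
 * failure the driver falls back to legacy INTx with msix_vectors = 0.
 */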
5138 if (i > 0)
5139 instance->msix_vectors = i;
5140 else
5141 instance->msix_vectors = 0;
5142 }
5143
5144 dev_info(&instance->pdev->dev,
5145 "firmware supports msix\t: (%d)", fw_msix_count);
5146 dev_info(&instance->pdev->dev,
5147 "current msix/online cpus\t: (%d/%d)\n",
5148 instance->msix_vectors, (unsigned int)num_online_cpus());
5149 dev_info(&instance->pdev->dev,
5150 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5151
5152 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5153 (unsigned long)instance);
5154
5155 if (instance->msix_vectors ?
5156 megasas_setup_irqs_msix(instance, 1) :
5157 megasas_setup_irqs_ioapic(instance))
5158 goto fail_setup_irqs;
5159
5160 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5161 GFP_KERNEL);
5162 if (instance->ctrl_info == NULL)
5163 goto fail_init_adapter;
5164
5165 /*
5166 * Below are the default values for legacy firmware
5167 * (non-Fusion based controllers).
5168 */
5169 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5170 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5171 /* Get operational params, sge flags, send init cmd to controller */
5172 if (instance->instancet->init_adapter(instance))
5173 goto fail_init_adapter;
5174
5175
5176 instance->instancet->enable_intr(instance);
5177
5178 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5179
5180 megasas_setup_jbod_map(instance);
5181
5182 /* For passthrough (JBOD) devices,
5183 * the following call gets the PD list from FW.
5184 */
5185 memset(instance->pd_list, 0,
5186 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5187 if (megasas_get_pd_list(instance) < 0) {
5188 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5189 goto fail_get_pd_list;
5190 }
5191
5192 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5193 if (megasas_ld_list_query(instance,
5194 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5195 megasas_get_ld_list(instance);
5196
5197 /*
5198 * Compute the max allowed sectors per IO: The controller info has two
5199 * limits on max sectors. Driver should use the minimum of these two.
5200 *
5201 * 1 << stripe_sz_ops.min = max sectors per strip
5202 *
5203 * Note that older firmware versions (< FW ver 30) didn't report the
5204 * information needed to calculate max_sectors_1, so it always ended up as zero.
5205 */
5206 tmp_sectors = 0;
5207 ctrl_info = instance->ctrl_info;
5208
5209 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5210 le16_to_cpu(ctrl_info->max_strips_per_io);
5211 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5212
5213 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5214
5215 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5216 instance->passive = ctrl_info->cluster.passive;
5217 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5218 instance->UnevenSpanSupport =
5219 ctrl_info->adapterOperations2.supportUnevenSpans;
5220 if (instance->UnevenSpanSupport) {
5221 struct fusion_context *fusion = instance->ctrl_context;
5222 if (MR_ValidateMapInfo(instance))
5223 fusion->fast_path_io = 1;
5224 else
5225 fusion->fast_path_io = 0;
5226
5227 }
5228 if (ctrl_info->host_interface.SRIOV) {
5229 instance->requestorId = ctrl_info->iov.requestorId;
5230 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5231 if (!ctrl_info->adapterOperations2.activePassive)
5232 instance->PlasmaFW111 = 1;
5233
5234 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5235 instance->PlasmaFW111 ? "1.11" : "new");
5236
5237 if (instance->PlasmaFW111) {
5238 iovPtr = (struct IOV_111 *)
5239 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5240 instance->requestorId = iovPtr->requestorId;
5241 }
5242 }
5243 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5244 instance->requestorId);
5245 }
5246
5247 instance->crash_dump_fw_support =
5248 ctrl_info->adapterOperations3.supportCrashDump;
5249 instance->crash_dump_drv_support =
5250 (instance->crash_dump_fw_support &&
5251 instance->crash_dump_buf);
5252 if (instance->crash_dump_drv_support)
5253 megasas_set_crash_dump_params(instance,
5254 MR_CRASH_BUF_TURN_OFF);
5255
5256 else {
5257 if (instance->crash_dump_buf)
5258 pci_free_consistent(instance->pdev,
5259 CRASH_DMA_BUF_SIZE,
5260 instance->crash_dump_buf,
5261 instance->crash_dump_h);
5262 instance->crash_dump_buf = NULL;
5263 }
5264
5265
5266 dev_info(&instance->pdev->dev,
5267 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5268 le16_to_cpu(ctrl_info->pci.vendor_id),
5269 le16_to_cpu(ctrl_info->pci.device_id),
5270 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5271 le16_to_cpu(ctrl_info->pci.sub_device_id));
5272 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5273 instance->UnevenSpanSupport ? "yes" : "no");
5274 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5275 instance->crash_dump_drv_support ? "yes" : "no");
5276 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5277 instance->use_seqnum_jbod_fp ? "yes" : "no");
5278
5279
5280 instance->max_sectors_per_req = instance->max_num_sge *
5281 SGE_BUFFER_SIZE / 512;
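/*
 * Illustration, assuming SGE_BUFFER_SIZE is 4 KB: with max_num_sge = 60
 * this allows 60 * 4096 / 512 = 480 sectors per request, which is then
 * capped by the controller limit (tmp_sectors) below.
 */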
5282 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5283 instance->max_sectors_per_req = tmp_sectors;
5284
5285 /* Check for valid throttlequeuedepth module parameter */
5286 if (throttlequeuedepth &&
5287 throttlequeuedepth <= instance->max_scsi_cmds)
5288 instance->throttlequeuedepth = throttlequeuedepth;
5289 else
5290 instance->throttlequeuedepth =
5291 MEGASAS_THROTTLE_QUEUE_DEPTH;
5292
5293 if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
5294 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5295
5296 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5297 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5298
5299 /* Launch SR-IOV heartbeat timer */
5300 if (instance->requestorId) {
5301 if (!megasas_sriov_start_heartbeat(instance, 1))
5302 megasas_start_timer(instance,
5303 &instance->sriov_heartbeat_timer,
5304 megasas_sriov_heartbeat_handler,
5305 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5306 else
5307 instance->skip_heartbeat_timer_del = 1;
5308 }
5309
5310 return 0;
5311
5312 fail_get_pd_list:
5313 instance->instancet->disable_intr(instance);
5314 fail_init_adapter:
5315 megasas_destroy_irqs(instance);
5316 fail_setup_irqs:
5317 if (instance->msix_vectors)
5318 pci_disable_msix(instance->pdev);
5319 instance->msix_vectors = 0;
5320 fail_ready_state:
5321 kfree(instance->ctrl_info);
5322 instance->ctrl_info = NULL;
5323 iounmap(instance->reg_set);
5324
5325 fail_ioremap:
5326 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5327
5328 return -EINVAL;
5329 }
5330
5331 /**
5332 * megasas_release_mfi - Reverses the FW initialization
5333 * @instance: Adapter soft state
5334 */
5335 static void megasas_release_mfi(struct megasas_instance *instance)
5336 {
5337 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5338
5339 if (instance->reply_queue)
5340 pci_free_consistent(instance->pdev, reply_q_sz,
5341 instance->reply_queue, instance->reply_queue_h);
5342
5343 megasas_free_cmds(instance);
5344
5345 iounmap(instance->reg_set);
5346
5347 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5348 }
5349
5350 /**
5351 * megasas_get_seq_num - Gets latest event sequence numbers
5352 * @instance: Adapter soft state
5353 * @eli: FW event log sequence numbers information
5354 *
5355 * FW maintains a log of all events in a non-volatile area. Upper layers would
5356 * usually find out the latest sequence number of the events, the seq number at
5357 * the boot etc. They would "read" all the events below the latest seq number
5358 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
5359 * number), they would subscribe to AEN (asynchronous event notification) and
5360 * wait for the events to happen.
5361 */
5362 static int
5363 megasas_get_seq_num(struct megasas_instance *instance,
5364 struct megasas_evt_log_info *eli)
5365 {
5366 struct megasas_cmd *cmd;
5367 struct megasas_dcmd_frame *dcmd;
5368 struct megasas_evt_log_info *el_info;
5369 dma_addr_t el_info_h = 0;
5370
5371 cmd = megasas_get_cmd(instance);
5372
5373 if (!cmd) {
5374 return -ENOMEM;
5375 }
5376
5377 dcmd = &cmd->frame->dcmd;
5378 el_info = pci_alloc_consistent(instance->pdev,
5379 sizeof(struct megasas_evt_log_info),
5380 &el_info_h);
5381
5382 if (!el_info) {
5383 megasas_return_cmd(instance, cmd);
5384 return -ENOMEM;
5385 }
5386
5387 memset(el_info, 0, sizeof(*el_info));
5388 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5389
5390 dcmd->cmd = MFI_CMD_DCMD;
5391 dcmd->cmd_status = 0x0;
5392 dcmd->sge_count = 1;
5393 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5394 dcmd->timeout = 0;
5395 dcmd->pad_0 = 0;
5396 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5397 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5398 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5399 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5400
5401 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5402 DCMD_SUCCESS) {
5403 /*
5404 * Copy the data back into callers buffer
5405 */
5406 eli->newest_seq_num = el_info->newest_seq_num;
5407 eli->oldest_seq_num = el_info->oldest_seq_num;
5408 eli->clear_seq_num = el_info->clear_seq_num;
5409 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5410 eli->boot_seq_num = el_info->boot_seq_num;
5411 } else
5412 dev_err(&instance->pdev->dev, "DCMD failed "
5413 "from %s\n", __func__);
5414
5415 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5416 el_info, el_info_h);
5417
5418 megasas_return_cmd(instance, cmd);
5419
5420 return 0;
5421 }
5422
5423 /**
5424 * megasas_register_aen - Registers for asynchronous event notification
5425 * @instance: Adapter soft state
5426 * @seq_num: The starting sequence number
5427 * @class_locale: Class of the event
5428 *
5429 * This function subscribes for AEN for events beyond the @seq_num. It requests
5430 * to be notified if and only if the event is of type @class_locale
5431 */
5432 static int
5433 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5434 u32 class_locale_word)
5435 {
5436 int ret_val;
5437 struct megasas_cmd *cmd;
5438 struct megasas_dcmd_frame *dcmd;
5439 union megasas_evt_class_locale curr_aen;
5440 union megasas_evt_class_locale prev_aen;
5441
5442 /*
5443 * If there is an AEN pending already (aen_cmd), check if the
5444 * class_locale of that pending AEN is inclusive of the new
5445 * AEN request we currently have. If it is, then we don't have
5446 * to do anything. In other words, whichever events the current
5447 * AEN request is subscribing to, have already been subscribed
5448 * to.
5449 *
5450 * If the old_cmd is _not_ inclusive, then we have to abort
5451 * that command, form a class_locale that is superset of both
5452 * old and current and re-issue to the FW
5453 */
5454
5455 curr_aen.word = class_locale_word;
5456
5457 if (instance->aen_cmd) {
5458
5459 prev_aen.word =
5460 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5461
5462 /*
5463 * A class whose enum value is smaller is inclusive of all
5464 * higher values. If a PROGRESS (= -1) was previously
5465 * registered, then new registration requests for higher
5466 * classes need not be sent to FW. They are automatically
5467 * included.
5468 *
5469 * Locale numbers don't have such hierarchy. They are bitmap
5470 * values
5471 */
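/*
 * Example (illustrative values): a pending AEN registered with class
 * PROGRESS (-1) and locale 0x3 already covers a new request for a higher
 * class with locale 0x1, so nothing is re-issued. A new locale bit not in
 * 0x3 (say 0x4) would fail the check below; the locales are then OR-ed,
 * the lower class is kept, and the AEN command is aborted and re-issued.
 */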
5472 if ((prev_aen.members.class <= curr_aen.members.class) &&
5473 !((prev_aen.members.locale & curr_aen.members.locale) ^
5474 curr_aen.members.locale)) {
5475 /*
5476 * Previously issued event registration includes
5477 * current request. Nothing to do.
5478 */
5479 return 0;
5480 } else {
5481 curr_aen.members.locale |= prev_aen.members.locale;
5482
5483 if (prev_aen.members.class < curr_aen.members.class)
5484 curr_aen.members.class = prev_aen.members.class;
5485
5486 instance->aen_cmd->abort_aen = 1;
5487 ret_val = megasas_issue_blocked_abort_cmd(instance,
5488 instance->
5489 aen_cmd, 30);
5490
5491 if (ret_val) {
5492 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5493 "previous AEN command\n");
5494 return ret_val;
5495 }
5496 }
5497 }
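/*
 * Example of the merge above (values are illustrative only): if the
 * pending AEN was registered with class PROGRESS (-1) and locale bit
 * 0x0001, and a new request asks for a numerically higher class with
 * locale bit 0x0002, the re-issued registration uses class PROGRESS
 * (the more inclusive of the two) and locale 0x0003 (the union of the
 * two bitmaps).
 */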
5498
5499 cmd = megasas_get_cmd(instance);
5500
5501 if (!cmd)
5502 return -ENOMEM;
5503
5504 dcmd = &cmd->frame->dcmd;
5505
5506 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5507
5508 /*
5509 * Prepare DCMD for aen registration
5510 */
5511 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5512
5513 dcmd->cmd = MFI_CMD_DCMD;
5514 dcmd->cmd_status = 0x0;
5515 dcmd->sge_count = 1;
5516 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5517 dcmd->timeout = 0;
5518 dcmd->pad_0 = 0;
5519 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5520 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5521 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5522 instance->last_seq_num = seq_num;
5523 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5524 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5525 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5526
5527 if (instance->aen_cmd != NULL) {
5528 megasas_return_cmd(instance, cmd);
5529 return 0;
5530 }
5531
5532 /*
5533 * Store reference to the cmd used to register for AEN. When an
5534 * application wants us to register for AEN, we have to abort this
5535 * cmd and re-register with a new EVENT LOCALE supplied by that app
5536 */
5537 instance->aen_cmd = cmd;
5538
5539 /*
5540 * Issue the aen registration frame
5541 */
5542 instance->instancet->issue_dcmd(instance, cmd);
5543
5544 return 0;
5545 }
5546
5547 /**
5548 * megasas_start_aen - Subscribes to AEN during driver load time
5549 * @instance: Adapter soft state
5550 */
5551 static int megasas_start_aen(struct megasas_instance *instance)
5552 {
5553 struct megasas_evt_log_info eli;
5554 union megasas_evt_class_locale class_locale;
5555
5556 /*
5557 * Get the latest sequence number from FW
5558 */
5559 memset(&eli, 0, sizeof(eli));
5560
5561 if (megasas_get_seq_num(instance, &eli))
5562 return -1;
5563
5564 /*
5565 * Register AEN with FW for latest sequence number plus 1
5566 */
5567 class_locale.members.reserved = 0;
5568 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5569 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5570
5571 return megasas_register_aen(instance,
5572 le32_to_cpu(eli.newest_seq_num) + 1,
5573 class_locale.word);
5574 }
5575
5576 /**
5577 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5578 * @instance: Adapter soft state
5579 */
5580 static int megasas_io_attach(struct megasas_instance *instance)
5581 {
5582 struct Scsi_Host *host = instance->host;
5583
5584 /*
5585 * Export parameters required by SCSI mid-layer
5586 */
5587 host->irq = instance->pdev->irq;
5588 host->unique_id = instance->unique_id;
5589 host->can_queue = instance->max_scsi_cmds;
5590 host->this_id = instance->init_id;
5591 host->sg_tablesize = instance->max_num_sge;
5592
5593 if (instance->fw_support_ieee)
5594 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5595
5596 /*
5597 * Check if the module parameter value for max_sectors can be used
5598 */
5599 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5600 instance->max_sectors_per_req = max_sectors;
5601 else {
5602 if (max_sectors) {
5603 if (((instance->pdev->device ==
5604 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5605 (instance->pdev->device ==
5606 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5607 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5608 instance->max_sectors_per_req = max_sectors;
5609 } else {
5610 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5611 "and <= %d (or < 1MB for GEN2 controller)\n",
5612 instance->max_sectors_per_req);
5613 }
5614 }
5615 }
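/*
 * Illustrative effect of the bounds check above: loading the module
 * with max_sectors=256 caps each request at 128 KiB (assuming 512-byte
 * sectors), provided 256 is below the controller's current limit;
 * larger values, up to MEGASAS_MAX_SECTORS, are only honoured on the
 * GEN2 controllers listed above.
 */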
5616
5617 host->max_sectors = instance->max_sectors_per_req;
5618 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5619 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5620 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5621 host->max_lun = MEGASAS_MAX_LUN;
5622 host->max_cmd_len = 16;
5623
5624 /*
5625 * Notify the mid-layer about the new controller
5626 */
5627 if (scsi_add_host(host, &instance->pdev->dev)) {
5628 dev_err(&instance->pdev->dev,
5629 "Failed to add host from %s %d\n",
5630 __func__, __LINE__);
5631 return -ENODEV;
5632 }
5633
5634 return 0;
5635 }
5636
5637 static int
5638 megasas_set_dma_mask(struct pci_dev *pdev)
5639 {
5640 /*
5641 * All our controllers are capable of performing 64-bit DMA
5642 */
5643 if (IS_DMA64) {
5644 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5645
5646 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5647 goto fail_set_dma_mask;
5648 }
5649 } else {
5650 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5651 goto fail_set_dma_mask;
5652 }
5653 /*
5654 * Ensure that all data structures are allocated in 32-bit
5655 * memory.
5656 */
5657 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5658 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
5659 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5660 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5661 dev_info(&pdev->dev, "set 32bit DMA mask "
5662 "and 32 bit consistent mask\n");
5663 else
5664 goto fail_set_dma_mask;
5665 }
5666
5667 return 0;
5668
5669 fail_set_dma_mask:
5670 return 1;
5671 }
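/*
 * Summary of the policy above: streaming DMA uses a 64-bit mask when
 * the platform supports it (falling back to 32-bit otherwise), while
 * the consistent (coherent) mask is always kept at 32 bits so that
 * firmware-visible control structures stay in low memory.
 */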
5672
5673 /**
5674 * megasas_probe_one - PCI hotplug entry point
5675 * @pdev: PCI device structure
5676 * @id: PCI ids of supported hotplugged adapter
5677 */
5678 static int megasas_probe_one(struct pci_dev *pdev,
5679 const struct pci_device_id *id)
5680 {
5681 int rval, pos;
5682 struct Scsi_Host *host;
5683 struct megasas_instance *instance;
5684 u16 control = 0;
5685 struct fusion_context *fusion = NULL;
5686
5687 /* Reset MSI-X in the kdump kernel */
5688 if (reset_devices) {
5689 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5690 if (pos) {
5691 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5692 &control);
5693 if (control & PCI_MSIX_FLAGS_ENABLE) {
5694 dev_info(&pdev->dev, "resetting MSI-X\n");
5695 pci_write_config_word(pdev,
5696 pos + PCI_MSIX_FLAGS,
5697 control &
5698 ~PCI_MSIX_FLAGS_ENABLE);
5699 }
5700 }
5701 }
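/*
 * Note: the crashed kernel may have left MSI-X enabled on the adapter,
 * so the block above clears PCI_MSIX_FLAGS_ENABLE to give the kdump
 * kernel a clean interrupt configuration before the device is set up.
 */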
5702
5703 /*
5704 * PCI prepping: enable device, set bus mastering and DMA mask
5705 */
5706 rval = pci_enable_device_mem(pdev);
5707
5708 if (rval) {
5709 return rval;
5710 }
5711
5712 pci_set_master(pdev);
5713
5714 if (megasas_set_dma_mask(pdev))
5715 goto fail_set_dma_mask;
5716
5717 host = scsi_host_alloc(&megasas_template,
5718 sizeof(struct megasas_instance));
5719
5720 if (!host) {
5721 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
5722 goto fail_alloc_instance;
5723 }
5724
5725 instance = (struct megasas_instance *)host->hostdata;
5726 memset(instance, 0, sizeof(*instance));
5727 atomic_set(&instance->fw_reset_no_pci_access, 0);
5728 instance->pdev = pdev;
5729
5730 switch (instance->pdev->device) {
5731 case PCI_DEVICE_ID_LSI_FUSION:
5732 case PCI_DEVICE_ID_LSI_PLASMA:
5733 case PCI_DEVICE_ID_LSI_INVADER:
5734 case PCI_DEVICE_ID_LSI_FURY:
5735 case PCI_DEVICE_ID_LSI_INTRUDER:
5736 case PCI_DEVICE_ID_LSI_INTRUDER_24:
5737 case PCI_DEVICE_ID_LSI_CUTLASS_52:
5738 case PCI_DEVICE_ID_LSI_CUTLASS_53:
5739 {
5740 instance->ctrl_context_pages =
5741 get_order(sizeof(struct fusion_context));
5742 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
5743 instance->ctrl_context_pages);
5744 if (!instance->ctrl_context) {
5745 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5746 "memory for Fusion context info\n");
5747 goto fail_alloc_dma_buf;
5748 }
5749 fusion = instance->ctrl_context;
5750 memset(fusion, 0,
5751 ((1 << PAGE_SHIFT) << instance->ctrl_context_pages));
5752 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
5753 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
5754 fusion->adapter_type = THUNDERBOLT_SERIES;
5755 else
5756 fusion->adapter_type = INVADER_SERIES;
5757 }
5758 break;
5759 default: /* For all other supported controllers */
5760
5761 instance->producer =
5762 pci_alloc_consistent(pdev, sizeof(u32),
5763 &instance->producer_h);
5764 instance->consumer =
5765 pci_alloc_consistent(pdev, sizeof(u32),
5766 &instance->consumer_h);
5767
5768 if (!instance->producer || !instance->consumer) {
5769 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5770 "memory for producer, consumer\n");
5771 goto fail_alloc_dma_buf;
5772 }
5773
5774 *instance->producer = 0;
5775 *instance->consumer = 0;
5776 break;
5777 }
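/*
 * The switch above selects the per-controller bookkeeping model:
 * Fusion-class adapters (Thunderbolt and Invader series and later)
 * get a fusion_context allocated from whole pages, while all other
 * (MFI) adapters use a pair of 32-bit producer/consumer indices in
 * DMA-coherent memory for reply processing.
 */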
5778
5779 /* Crash dump feature related initialisation */
5780 instance->drv_buf_index = 0;
5781 instance->drv_buf_alloc = 0;
5782 instance->crash_dump_fw_support = 0;
5783 instance->crash_dump_app_support = 0;
5784 instance->fw_crash_state = UNAVAILABLE;
5785 spin_lock_init(&instance->crashdump_lock);
5786 instance->crash_dump_buf = NULL;
5787
5788 megasas_poll_wait_aen = 0;
5789 instance->flag_ieee = 0;
5790 instance->ev = NULL;
5791 instance->issuepend_done = 1;
5792 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5793 instance->is_imr = 0;
5794
5795 instance->evt_detail = pci_alloc_consistent(pdev,
5796 sizeof(struct
5797 megasas_evt_detail),
5798 &instance->evt_detail_h);
5799
5800 if (!instance->evt_detail) {
5801 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
5802 "event detail structure\n");
5803 goto fail_alloc_dma_buf;
5804 }
5805
5806 if (!reset_devices) {
5807 instance->system_info_buf = pci_zalloc_consistent(pdev,
5808 sizeof(struct MR_DRV_SYSTEM_INFO),
5809 &instance->system_info_h);
5810 if (!instance->system_info_buf)
5811 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
5812
5813 instance->pd_info = pci_alloc_consistent(pdev,
5814 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
5815
5816 if (!instance->pd_info)
5817 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
5818
5819 instance->crash_dump_buf = pci_alloc_consistent(pdev,
5820 CRASH_DMA_BUF_SIZE,
5821 &instance->crash_dump_h);
5822 if (!instance->crash_dump_buf)
5823 dev_err(&pdev->dev, "Can't allocate Firmware "
5824 "crash dump DMA buffer\n");
5825 }
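/*
 * The allocations above (system info, PD info and the firmware crash
 * dump buffer) are skipped when booting a kdump kernel (reset_devices)
 * to keep the memory footprint small; failure to allocate any of them
 * is logged but is not fatal to the probe.
 */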
5826
5827 /*
5828 * Initialize locks and queues
5829 */
5830 INIT_LIST_HEAD(&instance->cmd_pool);
5831 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
5832
5833 atomic_set(&instance->fw_outstanding,0);
5834
5835 init_waitqueue_head(&instance->int_cmd_wait_q);
5836 init_waitqueue_head(&instance->abort_cmd_wait_q);
5837
5838 spin_lock_init(&instance->mfi_pool_lock);
5839 spin_lock_init(&instance->hba_lock);
5840 spin_lock_init(&instance->completion_lock);
5841
5842 mutex_init(&instance->reset_mutex);
5843 mutex_init(&instance->hba_mutex);
5844
5845 /*
5846 * Initialize PCI related and misc parameters
5847 */
5848 instance->host = host;
5849 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
5850 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
5851 instance->ctrl_info = NULL;
5852
5853
5854 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5855 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
5856 instance->flag_ieee = 1;
5857
5858 megasas_dbg_lvl = 0;
5859 instance->flag = 0;
5860 instance->unload = 1;
5861 instance->last_time = 0;
5862 instance->disableOnlineCtrlReset = 1;
5863 instance->UnevenSpanSupport = 0;
5864
5865 if (instance->ctrl_context) {
5866 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
5867 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
5868 } else
5869 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
5870
5871 /*
5872 * Initialize MFI Firmware
5873 */
5874 if (megasas_init_fw(instance))
5875 goto fail_init_mfi;
5876
5877 if (instance->requestorId) {
5878 if (instance->PlasmaFW111) {
5879 instance->vf_affiliation_111 =
5880 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
5881 &instance->vf_affiliation_111_h);
5882 if (!instance->vf_affiliation_111)
5883 dev_warn(&pdev->dev, "Can't allocate "
5884 "memory for VF affiliation buffer\n");
5885 } else {
5886 instance->vf_affiliation =
5887 pci_alloc_consistent(pdev,
5888 (MAX_LOGICAL_DRIVES + 1) *
5889 sizeof(struct MR_LD_VF_AFFILIATION),
5890 &instance->vf_affiliation_h);
5891 if (!instance->vf_affiliation)
5892 dev_warn(&pdev->dev, "Can't allocate "
5893 "memory for VF affiliation buffer\n");
5894 }
5895 }
5896
5897 /*
5898 * Store instance in PCI softstate
5899 */
5900 pci_set_drvdata(pdev, instance);
5901
5902 /*
5903 * Add this controller to megasas_mgmt_info structure so that it
5904 * can be exported to management applications
5905 */
5906 megasas_mgmt_info.count++;
5907 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
5908 megasas_mgmt_info.max_index++;
5909
5910 /*
5911 * Register with SCSI mid-layer
5912 */
5913 if (megasas_io_attach(instance))
5914 goto fail_io_attach;
5915
5916 instance->unload = 0;
5917 /*
5918 * Trigger SCSI to scan our drives
5919 */
5920 scsi_scan_host(host);
5921
5922 /*
5923 * Initiate AEN (Asynchronous Event Notification)
5924 */
5925 if (megasas_start_aen(instance)) {
5926 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
5927 goto fail_start_aen;
5928 }
5929
5930 /* Get current SR-IOV LD/VF affiliation */
5931 if (instance->requestorId)
5932 megasas_get_ld_vf_affiliation(instance, 1);
5933
5934 return 0;
5935
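/*
 * Error unwind: each label below releases what was set up before the
 * corresponding failure point, in roughly the reverse order of the
 * successful path above.
 */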
5936 fail_start_aen:
5937 fail_io_attach:
5938 megasas_mgmt_info.count--;
5939 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
5940 megasas_mgmt_info.max_index--;
5941
5942 instance->instancet->disable_intr(instance);
5943 megasas_destroy_irqs(instance);
5944
5945 if (instance->ctrl_context)
5946 megasas_release_fusion(instance);
5947 else
5948 megasas_release_mfi(instance);
5949 if (instance->msix_vectors)
5950 pci_disable_msix(instance->pdev);
5951 fail_init_mfi:
5952 fail_alloc_dma_buf:
5953 if (instance->evt_detail)
5954 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
5955 instance->evt_detail,
5956 instance->evt_detail_h);
5957
5958 if (instance->pd_info)
5959 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
5960 instance->pd_info,
5961 instance->pd_info_h);
5962 if (instance->producer)
5963 pci_free_consistent(pdev, sizeof(u32), instance->producer,
5964 instance->producer_h);
5965 if (instance->consumer)
5966 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
5967 instance->consumer_h);
5968 scsi_host_put(host);
5969
5970 fail_alloc_instance:
5971 fail_set_dma_mask:
5972 pci_disable_device(pdev);
5973
5974 return -ENODEV;
5975 }
5976
5977 /**
5978 * megasas_flush_cache - Requests FW to flush all its caches
5979 * @instance: Adapter soft state
5980 */
5981 static void megasas_flush_cache(struct megasas_instance *instance)
5982 {
5983 struct megasas_cmd *cmd;
5984 struct megasas_dcmd_frame *dcmd;
5985
5986 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
5987 return;
5988
5989 cmd = megasas_get_cmd(instance);
5990
5991 if (!cmd)
5992 return;
5993
5994 dcmd = &cmd->frame->dcmd;
5995
5996 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5997
5998 dcmd->cmd = MFI_CMD_DCMD;
5999 dcmd->cmd_status = 0x0;
6000 dcmd->sge_count = 0;
6001 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6002 dcmd->timeout = 0;
6003 dcmd->pad_0 = 0;
6004 dcmd->data_xfer_len = 0;
6005 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
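/* Request a flush of both the controller cache and the disk drive caches */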
6006 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6007
6008 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6009 != DCMD_SUCCESS) {
6010 dev_err(&instance->pdev->dev,
6011 "return from %s %d\n", __func__, __LINE__);
6012 return;
6013 }
6014
6015 megasas_return_cmd(instance, cmd);
6016 }
6017
6018 /**
6019 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6020 * @instance: Adapter soft state
6021 * @opcode: Shutdown/Hibernate
6022 */
6023 static void megasas_shutdown_controller(struct megasas_instance *instance,
6024 u32 opcode)
6025 {
6026 struct megasas_cmd *cmd;
6027 struct megasas_dcmd_frame *dcmd;
6028
6029 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6030 return;
6031
6032 cmd = megasas_get_cmd(instance);
6033
6034 if (!cmd)
6035 return;
6036
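/*
 * Abort any blocking DCMDs still registered with the firmware (AEN
 * wait, RAID map update, JBOD sequence number sync) so nothing is in
 * flight when the shutdown/hibernate DCMD is issued below.
 */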
6037 if (instance->aen_cmd)
6038 megasas_issue_blocked_abort_cmd(instance,
6039 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6040 if (instance->map_update_cmd)
6041 megasas_issue_blocked_abort_cmd(instance,
6042 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6043 if (instance->jbod_seq_cmd)
6044 megasas_issue_blocked_abort_cmd(instance,
6045 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6046
6047 dcmd = &cmd->frame->dcmd;
6048
6049 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6050
6051 dcmd->cmd = MFI_CMD_DCMD;
6052 dcmd->cmd_status = 0x0;
6053 dcmd->sge_count = 0;
6054 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6055 dcmd->timeout = 0;
6056 dcmd->pad_0 = 0;
6057 dcmd->data_xfer_len = 0;
6058 dcmd->opcode = cpu_to_le32(opcode);
6059
6060 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6061 != DCMD_SUCCESS) {
6062 dev_err(&instance->pdev->dev,
6063 "return from %s %d\n", __func__, __LINE__);
6064 return;
6065 }
6066
6067 megasas_return_cmd(instance, cmd);
6068 }
6069
6070 #ifdef CONFIG_PM
6071 /**
6072 * megasas_suspend - driver suspend entry point
6073 * @pdev: PCI device structure
6074 * @state: PCI power state to suspend routine
6075 */
6076 static int
6077 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6078 {
6079 struct Scsi_Host *host;
6080 struct megasas_instance *instance;
6081
6082 instance = pci_get_drvdata(pdev);
6083 host = instance->host;
6084 instance->unload = 1;
6085
6086 /* Shutdown SR-IOV heartbeat timer */
6087 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6088 del_timer_sync(&instance->sriov_heartbeat_timer);
6089
6090 megasas_flush_cache(instance);
6091 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6092
6093 /* cancel the delayed work if it is still queued */
6094 if (instance->ev != NULL) {
6095 struct megasas_aen_event *ev = instance->ev;
6096 cancel_delayed_work_sync(&ev->hotplug_work);
6097 instance->ev = NULL;
6098 }
6099
6100 tasklet_kill(&instance->isr_tasklet);
6101
6102 pci_set_drvdata(instance->pdev, instance);
6103 instance->instancet->disable_intr(instance);
6104
6105 megasas_destroy_irqs(instance);
6106
6107 if (instance->msix_vectors)
6108 pci_disable_msix(instance->pdev);
6109
6110 pci_save_state(pdev);
6111 pci_disable_device(pdev);
6112
6113 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6114
6115 return 0;
6116 }
6117
6118 /**
6119 * megasas_resume - driver resume entry point
6120 * @pdev: PCI device structure
6121 */
6122 static int
6123 megasas_resume(struct pci_dev *pdev)
6124 {
6125 int rval;
6126 struct Scsi_Host *host;
6127 struct megasas_instance *instance;
6128
6129 instance = pci_get_drvdata(pdev);
6130 host = instance->host;
6131 pci_set_power_state(pdev, PCI_D0);
6132 pci_enable_wake(pdev, PCI_D0, 0);
6133 pci_restore_state(pdev);
6134
6135 /*
6136 * PCI prepping: enable device, set bus mastering and DMA mask
6137 */
6138 rval = pci_enable_device_mem(pdev);
6139
6140 if (rval) {
6141 dev_err(&pdev->dev, "Enable device failed\n");
6142 return rval;
6143 }
6144
6145 pci_set_master(pdev);
6146
6147 if (megasas_set_dma_mask(pdev))
6148 goto fail_set_dma_mask;
6149
6150 /*
6151 * Initialize MFI Firmware
6152 */
6153
6154 atomic_set(&instance->fw_outstanding, 0);
6155
6156 /*
6157 * We expect the FW state to be READY
6158 */
6159 if (megasas_transition_to_ready(instance, 0))
6160 goto fail_ready_state;
6161
6162 /* Now re-enable MSI-X */
6163 if (instance->msix_vectors &&
6164 pci_enable_msix_exact(instance->pdev, instance->msixentry,
6165 instance->msix_vectors))
6166 goto fail_reenable_msix;
6167
6168 if (instance->ctrl_context) {
6169 megasas_reset_reply_desc(instance);
6170 if (megasas_ioc_init_fusion(instance)) {
6171 megasas_free_cmds(instance);
6172 megasas_free_cmds_fusion(instance);
6173 goto fail_init_mfi;
6174 }
6175 if (!megasas_get_map_info(instance))
6176 megasas_sync_map_info(instance);
6177 } else {
6178 *instance->producer = 0;
6179 *instance->consumer = 0;
6180 if (megasas_issue_init_mfi(instance))
6181 goto fail_init_mfi;
6182 }
6183
6184 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6185 (unsigned long)instance);
6186
6187 if (instance->msix_vectors ?
6188 megasas_setup_irqs_msix(instance, 0) :
6189 megasas_setup_irqs_ioapic(instance))
6190 goto fail_init_mfi;
6191
6192 /* Re-launch SR-IOV heartbeat timer */
6193 if (instance->requestorId) {
6194 if (!megasas_sriov_start_heartbeat(instance, 0))
6195 megasas_start_timer(instance,
6196 &instance->sriov_heartbeat_timer,
6197 megasas_sriov_heartbeat_handler,
6198 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6199 else {
6200 instance->skip_heartbeat_timer_del = 1;
6201 goto fail_init_mfi;
6202 }
6203 }
6204
6205 instance->instancet->enable_intr(instance);
6206 megasas_setup_jbod_map(instance);
6207 instance->unload = 0;
6208
6209 /*
6210 * Initiate AEN (Asynchronous Event Notification)
6211 */
6212 if (megasas_start_aen(instance))
6213 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6214
6215 return 0;
6216
6217 fail_init_mfi:
6218 if (instance->evt_detail)
6219 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6220 instance->evt_detail,
6221 instance->evt_detail_h);
6222
6223 if (instance->pd_info)
6224 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6225 instance->pd_info,
6226 instance->pd_info_h);
6227 if (instance->producer)
6228 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6229 instance->producer_h);
6230 if (instance->consumer)
6231 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6232 instance->consumer_h);
6233 scsi_host_put(host);
6234
6235 fail_set_dma_mask:
6236 fail_ready_state:
6237 fail_reenable_msix:
6238
6239 pci_disable_device(pdev);
6240
6241 return -ENODEV;
6242 }
6243 #else
6244 #define megasas_suspend NULL
6245 #define megasas_resume NULL
6246 #endif
6247
6248 /**
6249 * megasas_detach_one - PCI hot"un"plug entry point
6250 * @pdev: PCI device structure
6251 */
6252 static void megasas_detach_one(struct pci_dev *pdev)
6253 {
6254 int i;
6255 struct Scsi_Host *host;
6256 struct megasas_instance *instance;
6257 struct fusion_context *fusion;
6258 u32 pd_seq_map_sz;
6259
6260 instance = pci_get_drvdata(pdev);
6261 instance->unload = 1;
6262 host = instance->host;
6263 fusion = instance->ctrl_context;
6264
6265 /* Shutdown SR-IOV heartbeat timer */
6266 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6267 del_timer_sync(&instance->sriov_heartbeat_timer);
6268
6269 if (instance->fw_crash_state != UNAVAILABLE)
6270 megasas_free_host_crash_buffer(instance);
6271 scsi_remove_host(instance->host);
6272 megasas_flush_cache(instance);
6273 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6274
6275 /* cancel the delayed work if it is still queued */
6276 if (instance->ev != NULL) {
6277 struct megasas_aen_event *ev = instance->ev;
6278 cancel_delayed_work_sync(&ev->hotplug_work);
6279 instance->ev = NULL;
6280 }
6281
6282 /* cancel all wait events */
6283 wake_up_all(&instance->int_cmd_wait_q);
6284
6285 tasklet_kill(&instance->isr_tasklet);
6286
6287 /*
6288 * Take the instance off the instance array. Note that we will not
6289 * decrement the max_index. We let this array be a sparse array
6290 */
6291 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6292 if (megasas_mgmt_info.instance[i] == instance) {
6293 megasas_mgmt_info.count--;
6294 megasas_mgmt_info.instance[i] = NULL;
6295
6296 break;
6297 }
6298 }
6299
6300 instance->instancet->disable_intr(instance);
6301
6302 megasas_destroy_irqs(instance);
6303
6304 if (instance->msix_vectors)
6305 pci_disable_msix(instance->pdev);
6306
6307 if (instance->ctrl_context) {
6308 megasas_release_fusion(instance);
6309 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6310 (sizeof(struct MR_PD_CFG_SEQ) *
6311 (MAX_PHYSICAL_DEVICES - 1));
6312 for (i = 0; i < 2 ; i++) {
6313 if (fusion->ld_map[i])
6314 dma_free_coherent(&instance->pdev->dev,
6315 fusion->max_map_sz,
6316 fusion->ld_map[i],
6317 fusion->ld_map_phys[i]);
6318 if (fusion->ld_drv_map[i])
6319 free_pages((ulong)fusion->ld_drv_map[i],
6320 fusion->drv_map_pages);
6321 if (fusion->pd_seq_sync[i])
6322 dma_free_coherent(&instance->pdev->dev,
6323 pd_seq_map_sz,
6324 fusion->pd_seq_sync[i],
6325 fusion->pd_seq_phys[i]);
6326 }
6327 free_pages((ulong)instance->ctrl_context,
6328 instance->ctrl_context_pages);
6329 } else {
6330 megasas_release_mfi(instance);
6331 pci_free_consistent(pdev, sizeof(u32),
6332 instance->producer,
6333 instance->producer_h);
6334 pci_free_consistent(pdev, sizeof(u32),
6335 instance->consumer,
6336 instance->consumer_h);
6337 }
6338
6339 kfree(instance->ctrl_info);
6340
6341 if (instance->evt_detail)
6342 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6343 instance->evt_detail, instance->evt_detail_h);
6344
6345 if (instance->pd_info)
6346 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6347 instance->pd_info,
6348 instance->pd_info_h);
6349 if (instance->vf_affiliation)
6350 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6351 sizeof(struct MR_LD_VF_AFFILIATION),
6352 instance->vf_affiliation,
6353 instance->vf_affiliation_h);
6354
6355 if (instance->vf_affiliation_111)
6356 pci_free_consistent(pdev,
6357 sizeof(struct MR_LD_VF_AFFILIATION_111),
6358 instance->vf_affiliation_111,
6359 instance->vf_affiliation_111_h);
6360
6361 if (instance->hb_host_mem)
6362 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6363 instance->hb_host_mem,
6364 instance->hb_host_mem_h);
6365
6366 if (instance->crash_dump_buf)
6367 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6368 instance->crash_dump_buf, instance->crash_dump_h);
6369
6370 if (instance->system_info_buf)
6371 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6372 instance->system_info_buf, instance->system_info_h);
6373
6374 scsi_host_put(host);
6375
6376 pci_disable_device(pdev);
6377 }
6378
6379 /**
6380 * megasas_shutdown - Shutdown entry point
6381 * @pdev: PCI device structure
6382 */
6383 static void megasas_shutdown(struct pci_dev *pdev)
6384 {
6385 struct megasas_instance *instance = pci_get_drvdata(pdev);
6386
6387 instance->unload = 1;
6388 megasas_flush_cache(instance);
6389 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6390 instance->instancet->disable_intr(instance);
6391 megasas_destroy_irqs(instance);
6392
6393 if (instance->msix_vectors)
6394 pci_disable_msix(instance->pdev);
6395 }
6396
6397 /**
6398 * megasas_mgmt_open - char node "open" entry point
6399 */
6400 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6401 {
6402 /*
6403 * Allow only those users with admin rights
6404 */
6405 if (!capable(CAP_SYS_ADMIN))
6406 return -EACCES;
6407
6408 return 0;
6409 }
6410
6411 /**
6412 * megasas_mgmt_fasync - Async notifier registration from applications
6413 *
6414 * This function adds the calling process to a driver global queue. When an
6415 * event occurs, SIGIO will be sent to all processes in this queue.
6416 */
6417 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6418 {
6419 int rc;
6420
6421 mutex_lock(&megasas_async_queue_mutex);
6422
6423 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6424
6425 mutex_unlock(&megasas_async_queue_mutex);
6426
6427 if (rc >= 0) {
6428 /* For sanity check when we get ioctl */
6429 filep->private_data = filep;
6430 return 0;
6431 }
6432
6433 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6434
6435 return rc;
6436 }
6437
6438 /**
6439 * megasas_mgmt_poll - char node "poll" entry point
6440 */
6441 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6442 {
6443 unsigned int mask;
6444 unsigned long flags;
6445
6446 poll_wait(file, &megasas_poll_wait, wait);
6447 spin_lock_irqsave(&poll_aen_lock, flags);
6448 if (megasas_poll_wait_aen)
6449 mask = (POLLIN | POLLRDNORM);
6450 else
6451 mask = 0;
6452 megasas_poll_wait_aen = 0;
6453 spin_unlock_irqrestore(&poll_aen_lock, flags);
6454 return mask;
6455 }
6456
6457 /*
6458 * megasas_set_crash_dump_params_ioctl:
6459 * Send CRASH_DUMP_MODE DCMD to all controllers
6460 * @cmd: MFI command frame
6461 */
6462
6463 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6464 {
6465 struct megasas_instance *local_instance;
6466 int i, error = 0;
6467 int crash_support;
6468
6469 crash_support = cmd->frame->dcmd.mbox.w[0];
6470
6471 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6472 local_instance = megasas_mgmt_info.instance[i];
6473 if (local_instance && local_instance->crash_dump_drv_support) {
6474 if ((atomic_read(&local_instance->adprecovery) ==
6475 MEGASAS_HBA_OPERATIONAL) &&
6476 !megasas_set_crash_dump_params(local_instance,
6477 crash_support)) {
6478 local_instance->crash_dump_app_support =
6479 crash_support;
6480 dev_info(&local_instance->pdev->dev,
6481 "Application firmware crash "
6482 "dump mode set success\n");
6483 error = 0;
6484 } else {
6485 dev_info(&local_instance->pdev->dev,
6486 "Application firmware crash "
6487 "dump mode set failed\n");
6488 error = -1;
6489 }
6490 }
6491 }
6492 return error;
6493 }
6494
6495 /**
6496 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6497 * @instance: Adapter soft state
6498 * @argp: User's ioctl packet
6499 */
6500 static int
6501 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6502 struct megasas_iocpacket __user * user_ioc,
6503 struct megasas_iocpacket *ioc)
6504 {
6505 struct megasas_sge32 *kern_sge32;
6506 struct megasas_cmd *cmd;
6507 void *kbuff_arr[MAX_IOCTL_SGE];
6508 dma_addr_t buf_handle = 0;
6509 int error = 0, i;
6510 void *sense = NULL;
6511 dma_addr_t sense_handle;
6512 unsigned long *sense_ptr;
6513
6514 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6515
6516 if (ioc->sge_count > MAX_IOCTL_SGE) {
6517 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6518 ioc->sge_count, MAX_IOCTL_SGE);
6519 return -EINVAL;
6520 }
6521
6522 cmd = megasas_get_cmd(instance);
6523 if (!cmd) {
6524 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6525 return -ENOMEM;
6526 }
6527
6528 /*
6529 * User's IOCTL packet has 2 frames (maximum). Copy those two
6530 * frames into our cmd's frames. cmd->frame's context will get
6531 * overwritten when we copy from user's frames. So set that value
6532 * alone separately
6533 */
6534 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6535 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6536 cmd->frame->hdr.pad_0 = 0;
6537 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6538 MFI_FRAME_SGL64 |
6539 MFI_FRAME_SENSE64));
6540
6541 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6542 error = megasas_set_crash_dump_params_ioctl(cmd);
6543 megasas_return_cmd(instance, cmd);
6544 return error;
6545 }
6546
6547 /*
6548 * The management interface between applications and the fw uses
6549 * MFI frames. E.g, RAID configuration changes, LD property changes
6550 * etc are accomplished through different kinds of MFI frames. The
6551 * driver needs to care only about substituting user buffers with
6552 * kernel buffers in SGLs. The location of SGL is embedded in the
6553 * struct iocpacket itself.
6554 */
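/*
 * Example of the substitution described above (buffer size is
 * illustrative only): for an ioctl carrying one 4 KiB user buffer
 * (sge_count == 1), the loop below allocates a 4 KiB DMA-coherent
 * kernel buffer, rewrites the frame's SGE to point at its bus address,
 * copies the user data in before the command is issued and copies it
 * back out after completion.
 */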
6555 kern_sge32 = (struct megasas_sge32 *)
6556 ((unsigned long)cmd->frame + ioc->sgl_off);
6557
6558 /*
6559 * For each user buffer, create a mirror buffer and copy in
6560 */
6561 for (i = 0; i < ioc->sge_count; i++) {
6562 if (!ioc->sgl[i].iov_len)
6563 continue;
6564
6565 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6566 ioc->sgl[i].iov_len,
6567 &buf_handle, GFP_KERNEL);
6568 if (!kbuff_arr[i]) {
6569 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6570 "kernel SGL buffer for IOCTL\n");
6571 error = -ENOMEM;
6572 goto out;
6573 }
6574
6575 /*
6576 * We don't change the dma_coherent_mask, so
6577 * dma_alloc_coherent only returns 32bit addresses
6578 */
6579 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6580 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6581
6582 /*
6583 * We created a kernel buffer corresponding to the
6584 * user buffer. Now copy in from the user buffer
6585 */
6586 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6587 (u32) (ioc->sgl[i].iov_len))) {
6588 error = -EFAULT;
6589 goto out;
6590 }
6591 }
6592
6593 if (ioc->sense_len) {
6594 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6595 &sense_handle, GFP_KERNEL);
6596 if (!sense) {
6597 error = -ENOMEM;
6598 goto out;
6599 }
6600
6601 sense_ptr =
6602 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6603 *sense_ptr = cpu_to_le32(sense_handle);
6604 }
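/*
 * Note: the location at sense_off inside the kernel copy of the frame
 * now holds the DMA address of the kernel sense buffer, while the
 * user's original frame still holds the user-space pointer that the
 * sense copy-out path below reads back.
 */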
6605
6606 /*
6607 * Set the sync_cmd flag so that the ISR knows not to complete this
6608 * cmd to the SCSI mid-layer
6609 */
6610 cmd->sync_cmd = 1;
6611 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6612 cmd->sync_cmd = 0;
6613 dev_err(&instance->pdev->dev,
6614 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6615 __func__, __LINE__, cmd->frame->dcmd.opcode,
6616 cmd->cmd_status_drv);
6617 return -EBUSY;
6618 }
6619
6620 cmd->sync_cmd = 0;
6621
6622 if (instance->unload == 1) {
6623 dev_info(&instance->pdev->dev, "Driver unload is in progress "
6624 "don't submit data to application\n");
6625 goto out;
6626 }
6627 /*
6628 * copy out the kernel buffers to user buffers
6629 */
6630 for (i = 0; i < ioc->sge_count; i++) {
6631 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
6632 ioc->sgl[i].iov_len)) {
6633 error = -EFAULT;
6634 goto out;
6635 }
6636 }
6637
6638 /*
6639 * copy out the sense
6640 */
6641 if (ioc->sense_len) {
6642 /*
6643 * sense_ptr points to the location that has the user
6644 * sense buffer address
6645 */
6646 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
6647 ioc->sense_off);
6648
6649 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
6650 sense, ioc->sense_len)) {
6651 dev_err(&instance->pdev->dev, "Failed to copy out to user "
6652 "sense data\n");
6653 error = -EFAULT;
6654 goto out;
6655 }
6656 }
6657
6658 /*
6659 * copy the status codes returned by the fw
6660 */
6661 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
6662 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
6663 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
6664 error = -EFAULT;
6665 }
6666
6667 out:
6668 if (sense) {
6669 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
6670 sense, sense_handle);
6671 }
6672
6673 for (i = 0; i < ioc->sge_count; i++) {
6674 if (kbuff_arr[i]) {
6675 dma_free_coherent(&instance->pdev->dev,
6676 le32_to_cpu(kern_sge32[i].length),
6677 kbuff_arr[i],
6678 le32_to_cpu(kern_sge32[i].phys_addr));
6679 kbuff_arr[i] = NULL;
6680 }
6681 }
6682
6683 megasas_return_cmd(instance, cmd);
6684 return error;
6685 }
6686
6687 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6688 {
6689 struct megasas_iocpacket __user *user_ioc =
6690 (struct megasas_iocpacket __user *)arg;
6691 struct megasas_iocpacket *ioc;
6692 struct megasas_instance *instance;
6693 int error;
6694 int i;
6695 unsigned long flags;
6696 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6697
6698 ioc = memdup_user(user_ioc, sizeof(*ioc));
6699 if (IS_ERR(ioc))
6700 return PTR_ERR(ioc);
6701
6702 instance = megasas_lookup_instance(ioc->host_no);
6703 if (!instance) {
6704 error = -ENODEV;
6705 goto out_kfree_ioc;
6706 }
6707
6708 /* Adjust ioctl wait time for VF mode */
6709 if (instance->requestorId)
6710 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
6711
6712 /* Block ioctls in VF mode */
6713 if (instance->requestorId && !allow_vf_ioctls) {
6714 error = -ENODEV;
6715 goto out_kfree_ioc;
6716 }
6717
6718 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6719 dev_err(&instance->pdev->dev, "Controller in crit error\n");
6720 error = -ENODEV;
6721 goto out_kfree_ioc;
6722 }
6723
6724 if (instance->unload == 1) {
6725 error = -ENODEV;
6726 goto out_kfree_ioc;
6727 }
6728
6729 if (down_interruptible(&instance->ioctl_sem)) {
6730 error = -ERESTARTSYS;
6731 goto out_kfree_ioc;
6732 }
6733
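/*
 * If an online controller reset is in progress, poll once a second
 * (up to wait_time seconds) for the adapter to become operational
 * before issuing the ioctl; give up with -ENODEV if it never does.
 */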
6734 for (i = 0; i < wait_time; i++) {
6735
6736 spin_lock_irqsave(&instance->hba_lock, flags);
6737 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6738 spin_unlock_irqrestore(&instance->hba_lock, flags);
6739 break;
6740 }
6741 spin_unlock_irqrestore(&instance->hba_lock, flags);
6742
6743 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6744 dev_notice(&instance->pdev->dev, "waiting "
6745 "for controller reset to finish\n");
6746 }
6747
6748 msleep(1000);
6749 }
6750
6751 spin_lock_irqsave(&instance->hba_lock, flags);
6752 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6753 spin_unlock_irqrestore(&instance->hba_lock, flags);
6754
6755 dev_err(&instance->pdev->dev, "timed out while "
6756 "waiting for HBA to recover\n");
6757 error = -ENODEV;
6758 goto out_up;
6759 }
6760 spin_unlock_irqrestore(&instance->hba_lock, flags);
6761
6762 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
6763 out_up:
6764 up(&instance->ioctl_sem);
6765
6766 out_kfree_ioc:
6767 kfree(ioc);
6768 return error;
6769 }
6770
6771 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6772 {
6773 struct megasas_instance *instance;
6774 struct megasas_aen aen;
6775 int error;
6776 int i;
6777 unsigned long flags;
6778 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6779
6780 if (file->private_data != file) {
6781 printk(KERN_DEBUG "megasas: fasync_helper was not "
6782 "called first\n");
6783 return -EINVAL;
6784 }
6785
6786 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
6787 return -EFAULT;
6788
6789 instance = megasas_lookup_instance(aen.host_no);
6790
6791 if (!instance)
6792 return -ENODEV;
6793
6794 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
6795 return -ENODEV;
6796 }
6797
6798 if (instance->unload == 1) {
6799 return -ENODEV;
6800 }
6801
6802 for (i = 0; i < wait_time; i++) {
6803
6804 spin_lock_irqsave(&instance->hba_lock, flags);
6805 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
6806 spin_unlock_irqrestore(&instance->hba_lock,
6807 flags);
6808 break;
6809 }
6810
6811 spin_unlock_irqrestore(&instance->hba_lock, flags);
6812
6813 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6814 dev_notice(&instance->pdev->dev, "waiting for "
6815 "controller reset to finish\n");
6816 }
6817
6818 msleep(1000);
6819 }
6820
6821 spin_lock_irqsave(&instance->hba_lock, flags);
6822 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6823 spin_unlock_irqrestore(&instance->hba_lock, flags);
6824 dev_err(&instance->pdev->dev, "timed out while waiting "
6825 "for HBA to recover\n");
6826 return -ENODEV;
6827 }
6828 spin_unlock_irqrestore(&instance->hba_lock, flags);
6829
6830 mutex_lock(&instance->reset_mutex);
6831 error = megasas_register_aen(instance, aen.seq_num,
6832 aen.class_locale_word);
6833 mutex_unlock(&instance->reset_mutex);
6834 return error;
6835 }
6836
6837 /**
6838 * megasas_mgmt_ioctl - char node ioctl entry point
6839 */
6840 static long
6841 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
6842 {
6843 switch (cmd) {
6844 case MEGASAS_IOC_FIRMWARE:
6845 return megasas_mgmt_ioctl_fw(file, arg);
6846
6847 case MEGASAS_IOC_GET_AEN:
6848 return megasas_mgmt_ioctl_aen(file, arg);
6849 }
6850
6851 return -ENOTTY;
6852 }
6853
6854 #ifdef CONFIG_COMPAT
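/*
 * 32-bit applications on a 64-bit kernel pass a
 * compat_megasas_iocpacket whose embedded pointers are 32 bits wide.
 * The handler below rebuilds a native megasas_iocpacket in
 * compat_alloc_user_space() memory, widening the SGL and sense
 * pointers, and then reuses the regular ioctl path.
 */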
6855 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
6856 {
6857 struct compat_megasas_iocpacket __user *cioc =
6858 (struct compat_megasas_iocpacket __user *)arg;
6859 struct megasas_iocpacket __user *ioc =
6860 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
6861 int i;
6862 int error = 0;
6863 compat_uptr_t ptr;
6864 u32 local_sense_off;
6865 u32 local_sense_len;
6866 u32 user_sense_off;
6867
6868 if (clear_user(ioc, sizeof(*ioc)))
6869 return -EFAULT;
6870
6871 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
6872 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
6873 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
6874 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
6875 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
6876 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
6877 return -EFAULT;
6878
6879 /*
6880 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
6881 * sense_len is not null, so prepare the 64bit value under
6882 * the same condition.
6883 */
6884 if (get_user(local_sense_off, &ioc->sense_off) ||
6885 get_user(local_sense_len, &ioc->sense_len) ||
6886 get_user(user_sense_off, &cioc->sense_off))
6887 return -EFAULT;
6888
6889 if (local_sense_len) {
6890 void __user **sense_ioc_ptr =
6891 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
6892 compat_uptr_t *sense_cioc_ptr =
6893 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
6894 if (get_user(ptr, sense_cioc_ptr) ||
6895 put_user(compat_ptr(ptr), sense_ioc_ptr))
6896 return -EFAULT;
6897 }
6898
6899 for (i = 0; i < MAX_IOCTL_SGE; i++) {
6900 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
6901 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
6902 copy_in_user(&ioc->sgl[i].iov_len,
6903 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
6904 return -EFAULT;
6905 }
6906
6907 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
6908
6909 if (copy_in_user(&cioc->frame.hdr.cmd_status,
6910 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
6911 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
6912 return -EFAULT;
6913 }
6914 return error;
6915 }
6916
6917 static long
6918 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
6919 unsigned long arg)
6920 {
6921 switch (cmd) {
6922 case MEGASAS_IOC_FIRMWARE32:
6923 return megasas_mgmt_compat_ioctl_fw(file, arg);
6924 case MEGASAS_IOC_GET_AEN:
6925 return megasas_mgmt_ioctl_aen(file, arg);
6926 }
6927
6928 return -ENOTTY;
6929 }
6930 #endif
6931
6932 /*
6933 * File operations structure for management interface
6934 */
6935 static const struct file_operations megasas_mgmt_fops = {
6936 .owner = THIS_MODULE,
6937 .open = megasas_mgmt_open,
6938 .fasync = megasas_mgmt_fasync,
6939 .unlocked_ioctl = megasas_mgmt_ioctl,
6940 .poll = megasas_mgmt_poll,
6941 #ifdef CONFIG_COMPAT
6942 .compat_ioctl = megasas_mgmt_compat_ioctl,
6943 #endif
6944 .llseek = noop_llseek,
6945 };
6946
6947 /*
6948 * PCI hotplug support registration structure
6949 */
6950 static struct pci_driver megasas_pci_driver = {
6951
6952 .name = "megaraid_sas",
6953 .id_table = megasas_pci_table,
6954 .probe = megasas_probe_one,
6955 .remove = megasas_detach_one,
6956 .suspend = megasas_suspend,
6957 .resume = megasas_resume,
6958 .shutdown = megasas_shutdown,
6959 };
6960
6961 /*
6962 * Sysfs driver attributes
6963 */
6964 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
6965 {
6966 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
6967 MEGASAS_VERSION);
6968 }
6969
6970 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
6971
6972 static ssize_t
6973 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
6974 {
6975 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
6976 MEGASAS_RELDATE);
6977 }
6978
6979 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
6980
6981 static ssize_t
6982 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
6983 {
6984 return sprintf(buf, "%u\n", support_poll_for_event);
6985 }
6986
6987 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
6988 megasas_sysfs_show_support_poll_for_event, NULL);
6989
6990 static ssize_t
6991 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
6992 {
6993 return sprintf(buf, "%u\n", support_device_change);
6994 }
6995
6996 static DRIVER_ATTR(support_device_change, S_IRUGO,
6997 megasas_sysfs_show_support_device_change, NULL);
6998
6999 static ssize_t
7000 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
7001 {
7002 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7003 }
7004
7005 static ssize_t
7006 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
7007 {
7008 int retval = count;
7009
7010 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7011 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7012 retval = -EINVAL;
7013 }
7014 return retval;
7015 }
7016
7017 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
7018 megasas_sysfs_set_dbg_lvl);
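/*
 * The driver attributes defined above appear under the driver's sysfs
 * directory, typically /sys/bus/pci/drivers/megaraid_sas/. For example
 * (illustrative only):
 *
 *   cat /sys/bus/pci/drivers/megaraid_sas/version
 *   echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */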
7019
7020 static void
7021 megasas_aen_polling(struct work_struct *work)
7022 {
7023 struct megasas_aen_event *ev =
7024 container_of(work, struct megasas_aen_event, hotplug_work.work);
7025 struct megasas_instance *instance = ev->instance;
7026 union megasas_evt_class_locale class_locale;
7027 struct Scsi_Host *host;
7028 struct scsi_device *sdev1;
7029 u16 pd_index = 0;
7030 u16 ld_index = 0;
7031 int i, j, doscan = 0;
7032 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7033 int error;
7034 u8 dcmd_ret = DCMD_SUCCESS;
7035
7036 if (!instance) {
7037 printk(KERN_ERR "invalid instance!\n");
7038 kfree(ev);
7039 return;
7040 }
7041
7042 /* Adjust event workqueue thread wait time for VF mode */
7043 if (instance->requestorId)
7044 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7045
7046 /* Don't run the event workqueue thread if OCR is running */
7047 mutex_lock(&instance->reset_mutex);
7048
7049 instance->ev = NULL;
7050 host = instance->host;
7051 if (instance->evt_detail) {
7052 megasas_decode_evt(instance);
7053
7054 switch (le32_to_cpu(instance->evt_detail->code)) {
7055
7056 case MR_EVT_PD_INSERTED:
7057 case MR_EVT_PD_REMOVED:
7058 dcmd_ret = megasas_get_pd_list(instance);
7059 if (dcmd_ret == DCMD_SUCCESS)
7060 doscan = SCAN_PD_CHANNEL;
7061 break;
7062
7063 case MR_EVT_LD_OFFLINE:
7064 case MR_EVT_CFG_CLEARED:
7065 case MR_EVT_LD_DELETED:
7066 case MR_EVT_LD_CREATED:
7067 if (!instance->requestorId ||
7068 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7069 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7070
7071 if (dcmd_ret == DCMD_SUCCESS)
7072 doscan = SCAN_VD_CHANNEL;
7073
7074 break;
7075
7076 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7077 case MR_EVT_FOREIGN_CFG_IMPORTED:
7078 case MR_EVT_LD_STATE_CHANGE:
7079 dcmd_ret = megasas_get_pd_list(instance);
7080
7081 if (dcmd_ret != DCMD_SUCCESS)
7082 break;
7083
7084 if (!instance->requestorId ||
7085 (instance->requestorId && megasas_get_ld_vf_affiliation(instance, 0)))
7086 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7087
7088 if (dcmd_ret != DCMD_SUCCESS)
7089 break;
7090
7091 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7092 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7093 instance->host->host_no);
7094 break;
7095
7096 case MR_EVT_CTRL_PROP_CHANGED:
7097 dcmd_ret = megasas_get_ctrl_info(instance);
7098 break;
7099 default:
7100 doscan = 0;
7101 break;
7102 }
7103 } else {
7104 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7105 mutex_unlock(&instance->reset_mutex);
7106 kfree(ev);
7107 return;
7108 }
7109
7110 mutex_unlock(&instance->reset_mutex);
7111
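/*
 * Re-scan only the channels flagged above: the PD channels expose
 * physical drives in MR_PD_STATE_SYSTEM (i.e. exposed directly to the
 * host), the VD channels expose logical drives with a valid entry in
 * ld_ids. Devices present in the refreshed firmware lists are added,
 * devices that vanished are removed.
 */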
7112 if (doscan & SCAN_PD_CHANNEL) {
7113 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7114 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7115 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7116 sdev1 = scsi_device_lookup(host, i, j, 0);
7117 if (instance->pd_list[pd_index].driveState ==
7118 MR_PD_STATE_SYSTEM) {
7119 if (!sdev1)
7120 scsi_add_device(host, i, j, 0);
7121 else
7122 scsi_device_put(sdev1);
7123 } else {
7124 if (sdev1) {
7125 scsi_remove_device(sdev1);
7126 scsi_device_put(sdev1);
7127 }
7128 }
7129 }
7130 }
7131 }
7132
7133 if (doscan & SCAN_VD_CHANNEL) {
7134 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7135 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7136 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7137 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7138 if (instance->ld_ids[ld_index] != 0xff) {
7139 if (!sdev1)
7140 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7141 else
7142 scsi_device_put(sdev1);
7143 } else {
7144 if (sdev1) {
7145 scsi_remove_device(sdev1);
7146 scsi_device_put(sdev1);
7147 }
7148 }
7149 }
7150 }
7151 }
7152
7153 if (dcmd_ret == DCMD_SUCCESS)
7154 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7155 else
7156 seq_num = instance->last_seq_num;
7157
7158 /* Register AEN with FW for latest sequence number plus 1 */
7159 class_locale.members.reserved = 0;
7160 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7161 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7162
7163 if (instance->aen_cmd != NULL) {
7164 kfree(ev);
7165 return;
7166 }
7167
7168 mutex_lock(&instance->reset_mutex);
7169 error = megasas_register_aen(instance, seq_num,
7170 class_locale.word);
7171 if (error)
7172 dev_err(&instance->pdev->dev,
7173 "register aen failed error %x\n", error);
7174
7175 mutex_unlock(&instance->reset_mutex);
7176 kfree(ev);
7177 }
7178
7179 /**
7180 * megasas_init - Driver load entry point
7181 */
7182 static int __init megasas_init(void)
7183 {
7184 int rval;
7185
7186 /*
7187 * When booted in a kdump kernel, minimize the memory footprint by
7188 * disabling a few features
7189 */
7190 if (reset_devices) {
7191 msix_vectors = 1;
7192 rdpq_enable = 0;
7193 dual_qdepth_disable = 1;
7194 }
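/*
 * In a kdump kernel this limits the driver to a single MSI-X vector
 * and disables the RDPQ and dual-queue-depth features, trading
 * performance for a smaller memory footprint.
 */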
7195
7196 /*
7197 * Announce driver version and other information
7198 */
7199 pr_info("megasas: %s\n", MEGASAS_VERSION);
7200
7201 spin_lock_init(&poll_aen_lock);
7202
7203 support_poll_for_event = 2;
7204 support_device_change = 1;
7205
7206 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7207
7208 /*
7209 * Register character device node
7210 */
7211 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7212
7213 if (rval < 0) {
7214 printk(KERN_DEBUG "megasas: failed to open device node\n");
7215 return rval;
7216 }
7217
7218 megasas_mgmt_majorno = rval;
7219
7220 /*
7221 * Register ourselves as PCI hotplug module
7222 */
7223 rval = pci_register_driver(&megasas_pci_driver);
7224
7225 if (rval) {
7226 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7227 goto err_pcidrv;
7228 }
7229
7230 rval = driver_create_file(&megasas_pci_driver.driver,
7231 &driver_attr_version);
7232 if (rval)
7233 goto err_dcf_attr_ver;
7234
7235 rval = driver_create_file(&megasas_pci_driver.driver,
7236 &driver_attr_release_date);
7237 if (rval)
7238 goto err_dcf_rel_date;
7239
7240 rval = driver_create_file(&megasas_pci_driver.driver,
7241 &driver_attr_support_poll_for_event);
7242 if (rval)
7243 goto err_dcf_support_poll_for_event;
7244
7245 rval = driver_create_file(&megasas_pci_driver.driver,
7246 &driver_attr_dbg_lvl);
7247 if (rval)
7248 goto err_dcf_dbg_lvl;
7249 rval = driver_create_file(&megasas_pci_driver.driver,
7250 &driver_attr_support_device_change);
7251 if (rval)
7252 goto err_dcf_support_device_change;
7253
7254 return rval;
7255
7256 err_dcf_support_device_change:
7257 driver_remove_file(&megasas_pci_driver.driver,
7258 &driver_attr_dbg_lvl);
7259 err_dcf_dbg_lvl:
7260 driver_remove_file(&megasas_pci_driver.driver,
7261 &driver_attr_support_poll_for_event);
7262 err_dcf_support_poll_for_event:
7263 driver_remove_file(&megasas_pci_driver.driver,
7264 &driver_attr_release_date);
7265 err_dcf_rel_date:
7266 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7267 err_dcf_attr_ver:
7268 pci_unregister_driver(&megasas_pci_driver);
7269 err_pcidrv:
7270 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7271 return rval;
7272 }
7273
7274 /**
7275 * megasas_exit - Driver unload entry point
7276 */
7277 static void __exit megasas_exit(void)
7278 {
7279 driver_remove_file(&megasas_pci_driver.driver,
7280 &driver_attr_dbg_lvl);
7281 driver_remove_file(&megasas_pci_driver.driver,
7282 &driver_attr_support_poll_for_event);
7283 driver_remove_file(&megasas_pci_driver.driver,
7284 &driver_attr_support_device_change);
7285 driver_remove_file(&megasas_pci_driver.driver,
7286 &driver_attr_release_date);
7287 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7288
7289 pci_unregister_driver(&megasas_pci_driver);
7290 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7291 }
7292
7293 module_init(megasas_init);
7294 module_exit(megasas_exit);