1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52
53 #include <scsi/scsi.h>
54 #include <scsi/scsi_cmnd.h>
55 #include <scsi/scsi_device.h>
56 #include <scsi/scsi_host.h>
57 #include <scsi/scsi_tcq.h>
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60
61 /*
62 * Number of sectors per IO command
63 * Will be set in megasas_init_mfi if user does not provide
64 */
65 static unsigned int max_sectors;
66 module_param_named(max_sectors, max_sectors, int, 0);
67 MODULE_PARM_DESC(max_sectors,
68 "Maximum number of sectors per IO command");
69
70 static int msix_disable;
71 module_param(msix_disable, int, S_IRUGO);
72 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
73
74 static unsigned int msix_vectors;
75 module_param(msix_vectors, int, S_IRUGO);
76 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
77
78 static int allow_vf_ioctls;
79 module_param(allow_vf_ioctls, int, S_IRUGO);
80 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
81
82 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
83 module_param(throttlequeuedepth, int, S_IRUGO);
84 MODULE_PARM_DESC(throttlequeuedepth,
85 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
86
87 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
88 module_param(resetwaittime, int, S_IRUGO);
89 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
90 "before resetting adapter. Default: 180");
91
92 int smp_affinity_enable = 1;
93 module_param(smp_affinity_enable, int, S_IRUGO);
94 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
95
96 int rdpq_enable = 1;
97 module_param(rdpq_enable, int, S_IRUGO);
98 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
99
100 unsigned int dual_qdepth_disable;
101 module_param(dual_qdepth_disable, int, S_IRUGO);
102 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
103
104 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
105 module_param(scmd_timeout, int, S_IRUGO);
106 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
107
108 MODULE_LICENSE("GPL");
109 MODULE_VERSION(MEGASAS_VERSION);
110 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
111 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
112
113 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
114 static int megasas_get_pd_list(struct megasas_instance *instance);
115 static int megasas_ld_list_query(struct megasas_instance *instance,
116 u8 query_type);
117 static int megasas_issue_init_mfi(struct megasas_instance *instance);
118 static int megasas_register_aen(struct megasas_instance *instance,
119 u32 seq_num, u32 class_locale_word);
120 static void megasas_get_pd_info(struct megasas_instance *instance,
121 struct scsi_device *sdev);
122 static int megasas_get_target_prop(struct megasas_instance *instance,
123 struct scsi_device *sdev);
124 /*
125 * PCI ID table for all supported controllers
126 */
127 static struct pci_device_id megasas_pci_table[] = {
128
129 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
130 /* xscale IOP */
131 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
132 /* ppc IOP */
133 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
134 /* ppc IOP */
135 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
136 /* gen2*/
137 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
138 /* gen2*/
139 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
140 /* skinny*/
141 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
142 /* skinny*/
143 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
144 /* xscale IOP, vega */
145 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
146 /* xscale IOP */
147 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
148 /* Fusion */
149 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
150 /* Plasma */
151 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
152 /* Invader */
153 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
154 /* Fury */
155 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
156 /* Intruder */
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
158 /* Intruder 24 port*/
159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
161 /* VENTURA */
162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
167 {}
168 };
169
170 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
171
172 static int megasas_mgmt_majorno;
173 struct megasas_mgmt_info megasas_mgmt_info;
174 static struct fasync_struct *megasas_async_queue;
175 static DEFINE_MUTEX(megasas_async_queue_mutex);
176
177 static int megasas_poll_wait_aen;
178 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
179 static u32 support_poll_for_event;
180 u32 megasas_dbg_lvl;
181 static u32 support_device_change;
182
183 /* define lock for aen poll */
184 spinlock_t poll_aen_lock;
185
186 void
187 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
188 u8 alt_status);
189 static u32
190 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
191 static int
192 megasas_adp_reset_gen2(struct megasas_instance *instance,
193 struct megasas_register_set __iomem *reg_set);
194 static irqreturn_t megasas_isr(int irq, void *devp);
195 static u32
196 megasas_init_adapter_mfi(struct megasas_instance *instance);
197 u32
198 megasas_build_and_issue_cmd(struct megasas_instance *instance,
199 struct scsi_cmnd *scmd);
200 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
201 int
202 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
203 int seconds);
204 void megasas_fusion_ocr_wq(struct work_struct *work);
205 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
206 int initial);
207
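/**
 * megasas_issue_dcmd - Post a DCMD frame to the firmware
 * @instance: Adapter soft state
 * @cmd: Command to be issued
 *
 * Fires the prepared MFI frame through the per-controller fire_cmd hook
 * with a frame count of zero; completion is handled by the caller (polled
 * or interrupt driven).
 */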
208 void
209 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
210 {
211 instance->instancet->fire_cmd(instance,
212 cmd->frame_phys_addr, 0, instance->reg_set);
213 return;
214 }
215
216 /**
217 * megasas_get_cmd - Get a command from the free pool
218 * @instance: Adapter soft state
219 *
220 * Returns a free command from the pool
221 */
222 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
223 *instance)
224 {
225 unsigned long flags;
226 struct megasas_cmd *cmd = NULL;
227
228 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
229
230 if (!list_empty(&instance->cmd_pool)) {
231 cmd = list_entry((&instance->cmd_pool)->next,
232 struct megasas_cmd, list);
233 list_del_init(&cmd->list);
234 } else {
235 dev_err(&instance->pdev->dev, "Command pool empty!\n");
236 }
237
238 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
239 return cmd;
240 }
241
242 /**
243 * megasas_return_cmd - Return a cmd to free command pool
244 * @instance: Adapter soft state
245 * @cmd: Command packet to be returned to free command pool
246 */
247 void
248 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
249 {
250 unsigned long flags;
251 u32 blk_tags;
252 struct megasas_cmd_fusion *cmd_fusion;
253 struct fusion_context *fusion = instance->ctrl_context;
254
255 /* This flag is used only for fusion adapter.
256 * Wait for Interrupt for Polled mode DCMD
257 */
258 if (cmd->flags & DRV_DCMD_POLLED_MODE)
259 return;
260
261 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
262
263 if (fusion) {
264 blk_tags = instance->max_scsi_cmds + cmd->index;
265 cmd_fusion = fusion->cmd_list[blk_tags];
266 megasas_return_cmd_fusion(instance, cmd_fusion);
267 }
268 cmd->scmd = NULL;
269 cmd->frame_count = 0;
270 cmd->flags = 0;
271 memset(cmd->frame, 0, instance->mfi_frame_size);
272 cmd->frame->io.context = cpu_to_le32(cmd->index);
273 if (!fusion && reset_devices)
274 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
275 list_add(&cmd->list, (&instance->cmd_pool)->next);
276
277 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
278
279 }
280
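/**
 * format_timestamp - Format an event timestamp for logging
 * @timestamp: Raw timestamp reported by the firmware
 *
 * Firmware timestamps are either absolute seconds or, when the top byte is
 * 0xff, seconds relative to controller boot (carried in the low 24 bits).
 * Returns a pointer to a static buffer, so the result is not re-entrant.
 */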
281 static const char *
282 format_timestamp(uint32_t timestamp)
283 {
284 static char buffer[32];
285
286 if ((timestamp & 0xff000000) == 0xff000000)
287 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
288 0x00ffffff);
289 else
290 snprintf(buffer, sizeof(buffer), "%us", timestamp);
291 return buffer;
292 }
293
294 static const char *
295 format_class(int8_t class)
296 {
297 static char buffer[6];
298
299 switch (class) {
300 case MFI_EVT_CLASS_DEBUG:
301 return "debug";
302 case MFI_EVT_CLASS_PROGRESS:
303 return "progress";
304 case MFI_EVT_CLASS_INFO:
305 return "info";
306 case MFI_EVT_CLASS_WARNING:
307 return "WARN";
308 case MFI_EVT_CLASS_CRITICAL:
309 return "CRIT";
310 case MFI_EVT_CLASS_FATAL:
311 return "FATAL";
312 case MFI_EVT_CLASS_DEAD:
313 return "DEAD";
314 default:
315 snprintf(buffer, sizeof(buffer), "%d", class);
316 return buffer;
317 }
318 }
319
320 /**
321 * megasas_decode_evt: Decode FW AEN event and print critical event
322 * for information.
323 * @instance: Adapter soft state
324 */
325 static void
326 megasas_decode_evt(struct megasas_instance *instance)
327 {
328 struct megasas_evt_detail *evt_detail = instance->evt_detail;
329 union megasas_evt_class_locale class_locale;
330 class_locale.word = le32_to_cpu(evt_detail->cl.word);
331
332 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
333 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
334 le32_to_cpu(evt_detail->seq_num),
335 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
336 (class_locale.members.locale),
337 format_class(class_locale.members.class),
338 evt_detail->description);
339 }
340
341 /**
342 * The following functions are defined for xscale
343 * (deviceid : 1064R, PERC5) controllers
344 */
345
346 /**
347 * megasas_enable_intr_xscale - Enables interrupts
348 * @regs: MFI register set
349 */
350 static inline void
351 megasas_enable_intr_xscale(struct megasas_instance *instance)
352 {
353 struct megasas_register_set __iomem *regs;
354
355 regs = instance->reg_set;
356 writel(0, &(regs)->outbound_intr_mask);
357
358 /* Dummy readl to force pci flush */
359 readl(&regs->outbound_intr_mask);
360 }
361
362 /**
363 * megasas_disable_intr_xscale - Disables interrupt
364 * @regs: MFI register set
365 */
366 static inline void
367 megasas_disable_intr_xscale(struct megasas_instance *instance)
368 {
369 struct megasas_register_set __iomem *regs;
370 u32 mask = 0x1f;
371
372 regs = instance->reg_set;
373 writel(mask, &regs->outbound_intr_mask);
374 /* Dummy readl to force pci flush */
375 readl(&regs->outbound_intr_mask);
376 }
377
378 /**
379 * megasas_read_fw_status_reg_xscale - returns the current FW status value
380 * @regs: MFI register set
381 */
382 static u32
383 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
384 {
385 return readl(&(regs)->outbound_msg_0);
386 }
387 /**
388 * megasas_clear_intr_xscale - Check & clear interrupt
389 * @regs: MFI register set
390 */
391 static int
392 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
393 {
394 u32 status;
395 u32 mfiStatus = 0;
396
397 /*
398 * Check if it is our interrupt
399 */
400 status = readl(&regs->outbound_intr_status);
401
402 if (status & MFI_OB_INTR_STATUS_MASK)
403 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
404 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
405 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
406
407 /*
408 * Clear the interrupt by writing back the same value
409 */
410 if (mfiStatus)
411 writel(status, &regs->outbound_intr_status);
412
413 /* Dummy readl to force pci flush */
414 readl(&regs->outbound_intr_status);
415
416 return mfiStatus;
417 }
418
419 /**
420 * megasas_fire_cmd_xscale - Sends command to the FW
421 * @frame_phys_addr : Physical address of cmd
422 * @frame_count : Number of frames for the command
423 * @regs : MFI register set
424 */
425 static inline void
426 megasas_fire_cmd_xscale(struct megasas_instance *instance,
427 dma_addr_t frame_phys_addr,
428 u32 frame_count,
429 struct megasas_register_set __iomem *regs)
430 {
431 unsigned long flags;
432
433 spin_lock_irqsave(&instance->hba_lock, flags);
434 writel((frame_phys_addr >> 3)|(frame_count),
435 &(regs)->inbound_queue_port);
436 spin_unlock_irqrestore(&instance->hba_lock, flags);
437 }
438
439 /**
440 * megasas_adp_reset_xscale - For controller reset
441 * @regs: MFI register set
442 */
443 static int
444 megasas_adp_reset_xscale(struct megasas_instance *instance,
445 struct megasas_register_set __iomem *regs)
446 {
447 u32 i;
448 u32 pcidata;
449
450 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
451
452 for (i = 0; i < 3; i++)
453 msleep(1000); /* sleep for 3 secs */
454 pcidata = 0;
455 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
456 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
457 if (pcidata & 0x2) {
458 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
459 pcidata &= ~0x2;
460 pci_write_config_dword(instance->pdev,
461 MFI_1068_PCSR_OFFSET, pcidata);
462
463 for (i = 0; i < 2; i++)
464 msleep(1000); /* need to wait 2 secs again */
465
466 pcidata = 0;
467 pci_read_config_dword(instance->pdev,
468 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
469 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
470 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
471 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
472 pcidata = 0;
473 pci_write_config_dword(instance->pdev,
474 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
475 }
476 }
477 return 0;
478 }
479
480 /**
481 * megasas_check_reset_xscale - For controller reset check
482 * @regs: MFI register set
483 */
484 static int
485 megasas_check_reset_xscale(struct megasas_instance *instance,
486 struct megasas_register_set __iomem *regs)
487 {
488 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
489 (le32_to_cpu(*instance->consumer) ==
490 MEGASAS_ADPRESET_INPROG_SIGN))
491 return 1;
492 return 0;
493 }
494
495 static struct megasas_instance_template megasas_instance_template_xscale = {
496
497 .fire_cmd = megasas_fire_cmd_xscale,
498 .enable_intr = megasas_enable_intr_xscale,
499 .disable_intr = megasas_disable_intr_xscale,
500 .clear_intr = megasas_clear_intr_xscale,
501 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
502 .adp_reset = megasas_adp_reset_xscale,
503 .check_reset = megasas_check_reset_xscale,
504 .service_isr = megasas_isr,
505 .tasklet = megasas_complete_cmd_dpc,
506 .init_adapter = megasas_init_adapter_mfi,
507 .build_and_issue_cmd = megasas_build_and_issue_cmd,
508 .issue_dcmd = megasas_issue_dcmd,
509 };
510
511 /**
512 * This is the end of set of functions & definitions specific
513 * to xscale (deviceid : 1064R, PERC5) controllers
514 */
515
516 /**
517 * The following functions are defined for ppc (deviceid : 0x60)
518 * controllers
519 */
520
521 /**
522 * megasas_enable_intr_ppc - Enables interrupts
523 * @regs: MFI register set
524 */
525 static inline void
526 megasas_enable_intr_ppc(struct megasas_instance *instance)
527 {
528 struct megasas_register_set __iomem *regs;
529
530 regs = instance->reg_set;
531 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
532
533 writel(~0x80000000, &(regs)->outbound_intr_mask);
534
535 /* Dummy readl to force pci flush */
536 readl(&regs->outbound_intr_mask);
537 }
538
539 /**
540 * megasas_disable_intr_ppc - Disable interrupt
541 * @regs: MFI register set
542 */
543 static inline void
544 megasas_disable_intr_ppc(struct megasas_instance *instance)
545 {
546 struct megasas_register_set __iomem *regs;
547 u32 mask = 0xFFFFFFFF;
548
549 regs = instance->reg_set;
550 writel(mask, &regs->outbound_intr_mask);
551 /* Dummy readl to force pci flush */
552 readl(&regs->outbound_intr_mask);
553 }
554
555 /**
556 * megasas_read_fw_status_reg_ppc - returns the current FW status value
557 * @regs: MFI register set
558 */
559 static u32
560 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
561 {
562 return readl(&(regs)->outbound_scratch_pad);
563 }
564
565 /**
566 * megasas_clear_intr_ppc - Check & clear interrupt
567 * @regs: MFI register set
568 */
569 static int
570 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
571 {
572 u32 status, mfiStatus = 0;
573
574 /*
575 * Check if it is our interrupt
576 */
577 status = readl(&regs->outbound_intr_status);
578
579 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
580 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
581
582 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
583 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
584
585 /*
586 * Clear the interrupt by writing back the same value
587 */
588 writel(status, &regs->outbound_doorbell_clear);
589
590 /* Dummy readl to force pci flush */
591 readl(&regs->outbound_doorbell_clear);
592
593 return mfiStatus;
594 }
595
596 /**
597 * megasas_fire_cmd_ppc - Sends command to the FW
598 * @frame_phys_addr : Physical address of cmd
599 * @frame_count : Number of frames for the command
600 * @regs : MFI register set
601 */
602 static inline void
603 megasas_fire_cmd_ppc(struct megasas_instance *instance,
604 dma_addr_t frame_phys_addr,
605 u32 frame_count,
606 struct megasas_register_set __iomem *regs)
607 {
608 unsigned long flags;
609
610 spin_lock_irqsave(&instance->hba_lock, flags);
611 writel((frame_phys_addr | (frame_count<<1))|1,
612 &(regs)->inbound_queue_port);
613 spin_unlock_irqrestore(&instance->hba_lock, flags);
614 }
615
616 /**
617 * megasas_check_reset_ppc - For controller reset check
618 * @regs: MFI register set
619 */
620 static int
621 megasas_check_reset_ppc(struct megasas_instance *instance,
622 struct megasas_register_set __iomem *regs)
623 {
624 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
625 return 1;
626
627 return 0;
628 }
629
630 static struct megasas_instance_template megasas_instance_template_ppc = {
631
632 .fire_cmd = megasas_fire_cmd_ppc,
633 .enable_intr = megasas_enable_intr_ppc,
634 .disable_intr = megasas_disable_intr_ppc,
635 .clear_intr = megasas_clear_intr_ppc,
636 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
637 .adp_reset = megasas_adp_reset_xscale,
638 .check_reset = megasas_check_reset_ppc,
639 .service_isr = megasas_isr,
640 .tasklet = megasas_complete_cmd_dpc,
641 .init_adapter = megasas_init_adapter_mfi,
642 .build_and_issue_cmd = megasas_build_and_issue_cmd,
643 .issue_dcmd = megasas_issue_dcmd,
644 };
645
646 /**
647 * megasas_enable_intr_skinny - Enables interrupts
648 * @regs: MFI register set
649 */
650 static inline void
651 megasas_enable_intr_skinny(struct megasas_instance *instance)
652 {
653 struct megasas_register_set __iomem *regs;
654
655 regs = instance->reg_set;
656 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
657
658 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
659
660 /* Dummy readl to force pci flush */
661 readl(&regs->outbound_intr_mask);
662 }
663
664 /**
665 * megasas_disable_intr_skinny - Disables interrupt
666 * @regs: MFI register set
667 */
668 static inline void
669 megasas_disable_intr_skinny(struct megasas_instance *instance)
670 {
671 struct megasas_register_set __iomem *regs;
672 u32 mask = 0xFFFFFFFF;
673
674 regs = instance->reg_set;
675 writel(mask, &regs->outbound_intr_mask);
676 /* Dummy readl to force pci flush */
677 readl(&regs->outbound_intr_mask);
678 }
679
680 /**
681 * megasas_read_fw_status_reg_skinny - returns the current FW status value
682 * @regs: MFI register set
683 */
684 static u32
685 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
686 {
687 return readl(&(regs)->outbound_scratch_pad);
688 }
689
690 /**
691 * megasas_clear_intr_skinny - Check & clear interrupt
692 * @regs: MFI register set
693 */
694 static int
695 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
696 {
697 u32 status;
698 u32 mfiStatus = 0;
699
700 /*
701 * Check if it is our interrupt
702 */
703 status = readl(&regs->outbound_intr_status);
704
705 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
706 return 0;
707 }
708
709 /*
710 * Check if the firmware is in the FAULT state
711 */
712 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
713 MFI_STATE_FAULT) {
714 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
715 } else
716 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
717
718 /*
719 * Clear the interrupt by writing back the same value
720 */
721 writel(status, &regs->outbound_intr_status);
722
723 /*
724 * dummy read to flush PCI
725 */
726 readl(&regs->outbound_intr_status);
727
728 return mfiStatus;
729 }
730
731 /**
732 * megasas_fire_cmd_skinny - Sends command to the FW
733 * @frame_phys_addr : Physical address of cmd
734 * @frame_count : Number of frames for the command
735 * @regs : MFI register set
736 */
737 static inline void
738 megasas_fire_cmd_skinny(struct megasas_instance *instance,
739 dma_addr_t frame_phys_addr,
740 u32 frame_count,
741 struct megasas_register_set __iomem *regs)
742 {
743 unsigned long flags;
744
745 spin_lock_irqsave(&instance->hba_lock, flags);
746 writel(upper_32_bits(frame_phys_addr),
747 &(regs)->inbound_high_queue_port);
748 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
749 &(regs)->inbound_low_queue_port);
750 mmiowb();
751 spin_unlock_irqrestore(&instance->hba_lock, flags);
752 }
753
754 /**
755 * megasas_check_reset_skinny - For controller reset check
756 * @regs: MFI register set
757 */
758 static int
759 megasas_check_reset_skinny(struct megasas_instance *instance,
760 struct megasas_register_set __iomem *regs)
761 {
762 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
763 return 1;
764
765 return 0;
766 }
767
768 static struct megasas_instance_template megasas_instance_template_skinny = {
769
770 .fire_cmd = megasas_fire_cmd_skinny,
771 .enable_intr = megasas_enable_intr_skinny,
772 .disable_intr = megasas_disable_intr_skinny,
773 .clear_intr = megasas_clear_intr_skinny,
774 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
775 .adp_reset = megasas_adp_reset_gen2,
776 .check_reset = megasas_check_reset_skinny,
777 .service_isr = megasas_isr,
778 .tasklet = megasas_complete_cmd_dpc,
779 .init_adapter = megasas_init_adapter_mfi,
780 .build_and_issue_cmd = megasas_build_and_issue_cmd,
781 .issue_dcmd = megasas_issue_dcmd,
782 };
783
784
785 /**
786 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
787 * controllers
788 */
789
790 /**
791 * megasas_enable_intr_gen2 - Enables interrupts
792 * @regs: MFI register set
793 */
794 static inline void
795 megasas_enable_intr_gen2(struct megasas_instance *instance)
796 {
797 struct megasas_register_set __iomem *regs;
798
799 regs = instance->reg_set;
800 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
801
802 /* write ~0x00000005 (4 | 1) to the intr mask */
803 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
804
805 /* Dummy readl to force pci flush */
806 readl(&regs->outbound_intr_mask);
807 }
808
809 /**
810 * megasas_disable_intr_gen2 - Disables interrupt
811 * @regs: MFI register set
812 */
813 static inline void
814 megasas_disable_intr_gen2(struct megasas_instance *instance)
815 {
816 struct megasas_register_set __iomem *regs;
817 u32 mask = 0xFFFFFFFF;
818
819 regs = instance->reg_set;
820 writel(mask, &regs->outbound_intr_mask);
821 /* Dummy readl to force pci flush */
822 readl(&regs->outbound_intr_mask);
823 }
824
825 /**
826 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
827 * @regs: MFI register set
828 */
829 static u32
830 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
831 {
832 return readl(&(regs)->outbound_scratch_pad);
833 }
834
835 /**
836 * megasas_clear_intr_gen2 - Check & clear interrupt
837 * @regs: MFI register set
838 */
839 static int
840 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
841 {
842 u32 status;
843 u32 mfiStatus = 0;
844
845 /*
846 * Check if it is our interrupt
847 */
848 status = readl(&regs->outbound_intr_status);
849
850 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
851 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
852 }
853 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
854 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
855 }
856
857 /*
858 * Clear the interrupt by writing back the same value
859 */
860 if (mfiStatus)
861 writel(status, &regs->outbound_doorbell_clear);
862
863 /* Dummy readl to force pci flush */
864 readl(&regs->outbound_intr_status);
865
866 return mfiStatus;
867 }
868 /**
869 * megasas_fire_cmd_gen2 - Sends command to the FW
870 * @frame_phys_addr : Physical address of cmd
871 * @frame_count : Number of frames for the command
872 * @regs : MFI register set
873 */
874 static inline void
875 megasas_fire_cmd_gen2(struct megasas_instance *instance,
876 dma_addr_t frame_phys_addr,
877 u32 frame_count,
878 struct megasas_register_set __iomem *regs)
879 {
880 unsigned long flags;
881
882 spin_lock_irqsave(&instance->hba_lock, flags);
883 writel((frame_phys_addr | (frame_count<<1))|1,
884 &(regs)->inbound_queue_port);
885 spin_unlock_irqrestore(&instance->hba_lock, flags);
886 }
887
888 /**
889 * megasas_adp_reset_gen2 - For controller reset
890 * @regs: MFI register set
891 */
892 static int
893 megasas_adp_reset_gen2(struct megasas_instance *instance,
894 struct megasas_register_set __iomem *reg_set)
895 {
896 u32 retry = 0 ;
897 u32 HostDiag;
898 u32 __iomem *seq_offset = &reg_set->seq_offset;
899 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
900
901 if (instance->instancet == &megasas_instance_template_skinny) {
902 seq_offset = &reg_set->fusion_seq_offset;
903 hostdiag_offset = &reg_set->fusion_host_diag;
904 }
905
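/*
 * Write the unlock key sequence to the sequence register so the host
 * diagnostic register becomes writable; the loop below waits for
 * DIAG_WRITE_ENABLE before requesting the adapter reset.
 */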
906 writel(0, seq_offset);
907 writel(4, seq_offset);
908 writel(0xb, seq_offset);
909 writel(2, seq_offset);
910 writel(7, seq_offset);
911 writel(0xd, seq_offset);
912
913 msleep(1000);
914
915 HostDiag = (u32)readl(hostdiag_offset);
916
917 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
918 msleep(100);
919 HostDiag = (u32)readl(hostdiag_offset);
920 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
921 retry, HostDiag);
922
923 if (retry++ >= 100)
924 return 1;
925
926 }
927
928 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
929
930 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
931
932 ssleep(10);
933
934 HostDiag = (u32)readl(hostdiag_offset);
935 while (HostDiag & DIAG_RESET_ADAPTER) {
936 msleep(100);
937 HostDiag = (u32)readl(hostdiag_offset);
938 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
939 retry, HostDiag);
940
941 if (retry++ >= 1000)
942 return 1;
943
944 }
945 return 0;
946 }
947
948 /**
949 * megasas_check_reset_gen2 - For controller reset check
950 * @regs: MFI register set
951 */
952 static int
953 megasas_check_reset_gen2(struct megasas_instance *instance,
954 struct megasas_register_set __iomem *regs)
955 {
956 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
957 return 1;
958
959 return 0;
960 }
961
962 static struct megasas_instance_template megasas_instance_template_gen2 = {
963
964 .fire_cmd = megasas_fire_cmd_gen2,
965 .enable_intr = megasas_enable_intr_gen2,
966 .disable_intr = megasas_disable_intr_gen2,
967 .clear_intr = megasas_clear_intr_gen2,
968 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
969 .adp_reset = megasas_adp_reset_gen2,
970 .check_reset = megasas_check_reset_gen2,
971 .service_isr = megasas_isr,
972 .tasklet = megasas_complete_cmd_dpc,
973 .init_adapter = megasas_init_adapter_mfi,
974 .build_and_issue_cmd = megasas_build_and_issue_cmd,
975 .issue_dcmd = megasas_issue_dcmd,
976 };
977
978 /**
979 * This is the end of set of functions & definitions
980 * specific to gen2 (deviceid : 0x78, 0x79) controllers
981 */
982
983 /*
984 * Template added for TB (Fusion)
985 */
986 extern struct megasas_instance_template megasas_instance_template_fusion;
987
988 /**
989 * megasas_issue_polled - Issues a polling command
990 * @instance: Adapter soft state
991 * @cmd: Command packet to be issued
992 *
993 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
994 */
995 int
996 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
997 {
998 struct megasas_header *frame_hdr = &cmd->frame->hdr;
999
1000 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1001 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1002
1003 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1004 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1005 __func__, __LINE__);
1006 return DCMD_NOT_FIRED;
1007 }
1008
1009 instance->instancet->issue_dcmd(instance, cmd);
1010
1011 return wait_and_poll(instance, cmd, instance->requestorId ?
1012 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1013 }
1014
1015 /**
1016 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1017 * @instance: Adapter soft state
1018 * @cmd: Command to be issued
1019 * @timeout: Timeout in seconds
1020 *
1021 * This function waits on an event for the command to be returned from ISR.
1022 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1023 * Used to issue ioctl commands.
1024 */
1025 int
1026 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1027 struct megasas_cmd *cmd, int timeout)
1028 {
1029 int ret = 0;
1030 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1031
1032 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1033 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1034 __func__, __LINE__);
1035 return DCMD_NOT_FIRED;
1036 }
1037
1038 instance->instancet->issue_dcmd(instance, cmd);
1039
1040 if (timeout) {
1041 ret = wait_event_timeout(instance->int_cmd_wait_q,
1042 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1043 if (!ret) {
1044 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1045 __func__, __LINE__);
1046 return DCMD_TIMEOUT;
1047 }
1048 } else
1049 wait_event(instance->int_cmd_wait_q,
1050 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1051
1052 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1053 DCMD_SUCCESS : DCMD_FAILED;
1054 }
1055
1056 /**
1057 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1058 * @instance: Adapter soft state
1059 * @cmd_to_abort: Previously issued cmd to be aborted
1060 * @timeout: Timeout in seconds
1061 *
1062 * MFI firmware can abort a previously issued AEN command (automatic event
1063 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1064 * cmd and waits for return status.
1065 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1066 */
1067 static int
1068 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1069 struct megasas_cmd *cmd_to_abort, int timeout)
1070 {
1071 struct megasas_cmd *cmd;
1072 struct megasas_abort_frame *abort_fr;
1073 int ret = 0;
1074
1075 cmd = megasas_get_cmd(instance);
1076
1077 if (!cmd)
1078 return -1;
1079
1080 abort_fr = &cmd->frame->abort;
1081
1082 /*
1083 * Prepare and issue the abort frame
1084 */
1085 abort_fr->cmd = MFI_CMD_ABORT;
1086 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1087 abort_fr->flags = cpu_to_le16(0);
1088 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1089 abort_fr->abort_mfi_phys_addr_lo =
1090 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1091 abort_fr->abort_mfi_phys_addr_hi =
1092 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1093
1094 cmd->sync_cmd = 1;
1095 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1096
1097 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1098 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1099 __func__, __LINE__);
1100 return DCMD_NOT_FIRED;
1101 }
1102
1103 instance->instancet->issue_dcmd(instance, cmd);
1104
1105 if (timeout) {
1106 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1107 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1108 if (!ret) {
1109 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1110 __func__, __LINE__);
1111 return DCMD_TIMEOUT;
1112 }
1113 } else
1114 wait_event(instance->abort_cmd_wait_q,
1115 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1116
1117 cmd->sync_cmd = 0;
1118
1119 megasas_return_cmd(instance, cmd);
1120 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1121 DCMD_SUCCESS : DCMD_FAILED;
1122 }
1123
1124 /**
1125 * megasas_make_sgl32 - Prepares 32-bit SGL
1126 * @instance: Adapter soft state
1127 * @scp: SCSI command from the mid-layer
1128 * @mfi_sgl: SGL to be filled in
1129 *
1130 * If successful, this function returns the number of SG elements. Otherwise,
1131 * it returns -1.
1132 */
1133 static int
1134 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1135 union megasas_sgl *mfi_sgl)
1136 {
1137 int i;
1138 int sge_count;
1139 struct scatterlist *os_sgl;
1140
1141 sge_count = scsi_dma_map(scp);
1142 BUG_ON(sge_count < 0);
1143
1144 if (sge_count) {
1145 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1146 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1147 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1148 }
1149 }
1150 return sge_count;
1151 }
1152
1153 /**
1154 * megasas_make_sgl64 - Prepares 64-bit SGL
1155 * @instance: Adapter soft state
1156 * @scp: SCSI command from the mid-layer
1157 * @mfi_sgl: SGL to be filled in
1158 *
1159 * If successful, this function returns the number of SG elements. Otherwise,
1160 * it returns -1.
1161 */
1162 static int
1163 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1164 union megasas_sgl *mfi_sgl)
1165 {
1166 int i;
1167 int sge_count;
1168 struct scatterlist *os_sgl;
1169
1170 sge_count = scsi_dma_map(scp);
1171 BUG_ON(sge_count < 0);
1172
1173 if (sge_count) {
1174 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1175 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1176 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1177 }
1178 }
1179 return sge_count;
1180 }
1181
1182 /**
1183 * megasas_make_sgl_skinny - Prepares IEEE SGL
1184 * @instance: Adapter soft state
1185 * @scp: SCSI command from the mid-layer
1186 * @mfi_sgl: SGL to be filled in
1187 *
1188 * If successful, this function returns the number of SG elements. Otherwise,
1189 * it returns -1.
1190 */
1191 static int
1192 megasas_make_sgl_skinny(struct megasas_instance *instance,
1193 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1194 {
1195 int i;
1196 int sge_count;
1197 struct scatterlist *os_sgl;
1198
1199 sge_count = scsi_dma_map(scp);
1200
1201 if (sge_count) {
1202 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1203 mfi_sgl->sge_skinny[i].length =
1204 cpu_to_le32(sg_dma_len(os_sgl));
1205 mfi_sgl->sge_skinny[i].phys_addr =
1206 cpu_to_le64(sg_dma_address(os_sgl));
1207 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1208 }
1209 }
1210 return sge_count;
1211 }
1212
1213 /**
1214 * megasas_get_frame_count - Computes the number of frames
1215 * @frame_type : type of frame- io or pthru frame
1216 * @sge_count : number of sg elements
1217 *
1218 * Returns the number of frames required for the number of SGEs (sge_count)
1219 */
1220
1221 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1222 u8 sge_count, u8 frame_type)
1223 {
1224 int num_cnt;
1225 int sge_bytes;
1226 u32 sge_sz;
1227 u32 frame_count = 0;
1228
1229 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1230 sizeof(struct megasas_sge32);
1231
1232 if (instance->flag_ieee) {
1233 sge_sz = sizeof(struct megasas_sge_skinny);
1234 }
1235
1236 /*
1237 * Main frame can contain 2 SGEs for 64-bit SGLs and
1238 * 3 SGEs for 32-bit SGLs for ldio &
1239 * 1 SGEs for 64-bit SGLs and
1240 * 2 SGEs for 32-bit SGLs for pthru frame
1241 */
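/*
 * Illustrative example (assuming a packed 12-byte struct megasas_sge64
 * and 64-byte MFI frames): an LDIO request with sge_count = 10 leaves
 * num_cnt = 8 SGEs beyond the main frame, i.e. 96 bytes, which rounds
 * up to 2 extra frames, giving frame_count = 3.
 */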
1242 if (unlikely(frame_type == PTHRU_FRAME)) {
1243 if (instance->flag_ieee == 1) {
1244 num_cnt = sge_count - 1;
1245 } else if (IS_DMA64)
1246 num_cnt = sge_count - 1;
1247 else
1248 num_cnt = sge_count - 2;
1249 } else {
1250 if (instance->flag_ieee == 1) {
1251 num_cnt = sge_count - 1;
1252 } else if (IS_DMA64)
1253 num_cnt = sge_count - 2;
1254 else
1255 num_cnt = sge_count - 3;
1256 }
1257
1258 if (num_cnt > 0) {
1259 sge_bytes = sge_sz * num_cnt;
1260
1261 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1262 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1263 }
1264 /* Main frame */
1265 frame_count += 1;
1266
1267 if (frame_count > 7)
1268 frame_count = 8;
1269 return frame_count;
1270 }
1271
1272 /**
1273 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1274 * @instance: Adapter soft state
1275 * @scp: SCSI command
1276 * @cmd: Command to be prepared in
1277 *
1278 * This function prepares CDB commands. These are typically pass-through
1279 * commands to the devices.
1280 */
1281 static int
1282 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1283 struct megasas_cmd *cmd)
1284 {
1285 u32 is_logical;
1286 u32 device_id;
1287 u16 flags = 0;
1288 struct megasas_pthru_frame *pthru;
1289
1290 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1291 device_id = MEGASAS_DEV_INDEX(scp);
1292 pthru = (struct megasas_pthru_frame *)cmd->frame;
1293
1294 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1295 flags = MFI_FRAME_DIR_WRITE;
1296 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1297 flags = MFI_FRAME_DIR_READ;
1298 else if (scp->sc_data_direction == PCI_DMA_NONE)
1299 flags = MFI_FRAME_DIR_NONE;
1300
1301 if (instance->flag_ieee == 1) {
1302 flags |= MFI_FRAME_IEEE;
1303 }
1304
1305 /*
1306 * Prepare the DCDB frame
1307 */
1308 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1309 pthru->cmd_status = 0x0;
1310 pthru->scsi_status = 0x0;
1311 pthru->target_id = device_id;
1312 pthru->lun = scp->device->lun;
1313 pthru->cdb_len = scp->cmd_len;
1314 pthru->timeout = 0;
1315 pthru->pad_0 = 0;
1316 pthru->flags = cpu_to_le16(flags);
1317 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1318
1319 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1320
1321 /*
1322 * If the command is for the tape device, set the
1323 * pthru timeout to the os layer timeout value.
1324 */
1325 if (scp->device->type == TYPE_TAPE) {
1326 if ((scp->request->timeout / HZ) > 0xFFFF)
1327 pthru->timeout = cpu_to_le16(0xFFFF);
1328 else
1329 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1330 }
1331
1332 /*
1333 * Construct SGL
1334 */
1335 if (instance->flag_ieee == 1) {
1336 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1337 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1338 &pthru->sgl);
1339 } else if (IS_DMA64) {
1340 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1341 pthru->sge_count = megasas_make_sgl64(instance, scp,
1342 &pthru->sgl);
1343 } else
1344 pthru->sge_count = megasas_make_sgl32(instance, scp,
1345 &pthru->sgl);
1346
1347 if (pthru->sge_count > instance->max_num_sge) {
1348 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1349 pthru->sge_count);
1350 return 0;
1351 }
1352
1353 /*
1354 * Sense info specific
1355 */
1356 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1357 pthru->sense_buf_phys_addr_hi =
1358 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1359 pthru->sense_buf_phys_addr_lo =
1360 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1361
1362 /*
1363 * Compute the total number of frames this command consumes. FW uses
1364 * this number to pull sufficient number of frames from host memory.
1365 */
1366 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1367 PTHRU_FRAME);
1368
1369 return cmd->frame_count;
1370 }
1371
1372 /**
1373 * megasas_build_ldio - Prepares IOs to logical devices
1374 * @instance: Adapter soft state
1375 * @scp: SCSI command
1376 * @cmd: Command to be prepared
1377 *
1378 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1379 */
1380 static int
1381 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1382 struct megasas_cmd *cmd)
1383 {
1384 u32 device_id;
1385 u8 sc = scp->cmnd[0];
1386 u16 flags = 0;
1387 struct megasas_io_frame *ldio;
1388
1389 device_id = MEGASAS_DEV_INDEX(scp);
1390 ldio = (struct megasas_io_frame *)cmd->frame;
1391
1392 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1393 flags = MFI_FRAME_DIR_WRITE;
1394 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1395 flags = MFI_FRAME_DIR_READ;
1396
1397 if (instance->flag_ieee == 1) {
1398 flags |= MFI_FRAME_IEEE;
1399 }
1400
1401 /*
1402 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1403 */
1404 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1405 ldio->cmd_status = 0x0;
1406 ldio->scsi_status = 0x0;
1407 ldio->target_id = device_id;
1408 ldio->timeout = 0;
1409 ldio->reserved_0 = 0;
1410 ldio->pad_0 = 0;
1411 ldio->flags = cpu_to_le16(flags);
1412 ldio->start_lba_hi = 0;
1413 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1414
1415 /*
1416 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1417 */
1418 if (scp->cmd_len == 6) {
1419 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1420 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1421 ((u32) scp->cmnd[2] << 8) |
1422 (u32) scp->cmnd[3]);
1423
1424 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1425 }
1426
1427 /*
1428 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1429 */
1430 else if (scp->cmd_len == 10) {
1431 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1432 ((u32) scp->cmnd[7] << 8));
1433 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1434 ((u32) scp->cmnd[3] << 16) |
1435 ((u32) scp->cmnd[4] << 8) |
1436 (u32) scp->cmnd[5]);
1437 }
1438
1439 /*
1440 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1441 */
1442 else if (scp->cmd_len == 12) {
1443 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1444 ((u32) scp->cmnd[7] << 16) |
1445 ((u32) scp->cmnd[8] << 8) |
1446 (u32) scp->cmnd[9]);
1447
1448 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1449 ((u32) scp->cmnd[3] << 16) |
1450 ((u32) scp->cmnd[4] << 8) |
1451 (u32) scp->cmnd[5]);
1452 }
1453
1454 /*
1455 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1456 */
1457 else if (scp->cmd_len == 16) {
1458 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1459 ((u32) scp->cmnd[11] << 16) |
1460 ((u32) scp->cmnd[12] << 8) |
1461 (u32) scp->cmnd[13]);
1462
1463 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1464 ((u32) scp->cmnd[7] << 16) |
1465 ((u32) scp->cmnd[8] << 8) |
1466 (u32) scp->cmnd[9]);
1467
1468 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1469 ((u32) scp->cmnd[3] << 16) |
1470 ((u32) scp->cmnd[4] << 8) |
1471 (u32) scp->cmnd[5]);
1472
1473 }
1474
1475 /*
1476 * Construct SGL
1477 */
1478 if (instance->flag_ieee) {
1479 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1480 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1481 &ldio->sgl);
1482 } else if (IS_DMA64) {
1483 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1484 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1485 } else
1486 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1487
1488 if (ldio->sge_count > instance->max_num_sge) {
1489 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1490 ldio->sge_count);
1491 return 0;
1492 }
1493
1494 /*
1495 * Sense info specific
1496 */
1497 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1498 ldio->sense_buf_phys_addr_hi = 0;
1499 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1500
1501 /*
1502 * Compute the total number of frames this command consumes. FW uses
1503 * this number to pull sufficient number of frames from host memory.
1504 */
1505 cmd->frame_count = megasas_get_frame_count(instance,
1506 ldio->sge_count, IO_FRAME);
1507
1508 return cmd->frame_count;
1509 }
1510
1511 /**
1512 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1513 * and whether it's RW or non RW
1514 * @cmd: SCSI command
1515 *
1516 */
1517 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1518 {
1519 int ret;
1520
1521 switch (cmd->cmnd[0]) {
1522 case READ_10:
1523 case WRITE_10:
1524 case READ_12:
1525 case WRITE_12:
1526 case READ_6:
1527 case WRITE_6:
1528 case READ_16:
1529 case WRITE_16:
1530 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1531 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1532 break;
1533 default:
1534 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1535 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1536 }
1537 return ret;
1538 }
1539
1540 /**
1541 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1542 * in FW
1543 * @instance: Adapter soft state
1544 */
1545 static inline void
1546 megasas_dump_pending_frames(struct megasas_instance *instance)
1547 {
1548 struct megasas_cmd *cmd;
1549 int i,n;
1550 union megasas_sgl *mfi_sgl;
1551 struct megasas_io_frame *ldio;
1552 struct megasas_pthru_frame *pthru;
1553 u32 sgcount;
1554 u16 max_cmd = instance->max_fw_cmds;
1555
1556 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1557 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1558 if (IS_DMA64)
1559 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1560 else
1561 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1562
1563 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1564 for (i = 0; i < max_cmd; i++) {
1565 cmd = instance->cmd_list[i];
1566 if (!cmd->scmd)
1567 continue;
1568 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1569 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1570 ldio = (struct megasas_io_frame *)cmd->frame;
1571 mfi_sgl = &ldio->sgl;
1572 sgcount = ldio->sge_count;
1573 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1574 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1575 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1576 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1577 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1578 } else {
1579 pthru = (struct megasas_pthru_frame *) cmd->frame;
1580 mfi_sgl = &pthru->sgl;
1581 sgcount = pthru->sge_count;
1582 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1583 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1584 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1585 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1586 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1587 }
1588 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1589 for (n = 0; n < sgcount; n++) {
1590 if (IS_DMA64)
1591 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1592 le32_to_cpu(mfi_sgl->sge64[n].length),
1593 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1594 else
1595 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1596 le32_to_cpu(mfi_sgl->sge32[n].length),
1597 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1598 }
1599 }
1600 } /*for max_cmd*/
1601 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1602 for (i = 0; i < max_cmd; i++) {
1603
1604 cmd = instance->cmd_list[i];
1605
1606 if (cmd->sync_cmd == 1)
1607 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1608 }
1609 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1610 }
1611
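/**
 * megasas_build_and_issue_cmd - Build an MFI frame for a SCSI command and
 * issue it to the firmware
 * @instance: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 *
 * Grabs a free MFI command, builds either an LDIO or a DCDB (pass-through)
 * frame for it and fires the frame to the firmware. Returns 0 on success,
 * or SCSI_MLQUEUE_HOST_BUSY if no command is free or the frame could not
 * be built.
 */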
1612 u32
1613 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1614 struct scsi_cmnd *scmd)
1615 {
1616 struct megasas_cmd *cmd;
1617 u32 frame_count;
1618
1619 cmd = megasas_get_cmd(instance);
1620 if (!cmd)
1621 return SCSI_MLQUEUE_HOST_BUSY;
1622
1623 /*
1624 * Logical drive command
1625 */
1626 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1627 frame_count = megasas_build_ldio(instance, scmd, cmd);
1628 else
1629 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1630
1631 if (!frame_count)
1632 goto out_return_cmd;
1633
1634 cmd->scmd = scmd;
1635 scmd->SCp.ptr = (char *)cmd;
1636
1637 /*
1638 * Issue the command to the FW
1639 */
1640 atomic_inc(&instance->fw_outstanding);
1641
1642 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1643 cmd->frame_count-1, instance->reg_set);
1644
1645 return 0;
1646 out_return_cmd:
1647 megasas_return_cmd(instance, cmd);
1648 return SCSI_MLQUEUE_HOST_BUSY;
1649 }
1650
1651
1652 /**
1653 * megasas_queue_command - Queue entry point
1654 * @shost: SCSI host to which the command is queued
1655 * @scmd: SCSI command to be queued
1656 */
1657 static int
1658 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1659 {
1660 struct megasas_instance *instance;
1661 struct MR_PRIV_DEVICE *mr_device_priv_data;
1662
1663 instance = (struct megasas_instance *)
1664 scmd->device->host->hostdata;
1665
1666 if (instance->unload == 1) {
1667 scmd->result = DID_NO_CONNECT << 16;
1668 scmd->scsi_done(scmd);
1669 return 0;
1670 }
1671
1672 if (instance->issuepend_done == 0)
1673 return SCSI_MLQUEUE_HOST_BUSY;
1674
1675
1676 /* Check for an mpio path and adjust behavior */
1677 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1678 if (megasas_check_mpio_paths(instance, scmd) ==
1679 (DID_REQUEUE << 16)) {
1680 return SCSI_MLQUEUE_HOST_BUSY;
1681 } else {
1682 scmd->result = DID_NO_CONNECT << 16;
1683 scmd->scsi_done(scmd);
1684 return 0;
1685 }
1686 }
1687
1688 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1689 scmd->result = DID_NO_CONNECT << 16;
1690 scmd->scsi_done(scmd);
1691 return 0;
1692 }
1693
1694 mr_device_priv_data = scmd->device->hostdata;
1695 if (!mr_device_priv_data) {
1696 scmd->result = DID_NO_CONNECT << 16;
1697 scmd->scsi_done(scmd);
1698 return 0;
1699 }
1700
1701 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1702 return SCSI_MLQUEUE_HOST_BUSY;
1703
1704 if (mr_device_priv_data->tm_busy)
1705 return SCSI_MLQUEUE_DEVICE_BUSY;
1706
1707
1708 scmd->result = 0;
1709
1710 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1711 (scmd->device->id >= instance->fw_supported_vd_count ||
1712 scmd->device->lun)) {
1713 scmd->result = DID_BAD_TARGET << 16;
1714 goto out_done;
1715 }
1716
1717 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1718 MEGASAS_IS_LOGICAL(scmd->device) &&
1719 (!instance->fw_sync_cache_support)) {
1720 scmd->result = DID_OK << 16;
1721 goto out_done;
1722 }
1723
1724 return instance->instancet->build_and_issue_cmd(instance, scmd);
1725
1726 out_done:
1727 scmd->scsi_done(scmd);
1728 return 0;
1729 }
1730
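/*
 * megasas_lookup_instance - Map a SCSI host number to its adapter instance
 *
 * Walks the global megasas_mgmt_info table and returns the instance whose
 * Scsi_Host carries the given host_no, or NULL if none matches.
 */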
1731 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1732 {
1733 int i;
1734
1735 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1736
1737 if ((megasas_mgmt_info.instance[i]) &&
1738 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1739 return megasas_mgmt_info.instance[i];
1740 }
1741
1742 return NULL;
1743 }
1744
1745 /*
1746 * megasas_set_dynamic_target_properties -
1747 * Device properties set by the driver may not be static and need to be
1748 * updated after an OCR
1749 *
1750 * set tm_capable.
1751 * set dma alignment (only for a VD with EEDP protection enabled).
1752 *
1753 * @sdev: OS provided scsi device
1754 *
1755 * Returns void
1756 */
1757 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1758 {
1759 u16 pd_index = 0, ld;
1760 u32 device_id;
1761 struct megasas_instance *instance;
1762 struct fusion_context *fusion;
1763 struct MR_PRIV_DEVICE *mr_device_priv_data;
1764 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1765 struct MR_LD_RAID *raid;
1766 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1767
1768 instance = megasas_lookup_instance(sdev->host->host_no);
1769 fusion = instance->ctrl_context;
1770 mr_device_priv_data = sdev->hostdata;
1771
1772 if (!fusion || !mr_device_priv_data)
1773 return;
1774
1775 if (MEGASAS_IS_LOGICAL(sdev)) {
1776 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1777 + sdev->id;
1778 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1779 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1780 if (ld >= instance->fw_supported_vd_count)
1781 return;
1782 raid = MR_LdRaidGet(ld, local_map_ptr);
1783
1784 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1785 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1786
1787 mr_device_priv_data->is_tm_capable =
1788 raid->capability.tmCapable;
1789 } else if (instance->use_seqnum_jbod_fp) {
1790 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1791 sdev->id;
1792 pd_sync = (void *)fusion->pd_seq_sync
1793 [(instance->pd_seq_map_id - 1) & 1];
1794 mr_device_priv_data->is_tm_capable =
1795 pd_sync->seq[pd_index].capability.tmCapable;
1796 }
1797 }
1798
1799 /*
1800 * megasas_set_nvme_device_properties -
1801 * set nomerges=2
1802 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1803 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1804 *
1805 * MR firmware provides the value in KB. The caller of this function
1806 * converts KB into bytes.
1807 *
1808 * e.g. MDTS=5 means 2^5 * nvme page size. (In case of a 4K page size,
1809 * MR firmware provides the value 128, as (32 * 4K) = 128K.)
1810 *
1811 * @sdev: scsi device
1812 * @max_io_size: maximum io transfer size
1813 *
1814 */
1815 static inline void
1816 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1817 {
1818 struct megasas_instance *instance;
1819 u32 mr_nvme_pg_size;
1820
1821 instance = (struct megasas_instance *)sdev->host->hostdata;
1822 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1823 MR_DEFAULT_NVME_PAGE_SIZE);
1824
1825 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1826
1827 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1828 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1829 }
1830
1831
1832 /*
1833 * megasas_set_static_target_properties -
1834 * Device properties set by the driver are static and do not need to be
1835 * updated after an OCR.
1836 *
1837 * set io timeout
1838 * set device queue depth
1839 * set nvme device properties. see - megasas_set_nvme_device_properties
1840 *
1841 * @sdev: scsi device
1842 * @is_target_prop: true if FW provided target properties.
1843 */
1844 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1845 bool is_target_prop)
1846 {
1847 u16 target_index = 0;
1848 u8 interface_type;
1849 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1850 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1851 u32 tgt_device_qd;
1852 struct megasas_instance *instance;
1853 struct MR_PRIV_DEVICE *mr_device_priv_data;
1854
1855 instance = megasas_lookup_instance(sdev->host->host_no);
1856 mr_device_priv_data = sdev->hostdata;
1857 interface_type = mr_device_priv_data->interface_type;
1858
1859 /*
1860 * The RAID firmware may require extended timeouts.
1861 */
1862 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1863
1864 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1865
1866 switch (interface_type) {
1867 case SAS_PD:
1868 device_qd = MEGASAS_SAS_QD;
1869 break;
1870 case SATA_PD:
1871 device_qd = MEGASAS_SATA_QD;
1872 break;
1873 case NVME_PD:
1874 device_qd = MEGASAS_NVME_QD;
1875 break;
1876 }
1877
1878 if (is_target_prop) {
1879 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1880 if (tgt_device_qd &&
1881 (tgt_device_qd <= instance->host->can_queue))
1882 device_qd = tgt_device_qd;
1883
1884 /* max_io_size_kb will be set to non-zero for
1885 * NVMe-based VDs and sysPDs.
1886 */
1887 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1888 }
1889
1890 if (instance->nvme_page_size && max_io_size_kb)
1891 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1892
1893 scsi_change_queue_depth(sdev, device_qd);
1894
1895 }
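
/*
 * Example of the queue-depth selection above (illustrative, values assumed):
 * for a SATA PD the default is MEGASAS_SATA_QD; if the firmware reported
 * target properties with device_qdepth = 64 and the host can_queue is at
 * least 64, the depth passed to scsi_change_queue_depth() is 64.
 */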
1896
1897
1898 static int megasas_slave_configure(struct scsi_device *sdev)
1899 {
1900 u16 pd_index = 0;
1901 struct megasas_instance *instance;
1902 int ret_target_prop = DCMD_FAILED;
1903 bool is_target_prop = false;
1904
1905 instance = megasas_lookup_instance(sdev->host->host_no);
1906 if (instance->pd_list_not_supported) {
1907 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1908 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1909 sdev->id;
1910 if (instance->pd_list[pd_index].driveState !=
1911 MR_PD_STATE_SYSTEM)
1912 return -ENXIO;
1913 }
1914 }
1915
1916 mutex_lock(&instance->hba_mutex);
1917 /* Send DCMD to Firmware and cache the information */
1918 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1919 megasas_get_pd_info(instance, sdev);
1920
1921 /* Some Ventura firmware may not have instance->nvme_page_size set.
1922 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1923 */
1924 if ((instance->tgt_prop) && (instance->nvme_page_size))
1925 ret_target_prop = megasas_get_target_prop(instance, sdev);
1926
1927 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1928 megasas_set_static_target_properties(sdev, is_target_prop);
1929
1930 mutex_unlock(&instance->hba_mutex);
1931
1932 /* This sdev property may change post OCR */
1933 megasas_set_dynamic_target_properties(sdev);
1934
1935 return 0;
1936 }
1937
1938 static int megasas_slave_alloc(struct scsi_device *sdev)
1939 {
1940 u16 pd_index = 0;
1941 struct megasas_instance *instance ;
1942 struct MR_PRIV_DEVICE *mr_device_priv_data;
1943
1944 instance = megasas_lookup_instance(sdev->host->host_no);
1945 if (!MEGASAS_IS_LOGICAL(sdev)) {
1946 /*
1947 * Open the OS scan to the SYSTEM PD
1948 */
1949 pd_index =
1950 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1951 sdev->id;
1952 if ((instance->pd_list_not_supported ||
1953 instance->pd_list[pd_index].driveState ==
1954 MR_PD_STATE_SYSTEM)) {
1955 goto scan_target;
1956 }
1957 return -ENXIO;
1958 }
1959
1960 scan_target:
1961 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1962 GFP_KERNEL);
1963 if (!mr_device_priv_data)
1964 return -ENOMEM;
1965 sdev->hostdata = mr_device_priv_data;
1966
1967 atomic_set(&mr_device_priv_data->r1_ldio_hint,
1968 instance->r1_ldio_hint_default);
1969 return 0;
1970 }
1971
1972 static void megasas_slave_destroy(struct scsi_device *sdev)
1973 {
1974 kfree(sdev->hostdata);
1975 sdev->hostdata = NULL;
1976 }
1977
1978 /*
1979 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
1980 * kill adapter
1981 * @instance: Adapter soft state
1982 *
1983 */
1984 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1985 {
1986 int i;
1987 struct megasas_cmd *cmd_mfi;
1988 struct megasas_cmd_fusion *cmd_fusion;
1989 struct fusion_context *fusion = instance->ctrl_context;
1990
1991 /* Find all outstanding ioctls */
1992 if (fusion) {
1993 for (i = 0; i < instance->max_fw_cmds; i++) {
1994 cmd_fusion = fusion->cmd_list[i];
1995 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1996 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1997 if (cmd_mfi->sync_cmd &&
1998 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1999 megasas_complete_cmd(instance,
2000 cmd_mfi, DID_OK);
2001 }
2002 }
2003 } else {
2004 for (i = 0; i < instance->max_fw_cmds; i++) {
2005 cmd_mfi = instance->cmd_list[i];
2006 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2007 MFI_CMD_ABORT)
2008 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2009 }
2010 }
2011 }
2012
2013
2014 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2015 {
2016 /* Set critical error to block I/O & ioctls in case caller didn't */
2017 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2018 /* Wait 1 second to ensure that I/O or ioctls being built have been posted */
2019 msleep(1000);
2020 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2021 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2022 (instance->ctrl_context)) {
2023 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2024 /* Flush */
2025 readl(&instance->reg_set->doorbell);
2026 if (instance->requestorId && instance->peerIsPresent)
2027 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2028 } else {
2029 writel(MFI_STOP_ADP,
2030 &instance->reg_set->inbound_doorbell);
2031 }
2032 /* Complete outstanding ioctls when adapter is killed */
2033 megasas_complete_outstanding_ioctls(instance);
2034 }
2035
2036 /**
2037 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2038 * restored to max value
2039 * @instance: Adapter soft state
2040 *
2041 */
2042 void
2043 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2044 {
2045 unsigned long flags;
2046
2047 if (instance->flag & MEGASAS_FW_BUSY
2048 && time_after(jiffies, instance->last_time + 5 * HZ)
2049 && atomic_read(&instance->fw_outstanding) <
2050 instance->throttlequeuedepth + 1) {
2051
2052 spin_lock_irqsave(instance->host->host_lock, flags);
2053 instance->flag &= ~MEGASAS_FW_BUSY;
2054
2055 instance->host->can_queue = instance->cur_can_queue;
2056 spin_unlock_irqrestore(instance->host->host_lock, flags);
2057 }
2058 }
2059
2060 /**
2061 * megasas_complete_cmd_dpc - Completes commands from the reply queue
2062 * @instance_addr: Address of adapter soft state
2063 *
2064 * Tasklet to complete cmds
2065 */
2066 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2067 {
2068 u32 producer;
2069 u32 consumer;
2070 u32 context;
2071 struct megasas_cmd *cmd;
2072 struct megasas_instance *instance =
2073 (struct megasas_instance *)instance_addr;
2074 unsigned long flags;
2075
2076 /* If we have already declared the adapter dead, do not complete cmds */
2077 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2078 return;
2079
2080 spin_lock_irqsave(&instance->completion_lock, flags);
2081
2082 producer = le32_to_cpu(*instance->producer);
2083 consumer = le32_to_cpu(*instance->consumer);
2084
2085 while (consumer != producer) {
2086 context = le32_to_cpu(instance->reply_queue[consumer]);
2087 if (context >= instance->max_fw_cmds) {
2088 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2089 context);
2090 BUG();
2091 }
2092
2093 cmd = instance->cmd_list[context];
2094
2095 megasas_complete_cmd(instance, cmd, DID_OK);
2096
2097 consumer++;
2098 if (consumer == (instance->max_fw_cmds + 1)) {
2099 consumer = 0;
2100 }
2101 }
2102
2103 *instance->consumer = cpu_to_le32(producer);
2104
2105 spin_unlock_irqrestore(&instance->completion_lock, flags);
2106
2107 /*
2108 * Check if we can restore can_queue
2109 */
2110 megasas_check_and_restore_queue_depth(instance);
2111 }
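
/*
 * Note on the reply-ring wrap-around above (illustrative): the MFI reply
 * queue has max_fw_cmds + 1 slots, so with max_fw_cmds = 4 the consumer
 * index cycles 0, 1, 2, 3, 4, 0, ... and every context read from the queue
 * must be below max_fw_cmds.
 */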
2112
2113 /**
2114 * megasas_start_timer - Initializes a timer object
2115 * @instance: Adapter soft state
2116 * @timer: timer object to be initialized
2117 * @fn: timer function
2118 * @interval: time interval after which the timer function is called
2119 *
2120 */
2121 void megasas_start_timer(struct megasas_instance *instance,
2122 struct timer_list *timer,
2123 void *fn, unsigned long interval)
2124 {
2125 init_timer(timer);
2126 timer->expires = jiffies + interval;
2127 timer->data = (unsigned long)instance;
2128 timer->function = fn;
2129 add_timer(timer);
2130 }
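
/*
 * Usage sketch for the helper above (illustrative only; guarded out so it is
 * never compiled). A plausible way to arm the SR-IOV heartbeat timer that is
 * handled by megasas_sriov_heartbeat_handler() later in this file:
 */
#if 0
	megasas_start_timer(instance, &instance->sriov_heartbeat_timer,
			    megasas_sriov_heartbeat_handler,
			    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
#endif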
2131
2132 static void
2133 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2134
2135 static void
2136 process_fw_state_change_wq(struct work_struct *work);
2137
2138 void megasas_do_ocr(struct megasas_instance *instance)
2139 {
2140 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2141 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2142 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2143 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2144 }
2145 instance->instancet->disable_intr(instance);
2146 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2147 instance->issuepend_done = 0;
2148
2149 atomic_set(&instance->fw_outstanding, 0);
2150 megasas_internal_reset_defer_cmds(instance);
2151 process_fw_state_change_wq(&instance->work_init);
2152 }
2153
2154 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2155 int initial)
2156 {
2157 struct megasas_cmd *cmd;
2158 struct megasas_dcmd_frame *dcmd;
2159 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2160 dma_addr_t new_affiliation_111_h;
2161 int ld, retval = 0;
2162 u8 thisVf;
2163
2164 cmd = megasas_get_cmd(instance);
2165
2166 if (!cmd) {
2167 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2168 "Failed to get cmd for scsi%d\n",
2169 instance->host->host_no);
2170 return -ENOMEM;
2171 }
2172
2173 dcmd = &cmd->frame->dcmd;
2174
2175 if (!instance->vf_affiliation_111) {
2176 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2177 "affiliation for scsi%d\n", instance->host->host_no);
2178 megasas_return_cmd(instance, cmd);
2179 return -ENOMEM;
2180 }
2181
2182 if (initial)
2183 memset(instance->vf_affiliation_111, 0,
2184 sizeof(struct MR_LD_VF_AFFILIATION_111));
2185 else {
2186 new_affiliation_111 =
2187 pci_alloc_consistent(instance->pdev,
2188 sizeof(struct MR_LD_VF_AFFILIATION_111),
2189 &new_affiliation_111_h);
2190 if (!new_affiliation_111) {
2191 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2192 "memory for new affiliation for scsi%d\n",
2193 instance->host->host_no);
2194 megasas_return_cmd(instance, cmd);
2195 return -ENOMEM;
2196 }
2197 memset(new_affiliation_111, 0,
2198 sizeof(struct MR_LD_VF_AFFILIATION_111));
2199 }
2200
2201 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2202
2203 dcmd->cmd = MFI_CMD_DCMD;
2204 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2205 dcmd->sge_count = 1;
2206 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2207 dcmd->timeout = 0;
2208 dcmd->pad_0 = 0;
2209 dcmd->data_xfer_len =
2210 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2211 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2212
2213 if (initial)
2214 dcmd->sgl.sge32[0].phys_addr =
2215 cpu_to_le32(instance->vf_affiliation_111_h);
2216 else
2217 dcmd->sgl.sge32[0].phys_addr =
2218 cpu_to_le32(new_affiliation_111_h);
2219
2220 dcmd->sgl.sge32[0].length = cpu_to_le32(
2221 sizeof(struct MR_LD_VF_AFFILIATION_111));
2222
2223 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2224 "scsi%d\n", instance->host->host_no);
2225
2226 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2227 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2228 " failed with status 0x%x for scsi%d\n",
2229 dcmd->cmd_status, instance->host->host_no);
2230 retval = 1; /* Do a scan if we couldn't get affiliation */
2231 goto out;
2232 }
2233
2234 if (!initial) {
2235 thisVf = new_affiliation_111->thisVf;
2236 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2237 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2238 new_affiliation_111->map[ld].policy[thisVf]) {
2239 dev_warn(&instance->pdev->dev, "SR-IOV: "
2240 "Got new LD/VF affiliation for scsi%d\n",
2241 instance->host->host_no);
2242 memcpy(instance->vf_affiliation_111,
2243 new_affiliation_111,
2244 sizeof(struct MR_LD_VF_AFFILIATION_111));
2245 retval = 1;
2246 goto out;
2247 }
2248 }
2249 out:
2250 if (new_affiliation_111) {
2251 pci_free_consistent(instance->pdev,
2252 sizeof(struct MR_LD_VF_AFFILIATION_111),
2253 new_affiliation_111,
2254 new_affiliation_111_h);
2255 }
2256
2257 megasas_return_cmd(instance, cmd);
2258
2259 return retval;
2260 }
2261
2262 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2263 int initial)
2264 {
2265 struct megasas_cmd *cmd;
2266 struct megasas_dcmd_frame *dcmd;
2267 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2268 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2269 dma_addr_t new_affiliation_h;
2270 int i, j, retval = 0, found = 0, doscan = 0;
2271 u8 thisVf;
2272
2273 cmd = megasas_get_cmd(instance);
2274
2275 if (!cmd) {
2276 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2277 "Failed to get cmd for scsi%d\n",
2278 instance->host->host_no);
2279 return -ENOMEM;
2280 }
2281
2282 dcmd = &cmd->frame->dcmd;
2283
2284 if (!instance->vf_affiliation) {
2285 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2286 "affiliation for scsi%d\n", instance->host->host_no);
2287 megasas_return_cmd(instance, cmd);
2288 return -ENOMEM;
2289 }
2290
2291 if (initial)
2292 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2293 sizeof(struct MR_LD_VF_AFFILIATION));
2294 else {
2295 new_affiliation =
2296 pci_alloc_consistent(instance->pdev,
2297 (MAX_LOGICAL_DRIVES + 1) *
2298 sizeof(struct MR_LD_VF_AFFILIATION),
2299 &new_affiliation_h);
2300 if (!new_affiliation) {
2301 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2302 "memory for new affiliation for scsi%d\n",
2303 instance->host->host_no);
2304 megasas_return_cmd(instance, cmd);
2305 return -ENOMEM;
2306 }
2307 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2308 sizeof(struct MR_LD_VF_AFFILIATION));
2309 }
2310
2311 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2312
2313 dcmd->cmd = MFI_CMD_DCMD;
2314 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2315 dcmd->sge_count = 1;
2316 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2317 dcmd->timeout = 0;
2318 dcmd->pad_0 = 0;
2319 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2320 sizeof(struct MR_LD_VF_AFFILIATION));
2321 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2322
2323 if (initial)
2324 dcmd->sgl.sge32[0].phys_addr =
2325 cpu_to_le32(instance->vf_affiliation_h);
2326 else
2327 dcmd->sgl.sge32[0].phys_addr =
2328 cpu_to_le32(new_affiliation_h);
2329
2330 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2331 sizeof(struct MR_LD_VF_AFFILIATION));
2332
2333 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2334 "scsi%d\n", instance->host->host_no);
2335
2336
2337 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2338 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2339 " failed with status 0x%x for scsi%d\n",
2340 dcmd->cmd_status, instance->host->host_no);
2341 retval = 1; /* Do a scan if we couldn't get affiliation */
2342 goto out;
2343 }
2344
2345 if (!initial) {
2346 if (!new_affiliation->ldCount) {
2347 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2348 "affiliation for passive path for scsi%d\n",
2349 instance->host->host_no);
2350 retval = 1;
2351 goto out;
2352 }
2353 newmap = new_affiliation->map;
2354 savedmap = instance->vf_affiliation->map;
2355 thisVf = new_affiliation->thisVf;
2356 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2357 found = 0;
2358 for (j = 0; j < instance->vf_affiliation->ldCount;
2359 j++) {
2360 if (newmap->ref.targetId ==
2361 savedmap->ref.targetId) {
2362 found = 1;
2363 if (newmap->policy[thisVf] !=
2364 savedmap->policy[thisVf]) {
2365 doscan = 1;
2366 goto out;
2367 }
2368 }
2369 savedmap = (struct MR_LD_VF_MAP *)
2370 ((unsigned char *)savedmap +
2371 savedmap->size);
2372 }
2373 if (!found && newmap->policy[thisVf] !=
2374 MR_LD_ACCESS_HIDDEN) {
2375 doscan = 1;
2376 goto out;
2377 }
2378 newmap = (struct MR_LD_VF_MAP *)
2379 ((unsigned char *)newmap + newmap->size);
2380 }
2381
2382 newmap = new_affiliation->map;
2383 savedmap = instance->vf_affiliation->map;
2384
2385 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2386 found = 0;
2387 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2388 if (savedmap->ref.targetId ==
2389 newmap->ref.targetId) {
2390 found = 1;
2391 if (savedmap->policy[thisVf] !=
2392 newmap->policy[thisVf]) {
2393 doscan = 1;
2394 goto out;
2395 }
2396 }
2397 newmap = (struct MR_LD_VF_MAP *)
2398 ((unsigned char *)newmap +
2399 newmap->size);
2400 }
2401 if (!found && savedmap->policy[thisVf] !=
2402 MR_LD_ACCESS_HIDDEN) {
2403 doscan = 1;
2404 goto out;
2405 }
2406 savedmap = (struct MR_LD_VF_MAP *)
2407 ((unsigned char *)savedmap +
2408 savedmap->size);
2409 }
2410 }
2411 out:
2412 if (doscan) {
2413 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2414 "affiliation for scsi%d\n", instance->host->host_no);
2415 memcpy(instance->vf_affiliation, new_affiliation,
2416 new_affiliation->size);
2417 retval = 1;
2418 }
2419
2420 if (new_affiliation)
2421 pci_free_consistent(instance->pdev,
2422 (MAX_LOGICAL_DRIVES + 1) *
2423 sizeof(struct MR_LD_VF_AFFILIATION),
2424 new_affiliation, new_affiliation_h);
2425 megasas_return_cmd(instance, cmd);
2426
2427 return retval;
2428 }
2429
2430 /* This function will get the current SR-IOV LD/VF affiliation */
2431 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2432 int initial)
2433 {
2434 int retval;
2435
2436 if (instance->PlasmaFW111)
2437 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2438 else
2439 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2440 return retval;
2441 }
2442
2443 /* This function will tell FW to start the SR-IOV heartbeat */
2444 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2445 int initial)
2446 {
2447 struct megasas_cmd *cmd;
2448 struct megasas_dcmd_frame *dcmd;
2449 int retval = 0;
2450
2451 cmd = megasas_get_cmd(instance);
2452
2453 if (!cmd) {
2454 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2455 "Failed to get cmd for scsi%d\n",
2456 instance->host->host_no);
2457 return -ENOMEM;
2458 }
2459
2460 dcmd = &cmd->frame->dcmd;
2461
2462 if (initial) {
2463 instance->hb_host_mem =
2464 pci_zalloc_consistent(instance->pdev,
2465 sizeof(struct MR_CTRL_HB_HOST_MEM),
2466 &instance->hb_host_mem_h);
2467 if (!instance->hb_host_mem) {
2468 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2469 " memory for heartbeat host memory for scsi%d\n",
2470 instance->host->host_no);
2471 retval = -ENOMEM;
2472 goto out;
2473 }
2474 }
2475
2476 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2477
2478 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2479 dcmd->cmd = MFI_CMD_DCMD;
2480 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2481 dcmd->sge_count = 1;
2482 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2483 dcmd->timeout = 0;
2484 dcmd->pad_0 = 0;
2485 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2486 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2487 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2488 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2489
2490 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2491 instance->host->host_no);
2492
2493 if (instance->ctrl_context && !instance->mask_interrupts)
2494 retval = megasas_issue_blocked_cmd(instance, cmd,
2495 MEGASAS_ROUTINE_WAIT_TIME_VF);
2496 else
2497 retval = megasas_issue_polled(instance, cmd);
2498
2499 if (retval) {
2500 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2501 "_MEM_ALLOC DCMD %s for scsi%d\n",
2502 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2503 "timed out" : "failed", instance->host->host_no);
2504 retval = 1;
2505 }
2506
2507 out:
2508 megasas_return_cmd(instance, cmd);
2509
2510 return retval;
2511 }
2512
2513 /* Handler for SR-IOV heartbeat */
2514 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2515 {
2516 struct megasas_instance *instance =
2517 (struct megasas_instance *)instance_addr;
2518
2519 if (instance->hb_host_mem->HB.fwCounter !=
2520 instance->hb_host_mem->HB.driverCounter) {
2521 instance->hb_host_mem->HB.driverCounter =
2522 instance->hb_host_mem->HB.fwCounter;
2523 mod_timer(&instance->sriov_heartbeat_timer,
2524 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2525 } else {
2526 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2527 "completed for scsi%d\n", instance->host->host_no);
2528 schedule_work(&instance->work_init);
2529 }
2530 }
2531
2532 /**
2533 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2534 * @instance: Adapter soft state
2535 *
2536 * This function waits up to resetwaittime seconds (default MEGASAS_RESET_WAIT_TIME)
2537 * for the FW to complete all outstanding commands. It returns FAILED if IOs are
2538 * still pending after that and recovery does not help; the controller is then killed.
2539 */
2540 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2541 {
2542 int i, sl, outstanding;
2543 u32 reset_index;
2544 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2545 unsigned long flags;
2546 struct list_head clist_local;
2547 struct megasas_cmd *reset_cmd;
2548 u32 fw_state;
2549
2550 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2551 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2552 __func__, __LINE__);
2553 return FAILED;
2554 }
2555
2556 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2557
2558 INIT_LIST_HEAD(&clist_local);
2559 spin_lock_irqsave(&instance->hba_lock, flags);
2560 list_splice_init(&instance->internal_reset_pending_q,
2561 &clist_local);
2562 spin_unlock_irqrestore(&instance->hba_lock, flags);
2563
2564 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2565 for (i = 0; i < wait_time; i++) {
2566 msleep(1000);
2567 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2568 break;
2569 }
2570
2571 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2572 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2573 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2574 return FAILED;
2575 }
2576
2577 reset_index = 0;
2578 while (!list_empty(&clist_local)) {
2579 reset_cmd = list_entry((&clist_local)->next,
2580 struct megasas_cmd, list);
2581 list_del_init(&reset_cmd->list);
2582 if (reset_cmd->scmd) {
2583 reset_cmd->scmd->result = DID_REQUEUE << 16;
2584 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2585 reset_index, reset_cmd,
2586 reset_cmd->scmd->cmnd[0]);
2587
2588 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2589 megasas_return_cmd(instance, reset_cmd);
2590 } else if (reset_cmd->sync_cmd) {
2591 dev_notice(&instance->pdev->dev, "%p synch cmds"
2592 "reset queue\n",
2593 reset_cmd);
2594
2595 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2596 instance->instancet->fire_cmd(instance,
2597 reset_cmd->frame_phys_addr,
2598 0, instance->reg_set);
2599 } else {
2600 dev_notice(&instance->pdev->dev, "%p unexpected"
2601 "cmds lst\n",
2602 reset_cmd);
2603 }
2604 reset_index++;
2605 }
2606
2607 return SUCCESS;
2608 }
2609
2610 for (i = 0; i < resetwaittime; i++) {
2611 outstanding = atomic_read(&instance->fw_outstanding);
2612
2613 if (!outstanding)
2614 break;
2615
2616 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2617 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2618 "commands to complete\n", i, outstanding);
2619 /*
2620 * Call cmd completion routine. Cmds are
2621 * completed directly without depending on the isr.
2622 */
2623 megasas_complete_cmd_dpc((unsigned long)instance);
2624 }
2625
2626 msleep(1000);
2627 }
2628
2629 i = 0;
2630 outstanding = atomic_read(&instance->fw_outstanding);
2631 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2632
2633 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2634 goto no_outstanding;
2635
2636 if (instance->disableOnlineCtrlReset)
2637 goto kill_hba_and_failed;
2638 do {
2639 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2640 dev_info(&instance->pdev->dev,
2641 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2642 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2643 if (i == 3)
2644 goto kill_hba_and_failed;
2645 megasas_do_ocr(instance);
2646
2647 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2648 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2649 __func__, __LINE__);
2650 return FAILED;
2651 }
2652 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2653 __func__, __LINE__);
2654
2655 for (sl = 0; sl < 10; sl++)
2656 msleep(500);
2657
2658 outstanding = atomic_read(&instance->fw_outstanding);
2659
2660 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2661 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2662 goto no_outstanding;
2663 }
2664 i++;
2665 } while (i <= 3);
2666
2667 no_outstanding:
2668
2669 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2670 __func__, __LINE__);
2671 return SUCCESS;
2672
2673 kill_hba_and_failed:
2674
2675 /* Reset not supported, kill adapter */
2676 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2677 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2678 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2679 atomic_read(&instance->fw_outstanding));
2680 megasas_dump_pending_frames(instance);
2681 megaraid_sas_kill_hba(instance);
2682
2683 return FAILED;
2684 }
2685
2686 /**
2687 * megasas_generic_reset - Generic reset routine
2688 * @scmd: Mid-layer SCSI command
2689 *
2690 * This routine implements a generic reset handler for device, bus and host
2691 * reset requests. Device, bus and host specific reset handlers can use this
2692 * function after they do their specific tasks.
2693 */
2694 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2695 {
2696 int ret_val;
2697 struct megasas_instance *instance;
2698
2699 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2700
2701 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2702 scmd->cmnd[0], scmd->retries);
2703
2704 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2705 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2706 return FAILED;
2707 }
2708
2709 ret_val = megasas_wait_for_outstanding(instance);
2710 if (ret_val == SUCCESS)
2711 dev_notice(&instance->pdev->dev, "reset successful\n");
2712 else
2713 dev_err(&instance->pdev->dev, "failed to do reset\n");
2714
2715 return ret_val;
2716 }
2717
2718 /**
2719 * megasas_reset_timer - quiesce the adapter if required
2720 * @scmd: scsi cmnd
2721 *
2722 * Sets the FW busy flag and reduces the host->can_queue if the
2723 * cmd has not been completed within the timeout period.
2724 */
2725 static enum
2726 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2727 {
2728 struct megasas_instance *instance;
2729 unsigned long flags;
2730
2731 if (time_after(jiffies, scmd->jiffies_at_alloc +
2732 (scmd_timeout * 2) * HZ)) {
2733 return BLK_EH_NOT_HANDLED;
2734 }
2735
2736 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2737 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2738 /* Mark FW busy and throttle IO */
2739 spin_lock_irqsave(instance->host->host_lock, flags);
2740
2741 instance->host->can_queue = instance->throttlequeuedepth;
2742 instance->last_time = jiffies;
2743 instance->flag |= MEGASAS_FW_BUSY;
2744
2745 spin_unlock_irqrestore(instance->host->host_lock, flags);
2746 }
2747 return BLK_EH_RESET_TIMER;
2748 }
2749
2750 /**
2751 * megasas_dump_frame - This function will dump MPT/MFI frame
2752 */
2753 static inline void
2754 megasas_dump_frame(void *mpi_request, int sz)
2755 {
2756 int i;
2757 __le32 *mfp = (__le32 *)mpi_request;
2758
2759 printk(KERN_INFO "IO request frame:\n\t");
2760 for (i = 0; i < sz / sizeof(__le32); i++) {
2761 if (i && ((i % 8) == 0))
2762 printk("\n\t");
2763 printk("%08x ", le32_to_cpu(mfp[i]));
2764 }
2765 printk("\n");
2766 }
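
/*
 * Illustrative call of the dump helper above, matching how it is used later
 * in the host reset path for fusion adapters (guarded out, example only):
 */
#if 0
	megasas_dump_frame(cmd->io_request,
			   sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
#endif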
2767
2768 /**
2769 * megasas_reset_bus_host - Bus & host reset handler entry point
2770 */
2771 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2772 {
2773 int ret;
2774 struct megasas_instance *instance;
2775
2776 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2777
2778 scmd_printk(KERN_INFO, scmd,
2779 "Controller reset is requested due to IO timeout\n"
2780 "SCSI command pointer: (%p)\t SCSI host state: %d\t"
2781 " SCSI host busy: %d\t FW outstanding: %d\n",
2782 scmd, scmd->device->host->shost_state,
2783 atomic_read((atomic_t *)&scmd->device->host->host_busy),
2784 atomic_read(&instance->fw_outstanding));
2785
2786 /*
2787 * First wait for all commands to complete
2788 */
2789 if (instance->ctrl_context) {
2790 struct megasas_cmd_fusion *cmd;
2791 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2792 if (cmd)
2793 megasas_dump_frame(cmd->io_request,
2794 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
2795 ret = megasas_reset_fusion(scmd->device->host,
2796 SCSIIO_TIMEOUT_OCR);
2797 } else
2798 ret = megasas_generic_reset(scmd);
2799
2800 return ret;
2801 }
2802
2803 /**
2804 * megasas_task_abort - Issues task abort request to firmware
2805 * (supported only for fusion adapters)
2806 * @scmd: SCSI command pointer
2807 */
2808 static int megasas_task_abort(struct scsi_cmnd *scmd)
2809 {
2810 int ret;
2811 struct megasas_instance *instance;
2812
2813 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2814
2815 if (instance->ctrl_context)
2816 ret = megasas_task_abort_fusion(scmd);
2817 else {
2818 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2819 ret = FAILED;
2820 }
2821
2822 return ret;
2823 }
2824
2825 /**
2826 * megasas_reset_target: Issues target reset request to firmware
2827 * (supported only for fusion adapters)
2828 * @scmd: SCSI command pointer
2829 */
2830 static int megasas_reset_target(struct scsi_cmnd *scmd)
2831 {
2832 int ret;
2833 struct megasas_instance *instance;
2834
2835 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2836
2837 if (instance->ctrl_context)
2838 ret = megasas_reset_target_fusion(scmd);
2839 else {
2840 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2841 ret = FAILED;
2842 }
2843
2844 return ret;
2845 }
2846
2847 /**
2848 * megasas_bios_param - Returns disk geometry for a disk
2849 * @sdev: device handle
2850 * @bdev: block device
2851 * @capacity: drive capacity
2852 * @geom: geometry parameters
2853 */
2854 static int
2855 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2856 sector_t capacity, int geom[])
2857 {
2858 int heads;
2859 int sectors;
2860 sector_t cylinders;
2861 unsigned long tmp;
2862
2863 /* Default heads (64) & sectors (32) */
2864 heads = 64;
2865 sectors = 32;
2866
2867 tmp = heads * sectors;
2868 cylinders = capacity;
2869
2870 sector_div(cylinders, tmp);
2871
2872 /*
2873 * Handle extended translation size for logical drives > 1Gb
2874 */
2875
2876 if (capacity >= 0x200000) {
2877 heads = 255;
2878 sectors = 63;
2879 tmp = heads*sectors;
2880 cylinders = capacity;
2881 sector_div(cylinders, tmp);
2882 }
2883
2884 geom[0] = heads;
2885 geom[1] = sectors;
2886 geom[2] = cylinders;
2887
2888 return 0;
2889 }
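
/*
 * Worked example for the translation above (illustrative): a 1 GiB logical
 * drive has 0x200000 = 2097152 sectors, so the extended translation applies:
 * heads = 255, sectors = 63, cylinders = 2097152 / (255 * 63) ~= 130.
 */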
2890
2891 static void megasas_aen_polling(struct work_struct *work);
2892
2893 /**
2894 * megasas_service_aen - Processes an event notification
2895 * @instance: Adapter soft state
2896 * @cmd: AEN command completed by the ISR
2897 *
2898 * For AEN, driver sends a command down to FW that is held by the FW till an
2899 * event occurs. When an event of interest occurs, FW completes the command
2900 * that it was previously holding.
2901 *
2902 * This routine sends a SIGIO signal to processes that have registered with the
2903 * driver for AEN.
2904 */
2905 static void
2906 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2907 {
2908 unsigned long flags;
2909
2910 /*
2911 * Don't signal app if it is just an aborted previously registered aen
2912 */
2913 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2914 spin_lock_irqsave(&poll_aen_lock, flags);
2915 megasas_poll_wait_aen = 1;
2916 spin_unlock_irqrestore(&poll_aen_lock, flags);
2917 wake_up(&megasas_poll_wait);
2918 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2919 }
2920 else
2921 cmd->abort_aen = 0;
2922
2923 instance->aen_cmd = NULL;
2924
2925 megasas_return_cmd(instance, cmd);
2926
2927 if ((instance->unload == 0) &&
2928 ((instance->issuepend_done == 1))) {
2929 struct megasas_aen_event *ev;
2930
2931 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2932 if (!ev) {
2933 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2934 } else {
2935 ev->instance = instance;
2936 instance->ev = ev;
2937 INIT_DELAYED_WORK(&ev->hotplug_work,
2938 megasas_aen_polling);
2939 schedule_delayed_work(&ev->hotplug_work, 0);
2940 }
2941 }
2942 }
2943
2944 static ssize_t
2945 megasas_fw_crash_buffer_store(struct device *cdev,
2946 struct device_attribute *attr, const char *buf, size_t count)
2947 {
2948 struct Scsi_Host *shost = class_to_shost(cdev);
2949 struct megasas_instance *instance =
2950 (struct megasas_instance *) shost->hostdata;
2951 int val = 0;
2952 unsigned long flags;
2953
2954 if (kstrtoint(buf, 0, &val) != 0)
2955 return -EINVAL;
2956
2957 spin_lock_irqsave(&instance->crashdump_lock, flags);
2958 instance->fw_crash_buffer_offset = val;
2959 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2960 return strlen(buf);
2961 }
2962
2963 static ssize_t
2964 megasas_fw_crash_buffer_show(struct device *cdev,
2965 struct device_attribute *attr, char *buf)
2966 {
2967 struct Scsi_Host *shost = class_to_shost(cdev);
2968 struct megasas_instance *instance =
2969 (struct megasas_instance *) shost->hostdata;
2970 u32 size;
2971 unsigned long buff_addr;
2972 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2973 unsigned long src_addr;
2974 unsigned long flags;
2975 u32 buff_offset;
2976
2977 spin_lock_irqsave(&instance->crashdump_lock, flags);
2978 buff_offset = instance->fw_crash_buffer_offset;
2979 if (!instance->crash_dump_buf &&
2980 !((instance->fw_crash_state == AVAILABLE) ||
2981 (instance->fw_crash_state == COPYING))) {
2982 dev_err(&instance->pdev->dev,
2983 "Firmware crash dump is not available\n");
2984 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2985 return -EINVAL;
2986 }
2987
2988 buff_addr = (unsigned long) buf;
2989
2990 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2991 dev_err(&instance->pdev->dev,
2992 "Firmware crash dump offset is out of range\n");
2993 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2994 return 0;
2995 }
2996
2997 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2998 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2999
3000 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3001 (buff_offset % dmachunk);
3002 memcpy(buf, (void *)src_addr, size);
3003 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3004
3005 return size;
3006 }
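
/*
 * Indexing note for the show handler above (illustrative): the crash dump is
 * kept in crash_buf[] chunks of CRASH_DMA_BUF_SIZE bytes, so a read at
 * fw_crash_buffer_offset = dmachunk + 4096 copies from crash_buf[1] starting
 * at byte 4096, and at most PAGE_SIZE - 1 bytes are returned per read.
 */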
3007
3008 static ssize_t
3009 megasas_fw_crash_buffer_size_show(struct device *cdev,
3010 struct device_attribute *attr, char *buf)
3011 {
3012 struct Scsi_Host *shost = class_to_shost(cdev);
3013 struct megasas_instance *instance =
3014 (struct megasas_instance *) shost->hostdata;
3015
3016 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3017 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3018 }
3019
3020 static ssize_t
3021 megasas_fw_crash_state_store(struct device *cdev,
3022 struct device_attribute *attr, const char *buf, size_t count)
3023 {
3024 struct Scsi_Host *shost = class_to_shost(cdev);
3025 struct megasas_instance *instance =
3026 (struct megasas_instance *) shost->hostdata;
3027 int val = 0;
3028 unsigned long flags;
3029
3030 if (kstrtoint(buf, 0, &val) != 0)
3031 return -EINVAL;
3032
3033 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3034 dev_err(&instance->pdev->dev, "application updates invalid "
3035 "firmware crash state\n");
3036 return -EINVAL;
3037 }
3038
3039 instance->fw_crash_state = val;
3040
3041 if ((val == COPIED) || (val == COPY_ERROR)) {
3042 spin_lock_irqsave(&instance->crashdump_lock, flags);
3043 megasas_free_host_crash_buffer(instance);
3044 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3045 if (val == COPY_ERROR)
3046 dev_info(&instance->pdev->dev, "application failed to "
3047 "copy Firmware crash dump\n");
3048 else
3049 dev_info(&instance->pdev->dev, "Firmware crash dump "
3050 "copied successfully\n");
3051 }
3052 return strlen(buf);
3053 }
3054
3055 static ssize_t
3056 megasas_fw_crash_state_show(struct device *cdev,
3057 struct device_attribute *attr, char *buf)
3058 {
3059 struct Scsi_Host *shost = class_to_shost(cdev);
3060 struct megasas_instance *instance =
3061 (struct megasas_instance *) shost->hostdata;
3062
3063 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3064 }
3065
3066 static ssize_t
3067 megasas_page_size_show(struct device *cdev,
3068 struct device_attribute *attr, char *buf)
3069 {
3070 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3071 }
3072
3073 static ssize_t
3074 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3075 char *buf)
3076 {
3077 struct Scsi_Host *shost = class_to_shost(cdev);
3078 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3079
3080 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3081 }
3082
3083 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3084 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3085 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3086 megasas_fw_crash_buffer_size_show, NULL);
3087 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3088 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3089 static DEVICE_ATTR(page_size, S_IRUGO,
3090 megasas_page_size_show, NULL);
3091 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3092 megasas_ldio_outstanding_show, NULL);
3093
3094 struct device_attribute *megaraid_host_attrs[] = {
3095 &dev_attr_fw_crash_buffer_size,
3096 &dev_attr_fw_crash_buffer,
3097 &dev_attr_fw_crash_state,
3098 &dev_attr_page_size,
3099 &dev_attr_ldio_outstanding,
3100 NULL,
3101 };
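
/*
 * These attributes are exposed through the SCSI host sysfs directory
 * (typically /sys/class/scsi_host/hostN/). Illustrative, hypothetical flow:
 * read fw_crash_buffer_size, write an offset to fw_crash_buffer, read
 * fw_crash_buffer in pieces of at most PAGE_SIZE - 1 bytes, and finally write
 * COPIED or COPY_ERROR to fw_crash_state to release the host crash buffer.
 */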
3102
3103 /*
3104 * Scsi host template for megaraid_sas driver
3105 */
3106 static struct scsi_host_template megasas_template = {
3107
3108 .module = THIS_MODULE,
3109 .name = "Avago SAS based MegaRAID driver",
3110 .proc_name = "megaraid_sas",
3111 .slave_configure = megasas_slave_configure,
3112 .slave_alloc = megasas_slave_alloc,
3113 .slave_destroy = megasas_slave_destroy,
3114 .queuecommand = megasas_queue_command,
3115 .eh_target_reset_handler = megasas_reset_target,
3116 .eh_abort_handler = megasas_task_abort,
3117 .eh_host_reset_handler = megasas_reset_bus_host,
3118 .eh_timed_out = megasas_reset_timer,
3119 .shost_attrs = megaraid_host_attrs,
3120 .bios_param = megasas_bios_param,
3121 .use_clustering = ENABLE_CLUSTERING,
3122 .change_queue_depth = scsi_change_queue_depth,
3123 .no_write_same = 1,
3124 };
3125
3126 /**
3127 * megasas_complete_int_cmd - Completes an internal command
3128 * @instance: Adapter soft state
3129 * @cmd: Command to be completed
3130 *
3131 * The megasas_issue_blocked_cmd() function waits for a command to complete
3132 * after it issues a command. This function wakes up that waiting routine by
3133 * calling wake_up() on the wait queue.
3134 */
3135 static void
3136 megasas_complete_int_cmd(struct megasas_instance *instance,
3137 struct megasas_cmd *cmd)
3138 {
3139 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3140 wake_up(&instance->int_cmd_wait_q);
3141 }
3142
3143 /**
3144 * megasas_complete_abort - Completes aborting a command
3145 * @instance: Adapter soft state
3146 * @cmd: Cmd that was issued to abort another cmd
3147 *
3148 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3149 * after it issues an abort on a previously issued command. This function
3150 * wakes up all functions waiting on the same wait queue.
3151 */
3152 static void
3153 megasas_complete_abort(struct megasas_instance *instance,
3154 struct megasas_cmd *cmd)
3155 {
3156 if (cmd->sync_cmd) {
3157 cmd->sync_cmd = 0;
3158 cmd->cmd_status_drv = 0;
3159 wake_up(&instance->abort_cmd_wait_q);
3160 }
3161 }
3162
3163 /**
3164 * megasas_complete_cmd - Completes a command
3165 * @instance: Adapter soft state
3166 * @cmd: Command to be completed
3167 * @alt_status: If non-zero, use this value as status to
3168 * SCSI mid-layer instead of the value returned
3169 * by the FW. This should be used if caller wants
3170 * an alternate status (as in the case of aborted
3171 * commands)
3172 */
3173 void
3174 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3175 u8 alt_status)
3176 {
3177 int exception = 0;
3178 struct megasas_header *hdr = &cmd->frame->hdr;
3179 unsigned long flags;
3180 struct fusion_context *fusion = instance->ctrl_context;
3181 u32 opcode, status;
3182
3183 /* flag for the retry reset */
3184 cmd->retry_for_fw_reset = 0;
3185
3186 if (cmd->scmd)
3187 cmd->scmd->SCp.ptr = NULL;
3188
3189 switch (hdr->cmd) {
3190 case MFI_CMD_INVALID:
3191 /* Some older 1068 controller FW may keep a pending
3192 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3193 when booting the kdump kernel. Ignore this command to
3194 prevent a kernel panic on shutdown of the kdump kernel. */
3195 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3196 "completed\n");
3197 dev_warn(&instance->pdev->dev, "If you have a controller "
3198 "other than PERC5, please upgrade your firmware\n");
3199 break;
3200 case MFI_CMD_PD_SCSI_IO:
3201 case MFI_CMD_LD_SCSI_IO:
3202
3203 /*
3204 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3205 * issued either through an IO path or an IOCTL path. If it
3206 * was via IOCTL, we will send it to internal completion.
3207 */
3208 if (cmd->sync_cmd) {
3209 cmd->sync_cmd = 0;
3210 megasas_complete_int_cmd(instance, cmd);
3211 break;
3212 }
3213
3214 case MFI_CMD_LD_READ:
3215 case MFI_CMD_LD_WRITE:
3216
3217 if (alt_status) {
3218 cmd->scmd->result = alt_status << 16;
3219 exception = 1;
3220 }
3221
3222 if (exception) {
3223
3224 atomic_dec(&instance->fw_outstanding);
3225
3226 scsi_dma_unmap(cmd->scmd);
3227 cmd->scmd->scsi_done(cmd->scmd);
3228 megasas_return_cmd(instance, cmd);
3229
3230 break;
3231 }
3232
3233 switch (hdr->cmd_status) {
3234
3235 case MFI_STAT_OK:
3236 cmd->scmd->result = DID_OK << 16;
3237 break;
3238
3239 case MFI_STAT_SCSI_IO_FAILED:
3240 case MFI_STAT_LD_INIT_IN_PROGRESS:
3241 cmd->scmd->result =
3242 (DID_ERROR << 16) | hdr->scsi_status;
3243 break;
3244
3245 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3246
3247 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3248
3249 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3250 memset(cmd->scmd->sense_buffer, 0,
3251 SCSI_SENSE_BUFFERSIZE);
3252 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3253 hdr->sense_len);
3254
3255 cmd->scmd->result |= DRIVER_SENSE << 24;
3256 }
3257
3258 break;
3259
3260 case MFI_STAT_LD_OFFLINE:
3261 case MFI_STAT_DEVICE_NOT_FOUND:
3262 cmd->scmd->result = DID_BAD_TARGET << 16;
3263 break;
3264
3265 default:
3266 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3267 hdr->cmd_status);
3268 cmd->scmd->result = DID_ERROR << 16;
3269 break;
3270 }
3271
3272 atomic_dec(&instance->fw_outstanding);
3273
3274 scsi_dma_unmap(cmd->scmd);
3275 cmd->scmd->scsi_done(cmd->scmd);
3276 megasas_return_cmd(instance, cmd);
3277
3278 break;
3279
3280 case MFI_CMD_SMP:
3281 case MFI_CMD_STP:
3282 case MFI_CMD_DCMD:
3283 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3284 /* Check for LD map update */
3285 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3286 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3287 fusion->fast_path_io = 0;
3288 spin_lock_irqsave(instance->host->host_lock, flags);
3289 instance->map_update_cmd = NULL;
3290 if (cmd->frame->hdr.cmd_status != 0) {
3291 if (cmd->frame->hdr.cmd_status !=
3292 MFI_STAT_NOT_FOUND)
3293 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3294 cmd->frame->hdr.cmd_status);
3295 else {
3296 megasas_return_cmd(instance, cmd);
3297 spin_unlock_irqrestore(
3298 instance->host->host_lock,
3299 flags);
3300 break;
3301 }
3302 } else
3303 instance->map_id++;
3304 megasas_return_cmd(instance, cmd);
3305
3306 /*
3307 * Set fast path IO to ZERO.
3308 * MR_ValidateMapInfo() will set the proper value.
3309 * Until then all IOs will go as LD IO.
3310 */
3311 if (MR_ValidateMapInfo(instance))
3312 fusion->fast_path_io = 1;
3313 else
3314 fusion->fast_path_io = 0;
3315 megasas_sync_map_info(instance);
3316 spin_unlock_irqrestore(instance->host->host_lock,
3317 flags);
3318 break;
3319 }
3320 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3321 opcode == MR_DCMD_CTRL_EVENT_GET) {
3322 spin_lock_irqsave(&poll_aen_lock, flags);
3323 megasas_poll_wait_aen = 0;
3324 spin_unlock_irqrestore(&poll_aen_lock, flags);
3325 }
3326
3327 /* FW has an updated PD sequence */
3328 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3329 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3330
3331 spin_lock_irqsave(instance->host->host_lock, flags);
3332 status = cmd->frame->hdr.cmd_status;
3333 instance->jbod_seq_cmd = NULL;
3334 megasas_return_cmd(instance, cmd);
3335
3336 if (status == MFI_STAT_OK) {
3337 instance->pd_seq_map_id++;
3338 /* Re-register a pd sync seq num cmd */
3339 if (megasas_sync_pd_seq_num(instance, true))
3340 instance->use_seqnum_jbod_fp = false;
3341 } else
3342 instance->use_seqnum_jbod_fp = false;
3343
3344 spin_unlock_irqrestore(instance->host->host_lock, flags);
3345 break;
3346 }
3347
3348 /*
3349 * See if we got an event notification
3350 */
3351 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3352 megasas_service_aen(instance, cmd);
3353 else
3354 megasas_complete_int_cmd(instance, cmd);
3355
3356 break;
3357
3358 case MFI_CMD_ABORT:
3359 /*
3360 * Cmd issued to abort another cmd returned
3361 */
3362 megasas_complete_abort(instance, cmd);
3363 break;
3364
3365 default:
3366 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3367 hdr->cmd);
3368 break;
3369 }
3370 }
3371
3372 /**
3373 * megasas_issue_pending_cmds_again - issue all pending cmds
3374 * in FW again because of the fw reset
3375 * @instance: Adapter soft state
3376 */
3377 static inline void
3378 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3379 {
3380 struct megasas_cmd *cmd;
3381 struct list_head clist_local;
3382 union megasas_evt_class_locale class_locale;
3383 unsigned long flags;
3384 u32 seq_num;
3385
3386 INIT_LIST_HEAD(&clist_local);
3387 spin_lock_irqsave(&instance->hba_lock, flags);
3388 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3389 spin_unlock_irqrestore(&instance->hba_lock, flags);
3390
3391 while (!list_empty(&clist_local)) {
3392 cmd = list_entry((&clist_local)->next,
3393 struct megasas_cmd, list);
3394 list_del_init(&cmd->list);
3395
3396 if (cmd->sync_cmd || cmd->scmd) {
3397 dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3398 "detected to be pending while HBA reset\n",
3399 cmd, cmd->scmd, cmd->sync_cmd);
3400
3401 cmd->retry_for_fw_reset++;
3402
3403 if (cmd->retry_for_fw_reset == 3) {
3404 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3405 "was tried multiple times during reset."
3406 "Shutting down the HBA\n",
3407 cmd, cmd->scmd, cmd->sync_cmd);
3408 instance->instancet->disable_intr(instance);
3409 atomic_set(&instance->fw_reset_no_pci_access, 1);
3410 megaraid_sas_kill_hba(instance);
3411 return;
3412 }
3413 }
3414
3415 if (cmd->sync_cmd == 1) {
3416 if (cmd->scmd) {
3417 dev_notice(&instance->pdev->dev, "unexpected"
3418 "cmd attached to internal command!\n");
3419 }
3420 dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3421 "on the internal reset queue,"
3422 "issue it again.\n", cmd);
3423 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3424 instance->instancet->fire_cmd(instance,
3425 cmd->frame_phys_addr,
3426 0, instance->reg_set);
3427 } else if (cmd->scmd) {
3428 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3429 "detected on the internal queue, issue again.\n",
3430 cmd, cmd->scmd->cmnd[0]);
3431
3432 atomic_inc(&instance->fw_outstanding);
3433 instance->instancet->fire_cmd(instance,
3434 cmd->frame_phys_addr,
3435 cmd->frame_count-1, instance->reg_set);
3436 } else {
3437 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3438 "internal reset defer list while re-issue!!\n",
3439 cmd);
3440 }
3441 }
3442
3443 if (instance->aen_cmd) {
3444 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3445 megasas_return_cmd(instance, instance->aen_cmd);
3446
3447 instance->aen_cmd = NULL;
3448 }
3449
3450 /*
3451 * Initiate AEN (Asynchronous Event Notification)
3452 */
3453 seq_num = instance->last_seq_num;
3454 class_locale.members.reserved = 0;
3455 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3456 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3457
3458 megasas_register_aen(instance, seq_num, class_locale.word);
3459 }
3460
3461 /**
3462 * Move the internal reset pending commands to a deferred queue.
3463 *
3464 * We move the commands pending at internal reset time to a
3465 * pending queue. This queue is flushed after successful
3466 * completion of the internal reset sequence. If the internal reset
3467 * does not complete in time, the kernel reset handler flushes
3468 * these commands.
3469 **/
3470 static void
3471 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3472 {
3473 struct megasas_cmd *cmd;
3474 int i;
3475 u16 max_cmd = instance->max_fw_cmds;
3476 u32 defer_index;
3477 unsigned long flags;
3478
3479 defer_index = 0;
3480 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3481 for (i = 0; i < max_cmd; i++) {
3482 cmd = instance->cmd_list[i];
3483 if (cmd->sync_cmd == 1 || cmd->scmd) {
3484 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3485 "on the defer queue as internal\n",
3486 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3487
3488 if (!list_empty(&cmd->list)) {
3489 dev_notice(&instance->pdev->dev, "ERROR while"
3490 " moving this cmd:%p, %d %p, it was"
3491 "discovered on some list?\n",
3492 cmd, cmd->sync_cmd, cmd->scmd);
3493
3494 list_del_init(&cmd->list);
3495 }
3496 defer_index++;
3497 list_add_tail(&cmd->list,
3498 &instance->internal_reset_pending_q);
3499 }
3500 }
3501 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3502 }
3503
3504
3505 static void
3506 process_fw_state_change_wq(struct work_struct *work)
3507 {
3508 struct megasas_instance *instance =
3509 container_of(work, struct megasas_instance, work_init);
3510 u32 wait;
3511 unsigned long flags;
3512
3513 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3514 dev_notice(&instance->pdev->dev, "error, recovery state %x\n",
3515 atomic_read(&instance->adprecovery));
3516 return ;
3517 }
3518
3519 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3520 dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3521 "state, restarting it...\n");
3522
3523 instance->instancet->disable_intr(instance);
3524 atomic_set(&instance->fw_outstanding, 0);
3525
3526 atomic_set(&instance->fw_reset_no_pci_access, 1);
3527 instance->instancet->adp_reset(instance, instance->reg_set);
3528 atomic_set(&instance->fw_reset_no_pci_access, 0);
3529
3530 dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3531 "initiating next stage...\n");
3532
3533 dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3534 "state 2 starting...\n");
3535
3536 /* wait about 30 seconds before starting the second init */
3537 for (wait = 0; wait < 30; wait++) {
3538 msleep(1000);
3539 }
3540
3541 if (megasas_transition_to_ready(instance, 1)) {
3542 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3543
3544 atomic_set(&instance->fw_reset_no_pci_access, 1);
3545 megaraid_sas_kill_hba(instance);
3546 return ;
3547 }
3548
3549 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3550 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3551 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3552 ) {
3553 *instance->consumer = *instance->producer;
3554 } else {
3555 *instance->consumer = 0;
3556 *instance->producer = 0;
3557 }
3558
3559 megasas_issue_init_mfi(instance);
3560
3561 spin_lock_irqsave(&instance->hba_lock, flags);
3562 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3563 spin_unlock_irqrestore(&instance->hba_lock, flags);
3564 instance->instancet->enable_intr(instance);
3565
3566 megasas_issue_pending_cmds_again(instance);
3567 instance->issuepend_done = 1;
3568 }
3569 }
3570
3571 /**
3572 * megasas_deplete_reply_queue - Processes all completed commands
3573 * @instance: Adapter soft state
3574 * @alt_status: Alternate status to be returned to
3575 * SCSI mid-layer instead of the status
3576 * returned by the FW
3577 * Note: this must be called with hba lock held
3578 */
3579 static int
3580 megasas_deplete_reply_queue(struct megasas_instance *instance,
3581 u8 alt_status)
3582 {
3583 u32 mfiStatus;
3584 u32 fw_state;
3585
3586 if ((mfiStatus = instance->instancet->check_reset(instance,
3587 instance->reg_set)) == 1) {
3588 return IRQ_HANDLED;
3589 }
3590
3591 if ((mfiStatus = instance->instancet->clear_intr(
3592 instance->reg_set)
3593 ) == 0) {
3594 /* Hardware may not set outbound_intr_status in MSI-X mode */
3595 if (!instance->msix_vectors)
3596 return IRQ_NONE;
3597 }
3598
3599 instance->mfiStatus = mfiStatus;
3600
3601 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3602 fw_state = instance->instancet->read_fw_status_reg(
3603 instance->reg_set) & MFI_STATE_MASK;
3604
3605 if (fw_state != MFI_STATE_FAULT) {
3606 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3607 fw_state);
3608 }
3609
3610 if ((fw_state == MFI_STATE_FAULT) &&
3611 (instance->disableOnlineCtrlReset == 0)) {
3612 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3613
3614 if ((instance->pdev->device ==
3615 PCI_DEVICE_ID_LSI_SAS1064R) ||
3616 (instance->pdev->device ==
3617 PCI_DEVICE_ID_DELL_PERC5) ||
3618 (instance->pdev->device ==
3619 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3620
3621 *instance->consumer =
3622 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3623 }
3624
3625
3626 instance->instancet->disable_intr(instance);
3627 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3628 instance->issuepend_done = 0;
3629
3630 atomic_set(&instance->fw_outstanding, 0);
3631 megasas_internal_reset_defer_cmds(instance);
3632
3633 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3634 fw_state, atomic_read(&instance->adprecovery));
3635
3636 schedule_work(&instance->work_init);
3637 return IRQ_HANDLED;
3638
3639 } else {
3640 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3641 fw_state, instance->disableOnlineCtrlReset);
3642 }
3643 }
3644
3645 tasklet_schedule(&instance->isr_tasklet);
3646 return IRQ_HANDLED;
3647 }
3648 /**
3649 * megasas_isr - isr entry point
3650 */
3651 static irqreturn_t megasas_isr(int irq, void *devp)
3652 {
3653 struct megasas_irq_context *irq_context = devp;
3654 struct megasas_instance *instance = irq_context->instance;
3655 unsigned long flags;
3656 irqreturn_t rc;
3657
3658 if (atomic_read(&instance->fw_reset_no_pci_access))
3659 return IRQ_HANDLED;
3660
3661 spin_lock_irqsave(&instance->hba_lock, flags);
3662 rc = megasas_deplete_reply_queue(instance, DID_OK);
3663 spin_unlock_irqrestore(&instance->hba_lock, flags);
3664
3665 return rc;
3666 }
3667
3668 /**
3669 * megasas_transition_to_ready - Move the FW to READY state
3670 * @instance: Adapter soft state
3671 *
3672 * During initialization, the FW can be in any one of several possible
3673 * states. If the FW is in the operational or waiting-for-handshake state,
3674 * the driver must take steps to bring it to the ready state. Otherwise, it
3675 * has to wait for the FW to reach the ready state.
3676 */
3677 int
3678 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3679 {
3680 int i;
3681 u8 max_wait;
3682 u32 fw_state;
3683 u32 cur_state;
3684 u32 abs_state, curr_abs_state;
3685
3686 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3687 fw_state = abs_state & MFI_STATE_MASK;
3688
3689 if (fw_state != MFI_STATE_READY)
3690 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3691 " state\n");
3692
3693 while (fw_state != MFI_STATE_READY) {
3694
3695 switch (fw_state) {
3696
3697 case MFI_STATE_FAULT:
3698 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3699 if (ocr) {
3700 max_wait = MEGASAS_RESET_WAIT_TIME;
3701 cur_state = MFI_STATE_FAULT;
3702 break;
3703 } else
3704 return -ENODEV;
3705
3706 case MFI_STATE_WAIT_HANDSHAKE:
3707 /*
3708 * Set the CLR bit in inbound doorbell
3709 */
3710 if ((instance->pdev->device ==
3711 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3712 (instance->pdev->device ==
3713 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3714 (instance->ctrl_context))
3715 writel(
3716 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3717 &instance->reg_set->doorbell);
3718 else
3719 writel(
3720 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3721 &instance->reg_set->inbound_doorbell);
3722
3723 max_wait = MEGASAS_RESET_WAIT_TIME;
3724 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3725 break;
3726
3727 case MFI_STATE_BOOT_MESSAGE_PENDING:
3728 if ((instance->pdev->device ==
3729 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3730 (instance->pdev->device ==
3731 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3732 (instance->ctrl_context))
3733 writel(MFI_INIT_HOTPLUG,
3734 &instance->reg_set->doorbell);
3735 else
3736 writel(MFI_INIT_HOTPLUG,
3737 &instance->reg_set->inbound_doorbell);
3738
3739 max_wait = MEGASAS_RESET_WAIT_TIME;
3740 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3741 break;
3742
3743 case MFI_STATE_OPERATIONAL:
3744 /*
3745 * Bring it to READY state; assuming max wait 10 secs
3746 */
3747 instance->instancet->disable_intr(instance);
3748 if ((instance->pdev->device ==
3749 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3750 (instance->pdev->device ==
3751 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3752 (instance->ctrl_context)) {
3753 writel(MFI_RESET_FLAGS,
3754 &instance->reg_set->doorbell);
3755
3756 if (instance->ctrl_context) {
3757 for (i = 0; i < (10 * 1000); i += 20) {
3758 if (readl(
3759 &instance->
3760 reg_set->
3761 doorbell) & 1)
3762 msleep(20);
3763 else
3764 break;
3765 }
3766 }
3767 } else
3768 writel(MFI_RESET_FLAGS,
3769 &instance->reg_set->inbound_doorbell);
3770
3771 max_wait = MEGASAS_RESET_WAIT_TIME;
3772 cur_state = MFI_STATE_OPERATIONAL;
3773 break;
3774
3775 case MFI_STATE_UNDEFINED:
3776 /*
3777 * This state should not last for more than 2 seconds
3778 */
3779 max_wait = MEGASAS_RESET_WAIT_TIME;
3780 cur_state = MFI_STATE_UNDEFINED;
3781 break;
3782
3783 case MFI_STATE_BB_INIT:
3784 max_wait = MEGASAS_RESET_WAIT_TIME;
3785 cur_state = MFI_STATE_BB_INIT;
3786 break;
3787
3788 case MFI_STATE_FW_INIT:
3789 max_wait = MEGASAS_RESET_WAIT_TIME;
3790 cur_state = MFI_STATE_FW_INIT;
3791 break;
3792
3793 case MFI_STATE_FW_INIT_2:
3794 max_wait = MEGASAS_RESET_WAIT_TIME;
3795 cur_state = MFI_STATE_FW_INIT_2;
3796 break;
3797
3798 case MFI_STATE_DEVICE_SCAN:
3799 max_wait = MEGASAS_RESET_WAIT_TIME;
3800 cur_state = MFI_STATE_DEVICE_SCAN;
3801 break;
3802
3803 case MFI_STATE_FLUSH_CACHE:
3804 max_wait = MEGASAS_RESET_WAIT_TIME;
3805 cur_state = MFI_STATE_FLUSH_CACHE;
3806 break;
3807
3808 default:
3809 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3810 fw_state);
3811 return -ENODEV;
3812 }
3813
3814 /*
3815 * The cur_state should not last for more than max_wait secs
3816 */
3817 for (i = 0; i < (max_wait * 1000); i++) {
3818 curr_abs_state = instance->instancet->
3819 read_fw_status_reg(instance->reg_set);
3820
3821 if (abs_state == curr_abs_state) {
3822 msleep(1);
3823 } else
3824 break;
3825 }
3826
3827 /*
3828 * Return error if fw_state hasn't changed after max_wait
3829 */
3830 if (curr_abs_state == abs_state) {
3831 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3832 "in %d secs\n", fw_state, max_wait);
3833 return -ENODEV;
3834 }
3835
3836 abs_state = curr_abs_state;
3837 fw_state = curr_abs_state & MFI_STATE_MASK;
3838 }
3839 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3840
3841 return 0;
3842 }
3843
3844 /**
3845 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3846 * @instance: Adapter soft state
3847 */
3848 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3849 {
3850 int i;
3851 u16 max_cmd = instance->max_mfi_cmds;
3852 struct megasas_cmd *cmd;
3853
3854 if (!instance->frame_dma_pool)
3855 return;
3856
3857 /*
3858 * Return all frames to pool
3859 */
3860 for (i = 0; i < max_cmd; i++) {
3861
3862 cmd = instance->cmd_list[i];
3863
3864 if (cmd->frame)
3865 dma_pool_free(instance->frame_dma_pool, cmd->frame,
3866 cmd->frame_phys_addr);
3867
3868 if (cmd->sense)
3869 dma_pool_free(instance->sense_dma_pool, cmd->sense,
3870 cmd->sense_phys_addr);
3871 }
3872
3873 /*
3874 * Now destroy the pool itself
3875 */
3876 dma_pool_destroy(instance->frame_dma_pool);
3877 dma_pool_destroy(instance->sense_dma_pool);
3878
3879 instance->frame_dma_pool = NULL;
3880 instance->sense_dma_pool = NULL;
3881 }
3882
3883 /**
3884 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3885 * @instance: Adapter soft state
3886 *
3887 * Each command packet has an embedded DMA memory buffer that is used for
3888 * filling the MFI frame and the SG list that immediately follows the frame.
3889 * This function creates those DMA memory buffers for each command packet
3890 * using the DMA pool facility.
3891 */
3892 static int megasas_create_frame_pool(struct megasas_instance *instance)
3893 {
3894 int i;
3895 u16 max_cmd;
3896 u32 sge_sz;
3897 u32 frame_count;
3898 struct megasas_cmd *cmd;
3899
3900 max_cmd = instance->max_mfi_cmds;
3901
3902 /*
3903 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3904 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3905 */
3906 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3907 sizeof(struct megasas_sge32);
3908
3909 if (instance->flag_ieee)
3910 sge_sz = sizeof(struct megasas_sge_skinny);
3911
3912 /*
3913 * For MFI controllers:
3914 * max_num_sge = 60
3915 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
3916 * Total: 960 bytes (15 MFI frames of 64 bytes each)
3917 *
3918 * Fusion adapters require only 3 extra frames:
3919 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3920 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
3921 * Total: 192 bytes (3 MFI frames of 64 bytes each)
3922 */
3923 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3924 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
3925 /*
3926 * Use DMA pool facility provided by PCI layer
3927 */
3928 instance->frame_dma_pool = dma_pool_create("megasas frame pool",
3929 &instance->pdev->dev,
3930 instance->mfi_frame_size, 256, 0);
3931
3932 if (!instance->frame_dma_pool) {
3933 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3934 return -ENOMEM;
3935 }
3936
3937 instance->sense_dma_pool = dma_pool_create("megasas sense pool",
3938 &instance->pdev->dev, 128,
3939 4, 0);
3940
3941 if (!instance->sense_dma_pool) {
3942 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3943
3944 dma_pool_destroy(instance->frame_dma_pool);
3945 instance->frame_dma_pool = NULL;
3946
3947 return -ENOMEM;
3948 }
3949
3950 /*
3951 * Allocate and attach a frame to each of the commands in cmd_list.
3952 * By making cmd->index as the context instead of the &cmd, we can
3953 * always use 32bit context regardless of the architecture
3954 */
3955 for (i = 0; i < max_cmd; i++) {
3956
3957 cmd = instance->cmd_list[i];
3958
3959 cmd->frame = dma_pool_alloc(instance->frame_dma_pool,
3960 GFP_KERNEL, &cmd->frame_phys_addr);
3961
3962 cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
3963 GFP_KERNEL, &cmd->sense_phys_addr);
3964
3965 /*
3966 * megasas_teardown_frame_pool() takes care of freeing
3967 * whatever has been allocated
3968 */
3969 if (!cmd->frame || !cmd->sense) {
3970 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
3971 megasas_teardown_frame_pool(instance);
3972 return -ENOMEM;
3973 }
3974
3975 memset(cmd->frame, 0, instance->mfi_frame_size);
3976 cmd->frame->io.context = cpu_to_le32(cmd->index);
3977 cmd->frame->io.pad_0 = 0;
3978 if (!instance->ctrl_context && reset_devices)
3979 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3980 }
3981
3982 return 0;
3983 }
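/*
 * Editorial note (illustrative, not part of the original driver source):
 * the frame_count arithmetic above works out, for an MFI controller, to
 *
 *	SG list space  = 60 SGEs * 16 bytes = 960 bytes = 15 frames of 64 bytes
 *	frame_count    = 15 + 1 (the MFI header frame)  = 16
 *	mfi_frame_size = 16 * MEGAMFI_FRAME_SIZE (64)   = 1024 bytes per cmd
 *
 * so each cmd->frame returned by dma_pool_alloc() is one contiguous,
 * DMA-able 1 KB region: 64 bytes of MFI header followed by SG list space.
 * Sense data is kept in a separate 128-byte buffer from sense_dma_pool.
 */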
3984
3985 /**
3986 * megasas_free_cmds - Free all the cmds in the free cmd pool
3987 * @instance: Adapter soft state
3988 */
3989 void megasas_free_cmds(struct megasas_instance *instance)
3990 {
3991 int i;
3992
3993 /* First free the MFI frame pool */
3994 megasas_teardown_frame_pool(instance);
3995
3996 /* Free all the commands in the cmd_list */
3997 for (i = 0; i < instance->max_mfi_cmds; i++)
3998
3999 kfree(instance->cmd_list[i]);
4000
4001 /* Free the cmd_list buffer itself */
4002 kfree(instance->cmd_list);
4003 instance->cmd_list = NULL;
4004
4005 INIT_LIST_HEAD(&instance->cmd_pool);
4006 }
4007
4008 /**
4009 * megasas_alloc_cmds - Allocates the command packets
4010 * @instance: Adapter soft state
4011 *
4012 * Each command that is issued to the FW, whether an IO command from the OS
4013 * or an internal command such as an IOCTL, is wrapped in a local data
4014 * structure called megasas_cmd. The frame embedded in this megasas_cmd is
4015 * what is actually issued to the FW.
4016 *
4017 * Each frame has a 32-bit field called context (tag). This context is used
4018 * to get back the megasas_cmd from the frame when a frame gets completed in
4019 * the ISR. Typically the address of the megasas_cmd itself would be used as
4020 * the context. But we wanted to keep the differences between 32 and 64 bit
4021 * systems to the minimum. We always use 32 bit integers for the context. In
4022 * this driver, the 32 bit values are the indices into an array cmd_list.
4023 * This array is used only to look up the megasas_cmd given the context. The
4024 * free commands themselves are maintained in a linked list called cmd_pool.
4025 */
4026 int megasas_alloc_cmds(struct megasas_instance *instance)
4027 {
4028 int i;
4029 int j;
4030 u16 max_cmd;
4031 struct megasas_cmd *cmd;
4032 struct fusion_context *fusion;
4033
4034 fusion = instance->ctrl_context;
4035 max_cmd = instance->max_mfi_cmds;
4036
4037 /*
4038 * instance->cmd_list is an array of struct megasas_cmd pointers.
4039 * Allocate the dynamic array first and then allocate individual
4040 * commands.
4041 */
4042 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4043
4044 if (!instance->cmd_list) {
4045 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4046 return -ENOMEM;
4047 }
4048
4049 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4050
4051 for (i = 0; i < max_cmd; i++) {
4052 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4053 GFP_KERNEL);
4054
4055 if (!instance->cmd_list[i]) {
4056
4057 for (j = 0; j < i; j++)
4058 kfree(instance->cmd_list[j]);
4059
4060 kfree(instance->cmd_list);
4061 instance->cmd_list = NULL;
4062
4063 return -ENOMEM;
4064 }
4065 }
4066
4067 for (i = 0; i < max_cmd; i++) {
4068 cmd = instance->cmd_list[i];
4069 memset(cmd, 0, sizeof(struct megasas_cmd));
4070 cmd->index = i;
4071 cmd->scmd = NULL;
4072 cmd->instance = instance;
4073
4074 list_add_tail(&cmd->list, &instance->cmd_pool);
4075 }
4076
4077 /*
4078 * Create a frame pool and assign one frame to each cmd
4079 */
4080 if (megasas_create_frame_pool(instance)) {
4081 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4082 megasas_free_cmds(instance);
4083 }
4084
4085 return 0;
4086 }
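/*
 * Editorial sketch (assumption, not driver code): with the context scheme
 * described above, a completion handler can recover the megasas_cmd from a
 * completed frame with a plain array lookup, roughly:
 *
 *	u32 context = le32_to_cpu(frame->context);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 *
 * because megasas_create_frame_pool() stored cmd->index in io.context and
 * cmd_list[] is indexed by that same value.
 */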
4087
4088 /*
4089 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4090 * @instance: Adapter soft state
4091 *
4092 * Returns INITIATE_OCR only for Fusion adapters when no driver load/unload
4093 * or OCR is in progress; otherwise KILL_ADAPTER (MFI) or IGNORE_TIMEOUT.
4094 */
4095 inline int
4096 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4097
4098 if (!instance->ctrl_context)
4099 return KILL_ADAPTER;
4100 else if (instance->unload ||
4101 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4102 return IGNORE_TIMEOUT;
4103 else
4104 return INITIATE_OCR;
4105 }
4106
4107 static void
4108 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4109 {
4110 int ret;
4111 struct megasas_cmd *cmd;
4112 struct megasas_dcmd_frame *dcmd;
4113
4114 struct MR_PRIV_DEVICE *mr_device_priv_data;
4115 u16 device_id = 0;
4116
4117 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4118 cmd = megasas_get_cmd(instance);
4119
4120 if (!cmd) {
4121 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4122 return;
4123 }
4124
4125 dcmd = &cmd->frame->dcmd;
4126
4127 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4128 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4129
4130 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4131 dcmd->cmd = MFI_CMD_DCMD;
4132 dcmd->cmd_status = 0xFF;
4133 dcmd->sge_count = 1;
4134 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4135 dcmd->timeout = 0;
4136 dcmd->pad_0 = 0;
4137 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4138 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4139 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4140 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4141
4142 if (instance->ctrl_context && !instance->mask_interrupts)
4143 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4144 else
4145 ret = megasas_issue_polled(instance, cmd);
4146
4147 switch (ret) {
4148 case DCMD_SUCCESS:
4149 mr_device_priv_data = sdev->hostdata;
4150 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4151 mr_device_priv_data->interface_type =
4152 instance->pd_info->state.ddf.pdType.intf;
4153 break;
4154
4155 case DCMD_TIMEOUT:
4156
4157 switch (dcmd_timeout_ocr_possible(instance)) {
4158 case INITIATE_OCR:
4159 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4160 megasas_reset_fusion(instance->host,
4161 MFI_IO_TIMEOUT_OCR);
4162 break;
4163 case KILL_ADAPTER:
4164 megaraid_sas_kill_hba(instance);
4165 break;
4166 case IGNORE_TIMEOUT:
4167 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4168 __func__, __LINE__);
4169 break;
4170 }
4171
4172 break;
4173 }
4174
4175 if (ret != DCMD_TIMEOUT)
4176 megasas_return_cmd(instance, cmd);
4177
4178 return;
4179 }
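/*
 * Editorial note: megasas_get_pd_info() above follows the internal DCMD
 * pattern repeated throughout this file; as a rough, non-normative sketch:
 *
 *	1. cmd = megasas_get_cmd(instance);       reserve an internal MFI cmd
 *	2. fill cmd->frame->dcmd: opcode, mbox, flags, data_xfer_len, and an
 *	   sge32 entry pointing at a DMA-coherent result buffer
 *	3. issue it blocked (Fusion, interrupts unmasked) or polled otherwise
 *	4. on DCMD_TIMEOUT, let dcmd_timeout_ocr_possible() decide between
 *	   INITIATE_OCR, KILL_ADAPTER and IGNORE_TIMEOUT
 *	5. return the cmd with megasas_return_cmd() unless it timed out, in
 *	   which case the frame may still be owned by the firmware
 */
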
4180 /*
4181 * megasas_get_pd_list - Returns FW's pd_list structure
4182 * @instance: Adapter soft state
4183 *
4184 * Issues an internal command (DCMD) to get the FW's controller PD
4185 * list structure. This information is mainly used to find out the
4186 * physical drives that the FW exposes as SYSTEM (JBOD) devices.
4188 */
4189 static int
4190 megasas_get_pd_list(struct megasas_instance *instance)
4191 {
4192 int ret = 0, pd_index = 0;
4193 struct megasas_cmd *cmd;
4194 struct megasas_dcmd_frame *dcmd;
4195 struct MR_PD_LIST *ci;
4196 struct MR_PD_ADDRESS *pd_addr;
4197 dma_addr_t ci_h = 0;
4198
4199 if (instance->pd_list_not_supported) {
4200 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4201 "not supported by firmware\n");
4202 return ret;
4203 }
4204
4205 cmd = megasas_get_cmd(instance);
4206
4207 if (!cmd) {
4208 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4209 return -ENOMEM;
4210 }
4211
4212 dcmd = &cmd->frame->dcmd;
4213
4214 ci = pci_alloc_consistent(instance->pdev,
4215 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4216
4217 if (!ci) {
4218 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4219 megasas_return_cmd(instance, cmd);
4220 return -ENOMEM;
4221 }
4222
4223 memset(ci, 0, sizeof(*ci));
4224 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4225
4226 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4227 dcmd->mbox.b[1] = 0;
4228 dcmd->cmd = MFI_CMD_DCMD;
4229 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4230 dcmd->sge_count = 1;
4231 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4232 dcmd->timeout = 0;
4233 dcmd->pad_0 = 0;
4234 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4235 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4236 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4237 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4238
4239 if (instance->ctrl_context && !instance->mask_interrupts)
4240 ret = megasas_issue_blocked_cmd(instance, cmd,
4241 MFI_IO_TIMEOUT_SECS);
4242 else
4243 ret = megasas_issue_polled(instance, cmd);
4244
4245 switch (ret) {
4246 case DCMD_FAILED:
4247 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4248 "failed/not supported by firmware\n");
4249
4250 if (instance->ctrl_context)
4251 megaraid_sas_kill_hba(instance);
4252 else
4253 instance->pd_list_not_supported = 1;
4254 break;
4255 case DCMD_TIMEOUT:
4256
4257 switch (dcmd_timeout_ocr_possible(instance)) {
4258 case INITIATE_OCR:
4259 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4260 /*
4261 * DCMD timed out from the AEN path.
4262 * The AEN path already holds reset_mutex to avoid PCI access
4263 * while OCR is in progress.
4264 */
4265 mutex_unlock(&instance->reset_mutex);
4266 megasas_reset_fusion(instance->host,
4267 MFI_IO_TIMEOUT_OCR);
4268 mutex_lock(&instance->reset_mutex);
4269 break;
4270 case KILL_ADAPTER:
4271 megaraid_sas_kill_hba(instance);
4272 break;
4273 case IGNORE_TIMEOUT:
4274 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4275 __func__, __LINE__);
4276 break;
4277 }
4278
4279 break;
4280
4281 case DCMD_SUCCESS:
4282 pd_addr = ci->addr;
4283
4284 if ((le32_to_cpu(ci->count) >
4285 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4286 break;
4287
4288 memset(instance->local_pd_list, 0,
4289 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4290
4291 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4292 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4293 le16_to_cpu(pd_addr->deviceId);
4294 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4295 pd_addr->scsiDevType;
4296 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4297 MR_PD_STATE_SYSTEM;
4298 pd_addr++;
4299 }
4300
4301 memcpy(instance->pd_list, instance->local_pd_list,
4302 sizeof(instance->pd_list));
4303 break;
4304
4305 }
4306
4307 pci_free_consistent(instance->pdev,
4308 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4309 ci, ci_h);
4310
4311 if (ret != DCMD_TIMEOUT)
4312 megasas_return_cmd(instance, cmd);
4313
4314 return ret;
4315 }
4316
4317 /*
4318 * megasas_get_ld_list - Returns FW's ld_list structure
4319 * @instance: Adapter soft state
4320 *
4321 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
4322 * list. This information is mainly used to find out the target IDs of
4323 * the LDs exported by the FW.
4325 */
4326 static int
4327 megasas_get_ld_list(struct megasas_instance *instance)
4328 {
4329 int ret = 0, ld_index = 0, ids = 0;
4330 struct megasas_cmd *cmd;
4331 struct megasas_dcmd_frame *dcmd;
4332 struct MR_LD_LIST *ci;
4333 dma_addr_t ci_h = 0;
4334 u32 ld_count;
4335
4336 cmd = megasas_get_cmd(instance);
4337
4338 if (!cmd) {
4339 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4340 return -ENOMEM;
4341 }
4342
4343 dcmd = &cmd->frame->dcmd;
4344
4345 ci = pci_alloc_consistent(instance->pdev,
4346 sizeof(struct MR_LD_LIST),
4347 &ci_h);
4348
4349 if (!ci) {
4350 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4351 megasas_return_cmd(instance, cmd);
4352 return -ENOMEM;
4353 }
4354
4355 memset(ci, 0, sizeof(*ci));
4356 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4357
4358 if (instance->supportmax256vd)
4359 dcmd->mbox.b[0] = 1;
4360 dcmd->cmd = MFI_CMD_DCMD;
4361 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4362 dcmd->sge_count = 1;
4363 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4364 dcmd->timeout = 0;
4365 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4366 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4367 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4368 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4369 dcmd->pad_0 = 0;
4370
4371 if (instance->ctrl_context && !instance->mask_interrupts)
4372 ret = megasas_issue_blocked_cmd(instance, cmd,
4373 MFI_IO_TIMEOUT_SECS);
4374 else
4375 ret = megasas_issue_polled(instance, cmd);
4376
4377 ld_count = le32_to_cpu(ci->ldCount);
4378
4379 switch (ret) {
4380 case DCMD_FAILED:
4381 megaraid_sas_kill_hba(instance);
4382 break;
4383 case DCMD_TIMEOUT:
4384
4385 switch (dcmd_timeout_ocr_possible(instance)) {
4386 case INITIATE_OCR:
4387 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4388 /*
4389 * DCMD timed out from the AEN path.
4390 * The AEN path already holds reset_mutex to avoid PCI access
4391 * while OCR is in progress.
4392 */
4393 mutex_unlock(&instance->reset_mutex);
4394 megasas_reset_fusion(instance->host,
4395 MFI_IO_TIMEOUT_OCR);
4396 mutex_lock(&instance->reset_mutex);
4397 break;
4398 case KILL_ADAPTER:
4399 megaraid_sas_kill_hba(instance);
4400 break;
4401 case IGNORE_TIMEOUT:
4402 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4403 __func__, __LINE__);
4404 break;
4405 }
4406
4407 break;
4408
4409 case DCMD_SUCCESS:
4410 if (ld_count > instance->fw_supported_vd_count)
4411 break;
4412
4413 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4414
4415 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4416 if (ci->ldList[ld_index].state != 0) {
4417 ids = ci->ldList[ld_index].ref.targetId;
4418 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4419 }
4420 }
4421
4422 break;
4423 }
4424
4425 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4426
4427 if (ret != DCMD_TIMEOUT)
4428 megasas_return_cmd(instance, cmd);
4429
4430 return ret;
4431 }
4432
4433 /**
4434 * megasas_ld_list_query - Returns FW's ld_list structure
4435 * @instance: Adapter soft state
4436 * @query_type: Query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4437 *
4438 * Issues an internal command (DCMD) to query the FW for the list of LD
4439 * target IDs matching @query_type. Falls back to megasas_get_ld_list()
4440 * if the firmware does not support this DCMD.
4441 */
4442 static int
4443 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4444 {
4445 int ret = 0, ld_index = 0, ids = 0;
4446 struct megasas_cmd *cmd;
4447 struct megasas_dcmd_frame *dcmd;
4448 struct MR_LD_TARGETID_LIST *ci;
4449 dma_addr_t ci_h = 0;
4450 u32 tgtid_count;
4451
4452 cmd = megasas_get_cmd(instance);
4453
4454 if (!cmd) {
4455 dev_warn(&instance->pdev->dev,
4456 "megasas_ld_list_query: Failed to get cmd\n");
4457 return -ENOMEM;
4458 }
4459
4460 dcmd = &cmd->frame->dcmd;
4461
4462 ci = pci_alloc_consistent(instance->pdev,
4463 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4464
4465 if (!ci) {
4466 dev_warn(&instance->pdev->dev,
4467 "Failed to alloc mem for ld_list_query\n");
4468 megasas_return_cmd(instance, cmd);
4469 return -ENOMEM;
4470 }
4471
4472 memset(ci, 0, sizeof(*ci));
4473 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4474
4475 dcmd->mbox.b[0] = query_type;
4476 if (instance->supportmax256vd)
4477 dcmd->mbox.b[2] = 1;
4478
4479 dcmd->cmd = MFI_CMD_DCMD;
4480 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4481 dcmd->sge_count = 1;
4482 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4483 dcmd->timeout = 0;
4484 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4485 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4486 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4487 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4488 dcmd->pad_0 = 0;
4489
4490 if (instance->ctrl_context && !instance->mask_interrupts)
4491 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4492 else
4493 ret = megasas_issue_polled(instance, cmd);
4494
4495 switch (ret) {
4496 case DCMD_FAILED:
4497 dev_info(&instance->pdev->dev,
4498 "DCMD not supported by firmware - %s %d\n",
4499 __func__, __LINE__);
4500 ret = megasas_get_ld_list(instance);
4501 break;
4502 case DCMD_TIMEOUT:
4503 switch (dcmd_timeout_ocr_possible(instance)) {
4504 case INITIATE_OCR:
4505 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4506 /*
4507 * DCMD timed out from the AEN path.
4508 * The AEN path already holds reset_mutex to avoid PCI access
4509 * while OCR is in progress.
4510 */
4511 mutex_unlock(&instance->reset_mutex);
4512 megasas_reset_fusion(instance->host,
4513 MFI_IO_TIMEOUT_OCR);
4514 mutex_lock(&instance->reset_mutex);
4515 break;
4516 case KILL_ADAPTER:
4517 megaraid_sas_kill_hba(instance);
4518 break;
4519 case IGNORE_TIMEOUT:
4520 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4521 __func__, __LINE__);
4522 break;
4523 }
4524
4525 break;
4526 case DCMD_SUCCESS:
4527 tgtid_count = le32_to_cpu(ci->count);
4528
4529 if ((tgtid_count > (instance->fw_supported_vd_count)))
4530 break;
4531
4532 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4533 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4534 ids = ci->targetId[ld_index];
4535 instance->ld_ids[ids] = ci->targetId[ld_index];
4536 }
4537
4538 break;
4539 }
4540
4541 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4542 ci, ci_h);
4543
4544 if (ret != DCMD_TIMEOUT)
4545 megasas_return_cmd(instance, cmd);
4546
4547 return ret;
4548 }
4549
4550 /*
4551 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4552 * instance : Controller's instance
4553 */
4554 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4555 {
4556 struct fusion_context *fusion;
4557 u32 ventura_map_sz = 0;
4558
4559 fusion = instance->ctrl_context;
4560 /* MFI based controllers do not support extended VDs; nothing to update */
4561 if (!fusion)
4562 return;
4563
4564 instance->supportmax256vd =
4565 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4566 /* Below is additional check to address future FW enhancement */
4567 if (instance->ctrl_info->max_lds > 64)
4568 instance->supportmax256vd = 1;
4569
4570 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4571 * MEGASAS_MAX_DEV_PER_CHANNEL;
4572 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4573 * MEGASAS_MAX_DEV_PER_CHANNEL;
4574 if (instance->supportmax256vd) {
4575 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4576 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4577 } else {
4578 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4579 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4580 }
4581
4582 dev_info(&instance->pdev->dev,
4583 "firmware type\t: %s\n",
4584 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4585 "Legacy(64 VD) firmware");
4586
4587 if (instance->max_raid_mapsize) {
4588 ventura_map_sz = instance->max_raid_mapsize *
4589 MR_MIN_MAP_SIZE; /* 64k */
4590 fusion->current_map_sz = ventura_map_sz;
4591 fusion->max_map_sz = ventura_map_sz;
4592 } else {
4593 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4594 (sizeof(struct MR_LD_SPAN_MAP) *
4595 (instance->fw_supported_vd_count - 1));
4596 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4597
4598 fusion->max_map_sz =
4599 max(fusion->old_map_sz, fusion->new_map_sz);
4600
4601 if (instance->supportmax256vd)
4602 fusion->current_map_sz = fusion->new_map_sz;
4603 else
4604 fusion->current_map_sz = fusion->old_map_sz;
4605 }
4606 /* irrespective of FW raid maps, driver raid map is constant */
4607 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4608 }
4609
4610 /**
4611 * megasas_get_ctrl_info - Returns FW's controller structure
4612 * @instance: Adapter soft state
4613 *
4614 * Issues an internal command (DCMD) to get the FW's controller structure.
4615 * This information is mainly used to find out the maximum IO transfer per
4616 * command supported by the FW.
4617 */
4618 int
4619 megasas_get_ctrl_info(struct megasas_instance *instance)
4620 {
4621 int ret = 0;
4622 struct megasas_cmd *cmd;
4623 struct megasas_dcmd_frame *dcmd;
4624 struct megasas_ctrl_info *ci;
4625 struct megasas_ctrl_info *ctrl_info;
4626 dma_addr_t ci_h = 0;
4627
4628 ctrl_info = instance->ctrl_info;
4629
4630 cmd = megasas_get_cmd(instance);
4631
4632 if (!cmd) {
4633 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4634 return -ENOMEM;
4635 }
4636
4637 dcmd = &cmd->frame->dcmd;
4638
4639 ci = pci_alloc_consistent(instance->pdev,
4640 sizeof(struct megasas_ctrl_info), &ci_h);
4641
4642 if (!ci) {
4643 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4644 megasas_return_cmd(instance, cmd);
4645 return -ENOMEM;
4646 }
4647
4648 memset(ci, 0, sizeof(*ci));
4649 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4650
4651 dcmd->cmd = MFI_CMD_DCMD;
4652 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4653 dcmd->sge_count = 1;
4654 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4655 dcmd->timeout = 0;
4656 dcmd->pad_0 = 0;
4657 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4658 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4659 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4660 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4661 dcmd->mbox.b[0] = 1;
4662
4663 if (instance->ctrl_context && !instance->mask_interrupts)
4664 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4665 else
4666 ret = megasas_issue_polled(instance, cmd);
4667
4668 switch (ret) {
4669 case DCMD_SUCCESS:
4670 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4671 /* Save required controller information in
4672 * CPU endianness format.
4673 */
4674 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4675 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4676 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4677 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
4678
4679 /* Update the latest Ext VD info.
4680 * From Init path, store current firmware details.
4681 * From OCR path, detect any firmware property changes, in case
4682 * of a firmware upgrade without a system reboot.
4683 */
4684 megasas_update_ext_vd_details(instance);
4685 instance->use_seqnum_jbod_fp =
4686 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4687 instance->support_morethan256jbod =
4688 ctrl_info->adapter_operations4.support_pd_map_target_id;
4689
4690 /*Check whether controller is iMR or MR */
4691 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4692 dev_info(&instance->pdev->dev,
4693 "controller type\t: %s(%dMB)\n",
4694 instance->is_imr ? "iMR" : "MR",
4695 le16_to_cpu(ctrl_info->memory_size));
4696
4697 instance->disableOnlineCtrlReset =
4698 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4699 instance->secure_jbod_support =
4700 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4701 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4702 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4703 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4704 instance->secure_jbod_support ? "Yes" : "No");
4705 break;
4706
4707 case DCMD_TIMEOUT:
4708 switch (dcmd_timeout_ocr_possible(instance)) {
4709 case INITIATE_OCR:
4710 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4711 megasas_reset_fusion(instance->host,
4712 MFI_IO_TIMEOUT_OCR);
4713 break;
4714 case KILL_ADAPTER:
4715 megaraid_sas_kill_hba(instance);
4716 break;
4717 case IGNORE_TIMEOUT:
4718 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4719 __func__, __LINE__);
4720 break;
4721 }
break; /* timeout handled above; do not fall through and kill the HBA */
4722 case DCMD_FAILED:
4723 megaraid_sas_kill_hba(instance);
4724 break;
4725
4726 }
4727
4728 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4729 ci, ci_h);
4730
4731 megasas_return_cmd(instance, cmd);
4732
4733
4734 return ret;
4735 }
4736
4737 /*
4738 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4739 * to firmware
4740 *
4741 * @instance: Adapter soft state
4742 * @crash_buf_state: tell FW to turn ON/OFF crash dump feature
4743 *	MR_CRASH_BUF_TURN_OFF = 0
4744 *	MR_CRASH_BUF_TURN_ON = 1
4745 * @return: 0 on success, non-zero on failure.
4746 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4747 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4748 * that driver supports crash dump feature. This DCMD will be sent only if
4749 * crash dump feature is supported by the FW.
4750 *
4751 */
4752 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4753 u8 crash_buf_state)
4754 {
4755 int ret = 0;
4756 struct megasas_cmd *cmd;
4757 struct megasas_dcmd_frame *dcmd;
4758
4759 cmd = megasas_get_cmd(instance);
4760
4761 if (!cmd) {
4762 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4763 return -ENOMEM;
4764 }
4765
4766
4767 dcmd = &cmd->frame->dcmd;
4768
4769 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4770 dcmd->mbox.b[0] = crash_buf_state;
4771 dcmd->cmd = MFI_CMD_DCMD;
4772 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4773 dcmd->sge_count = 1;
4774 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4775 dcmd->timeout = 0;
4776 dcmd->pad_0 = 0;
4777 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4778 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4779 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4780 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4781
4782 if (instance->ctrl_context && !instance->mask_interrupts)
4783 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4784 else
4785 ret = megasas_issue_polled(instance, cmd);
4786
4787 if (ret == DCMD_TIMEOUT) {
4788 switch (dcmd_timeout_ocr_possible(instance)) {
4789 case INITIATE_OCR:
4790 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4791 megasas_reset_fusion(instance->host,
4792 MFI_IO_TIMEOUT_OCR);
4793 break;
4794 case KILL_ADAPTER:
4795 megaraid_sas_kill_hba(instance);
4796 break;
4797 case IGNORE_TIMEOUT:
4798 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4799 __func__, __LINE__);
4800 break;
4801 }
4802 } else
4803 megasas_return_cmd(instance, cmd);
4804
4805 return ret;
4806 }
4807
4808 /**
4809 * megasas_issue_init_mfi - Initializes the FW
4810 * @instance: Adapter soft state
4811 *
4812 * Issues the INIT MFI cmd
4813 */
4814 static int
4815 megasas_issue_init_mfi(struct megasas_instance *instance)
4816 {
4817 __le32 context;
4818 struct megasas_cmd *cmd;
4819 struct megasas_init_frame *init_frame;
4820 struct megasas_init_queue_info *initq_info;
4821 dma_addr_t init_frame_h;
4822 dma_addr_t initq_info_h;
4823
4824 /*
4825 * Prepare an init frame. Note the init frame points to the queue info
4826 * structure. Each frame has SGL allocated after first 64 bytes. For
4827 * this frame - since we don't need any SGL - we use SGL's space as
4828 * queue info structure
4829 *
4830 * We will not get a NULL command below. We just created the pool.
4831 */
4832 cmd = megasas_get_cmd(instance);
4833
4834 init_frame = (struct megasas_init_frame *)cmd->frame;
4835 initq_info = (struct megasas_init_queue_info *)
4836 ((unsigned long)init_frame + 64);
4837
4838 init_frame_h = cmd->frame_phys_addr;
4839 initq_info_h = init_frame_h + 64;
4840
4841 context = init_frame->context;
4842 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4843 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4844 init_frame->context = context;
4845
4846 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4847 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4848
4849 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4850 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4851
4852 init_frame->cmd = MFI_CMD_INIT;
4853 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4854 init_frame->queue_info_new_phys_addr_lo =
4855 cpu_to_le32(lower_32_bits(initq_info_h));
4856 init_frame->queue_info_new_phys_addr_hi =
4857 cpu_to_le32(upper_32_bits(initq_info_h));
4858
4859 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4860
4861 /*
4862 * disable the intr before firing the init frame to FW
4863 */
4864 instance->instancet->disable_intr(instance);
4865
4866 /*
4867 * Issue the init frame in polled mode
4868 */
4869
4870 if (megasas_issue_polled(instance, cmd)) {
4871 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4872 megasas_return_cmd(instance, cmd);
4873 goto fail_fw_init;
4874 }
4875
4876 megasas_return_cmd(instance, cmd);
4877
4878 return 0;
4879
4880 fail_fw_init:
4881 return -EINVAL;
4882 }
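/*
 * Editorial sketch of the init frame laid out above (offsets are the ones
 * the function itself uses, nothing new is defined here):
 *
 *	cmd->frame_phys_addr + 0  : struct megasas_init_frame (64 bytes)
 *	cmd->frame_phys_addr + 64 : struct megasas_init_queue_info, placed in
 *	                            the space normally used for the SG list,
 *	                            since MFI_CMD_INIT carries no data SGEs
 *
 * queue_info_new_phys_addr_{lo,hi} simply point back at that +64 offset,
 * so a single frame allocation serves both structures.
 */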
4883
4884 static u32
4885 megasas_init_adapter_mfi(struct megasas_instance *instance)
4886 {
4887 struct megasas_register_set __iomem *reg_set;
4888 u32 context_sz;
4889 u32 reply_q_sz;
4890
4891 reg_set = instance->reg_set;
4892
4893 /*
4894 * Get various operational parameters from status register
4895 */
4896 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4897 /*
4898 * Reduce the max supported cmds by 1. This is to ensure that the
4899 * reply_q_sz (1 more than the max cmds that the driver may send)
4900 * does not exceed the max cmds that the FW can support
4901 */
4902 instance->max_fw_cmds = instance->max_fw_cmds-1;
4903 instance->max_mfi_cmds = instance->max_fw_cmds;
4904 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4905 0x10;
4906 /*
4907 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4908 * are reserved for IOCTL + driver's internal DCMDs.
4909 */
4910 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4911 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4912 instance->max_scsi_cmds = (instance->max_fw_cmds -
4913 MEGASAS_SKINNY_INT_CMDS);
4914 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4915 } else {
4916 instance->max_scsi_cmds = (instance->max_fw_cmds -
4917 MEGASAS_INT_CMDS);
4918 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4919 }
4920
4921 instance->cur_can_queue = instance->max_scsi_cmds;
4922 /*
4923 * Create a pool of commands
4924 */
4925 if (megasas_alloc_cmds(instance))
4926 goto fail_alloc_cmds;
4927
4928 /*
4929 * Allocate memory for reply queue. Length of reply queue should
4930 * be _one_ more than the maximum commands handled by the firmware.
4931 *
4932 * Note: When FW completes commands, it places the corresponding context
4933 * values in this circular reply queue. This circular queue is a fairly
4934 * typical producer-consumer queue. FW is the producer (of completed
4935 * commands) and the driver is the consumer.
4936 */
4937 context_sz = sizeof(u32);
4938 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4939
4940 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4941 reply_q_sz,
4942 &instance->reply_queue_h);
4943
4944 if (!instance->reply_queue) {
4945 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4946 goto fail_reply_queue;
4947 }
4948
4949 if (megasas_issue_init_mfi(instance))
4950 goto fail_fw_init;
4951
4952 if (megasas_get_ctrl_info(instance)) {
4953 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4954 "Fail from %s %d\n", instance->unique_id,
4955 __func__, __LINE__);
4956 goto fail_fw_init;
4957 }
4958
4959 instance->fw_support_ieee = 0;
4960 instance->fw_support_ieee =
4961 (instance->instancet->read_fw_status_reg(reg_set) &
4962 0x04000000);
4963
4964 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4965 instance->fw_support_ieee);
4966
4967 if (instance->fw_support_ieee)
4968 instance->flag_ieee = 1;
4969
4970 return 0;
4971
4972 fail_fw_init:
4973
4974 pci_free_consistent(instance->pdev, reply_q_sz,
4975 instance->reply_queue, instance->reply_queue_h);
4976 fail_reply_queue:
4977 megasas_free_cmds(instance);
4978
4979 fail_alloc_cmds:
4980 return 1;
4981 }
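/*
 * Editorial sketch (assumption, not the driver's actual completion code):
 * the circular reply queue allocated above is a producer-consumer ring of
 * 32-bit contexts. The firmware advances *instance->producer as it posts
 * completions; the driver could drain it roughly like this:
 *
 *	u32 prod = le32_to_cpu(*instance->producer);
 *	u32 cons = le32_to_cpu(*instance->consumer);
 *
 *	while (cons != prod) {
 *		u32 context = le32_to_cpu(instance->reply_queue[cons]);
 *		megasas_complete_cmd(instance, instance->cmd_list[context],
 *				     DID_OK);
 *		cons = (cons + 1) % (instance->max_fw_cmds + 1);
 *	}
 *	*instance->consumer = cpu_to_le32(cons);
 *
 * which is also why reply_q_sz is one entry larger than max_fw_cmds: with
 * the extra slot a full ring never looks identical to an empty one.
 */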
4982
4983 /*
4984 * megasas_setup_irqs_ioapic - register legacy interrupts.
4985 * @instance: Adapter soft state
4986 *
4987 * Do not enable interrupts, only set up the ISR.
4988 *
4989 * Return 0 on success.
4990 */
4991 static int
4992 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4993 {
4994 struct pci_dev *pdev;
4995
4996 pdev = instance->pdev;
4997 instance->irq_context[0].instance = instance;
4998 instance->irq_context[0].MSIxIndex = 0;
4999 if (request_irq(pci_irq_vector(pdev, 0),
5000 instance->instancet->service_isr, IRQF_SHARED,
5001 "megasas", &instance->irq_context[0])) {
5002 dev_err(&instance->pdev->dev,
5003 "Failed to register IRQ from %s %d\n",
5004 __func__, __LINE__);
5005 return -1;
5006 }
5007 return 0;
5008 }
5009
5010 /**
5011 * megasas_setup_irqs_msix - register MSI-X interrupts.
5012 * @instance: Adapter soft state
5013 * @is_probe: Driver probe check
5014 *
5015 * Do not enable interrupts, only set up the ISRs.
5016 *
5017 * Return 0 on success.
5018 */
5019 static int
5020 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5021 {
5022 int i, j;
5023 struct pci_dev *pdev;
5024
5025 pdev = instance->pdev;
5026
5027 /* Try MSI-x */
5028 for (i = 0; i < instance->msix_vectors; i++) {
5029 instance->irq_context[i].instance = instance;
5030 instance->irq_context[i].MSIxIndex = i;
5031 if (request_irq(pci_irq_vector(pdev, i),
5032 instance->instancet->service_isr, 0, "megasas",
5033 &instance->irq_context[i])) {
5034 dev_err(&instance->pdev->dev,
5035 "Failed to register IRQ for vector %d.\n", i);
5036 for (j = 0; j < i; j++)
5037 free_irq(pci_irq_vector(pdev, j),
5038 &instance->irq_context[j]);
5039 /* Retry irq register for IO_APIC*/
5040 instance->msix_vectors = 0;
5041 if (is_probe) {
5042 pci_free_irq_vectors(instance->pdev);
5043 return megasas_setup_irqs_ioapic(instance);
5044 } else {
5045 return -1;
5046 }
5047 }
5048 }
5049 return 0;
5050 }
5051
5052 /*
5053 * megasas_destroy_irqs- unregister interrupts.
5054 * @instance: Adapter soft state
5055 * return: void
5056 */
5057 static void
5058 megasas_destroy_irqs(struct megasas_instance *instance) {
5059
5060 int i;
5061
5062 if (instance->msix_vectors)
5063 for (i = 0; i < instance->msix_vectors; i++) {
5064 free_irq(pci_irq_vector(instance->pdev, i),
5065 &instance->irq_context[i]);
5066 }
5067 else
5068 free_irq(pci_irq_vector(instance->pdev, 0),
5069 &instance->irq_context[0]);
5070 }
5071
5072 /**
5073 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5074 * @instance: Adapter soft state
5075 * @is_probe: Driver probe check
5076 *
5077 * Return 0 on success.
5078 */
5079 void
5080 megasas_setup_jbod_map(struct megasas_instance *instance)
5081 {
5082 int i;
5083 struct fusion_context *fusion = instance->ctrl_context;
5084 u32 pd_seq_map_sz;
5085
5086 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5087 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5088
5089 if (reset_devices || !fusion ||
5090 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
5091 dev_info(&instance->pdev->dev,
5092 "Jbod map is not supported %s %d\n",
5093 __func__, __LINE__);
5094 instance->use_seqnum_jbod_fp = false;
5095 return;
5096 }
5097
5098 if (fusion->pd_seq_sync[0])
5099 goto skip_alloc;
5100
5101 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5102 fusion->pd_seq_sync[i] = dma_alloc_coherent
5103 (&instance->pdev->dev, pd_seq_map_sz,
5104 &fusion->pd_seq_phys[i], GFP_KERNEL);
5105 if (!fusion->pd_seq_sync[i]) {
5106 dev_err(&instance->pdev->dev,
5107 "Failed to allocate memory from %s %d\n",
5108 __func__, __LINE__);
5109 if (i == 1) {
5110 dma_free_coherent(&instance->pdev->dev,
5111 pd_seq_map_sz, fusion->pd_seq_sync[0],
5112 fusion->pd_seq_phys[0]);
5113 fusion->pd_seq_sync[0] = NULL;
5114 }
5115 instance->use_seqnum_jbod_fp = false;
5116 return;
5117 }
5118 }
5119
5120 skip_alloc:
5121 if (!megasas_sync_pd_seq_num(instance, false) &&
5122 !megasas_sync_pd_seq_num(instance, true))
5123 instance->use_seqnum_jbod_fp = true;
5124 else
5125 instance->use_seqnum_jbod_fp = false;
5126 }
5127
5128 /**
5129 * megasas_init_fw - Initializes the FW
5130 * @instance: Adapter soft state
5131 *
5132 * This is the main function for initializing firmware
5133 */
5134
5135 static int megasas_init_fw(struct megasas_instance *instance)
5136 {
5137 u32 max_sectors_1;
5138 u32 max_sectors_2, tmp_sectors, msix_enable;
5139 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5140 resource_size_t base_addr;
5141 struct megasas_register_set __iomem *reg_set;
5142 struct megasas_ctrl_info *ctrl_info = NULL;
5143 unsigned long bar_list;
5144 int i, j, loop, fw_msix_count = 0;
5145 struct IOV_111 *iovPtr;
5146 struct fusion_context *fusion;
5147
5148 fusion = instance->ctrl_context;
5149
5150 /* Find first memory bar */
5151 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5152 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5153 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5154 "megasas: LSI")) {
5155 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5156 return -EBUSY;
5157 }
5158
5159 base_addr = pci_resource_start(instance->pdev, instance->bar);
5160 instance->reg_set = ioremap_nocache(base_addr, 8192);
5161
5162 if (!instance->reg_set) {
5163 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5164 goto fail_ioremap;
5165 }
5166
5167 reg_set = instance->reg_set;
5168
5169 if (fusion)
5170 instance->instancet = &megasas_instance_template_fusion;
5171 else {
5172 switch (instance->pdev->device) {
5173 case PCI_DEVICE_ID_LSI_SAS1078R:
5174 case PCI_DEVICE_ID_LSI_SAS1078DE:
5175 instance->instancet = &megasas_instance_template_ppc;
5176 break;
5177 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5178 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5179 instance->instancet = &megasas_instance_template_gen2;
5180 break;
5181 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5182 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5183 instance->instancet = &megasas_instance_template_skinny;
5184 break;
5185 case PCI_DEVICE_ID_LSI_SAS1064R:
5186 case PCI_DEVICE_ID_DELL_PERC5:
5187 default:
5188 instance->instancet = &megasas_instance_template_xscale;
5189 instance->pd_list_not_supported = 1;
5190 break;
5191 }
5192 }
5193
5194 if (megasas_transition_to_ready(instance, 0)) {
5195 atomic_set(&instance->fw_reset_no_pci_access, 1);
5196 instance->instancet->adp_reset
5197 (instance, instance->reg_set);
5198 atomic_set(&instance->fw_reset_no_pci_access, 0);
5199 dev_info(&instance->pdev->dev,
5200 "FW restarted successfully from %s!\n",
5201 __func__);
5202
5203 /* wait about 30 seconds before retrying */
5204 ssleep(30);
5205
5206 if (megasas_transition_to_ready(instance, 0))
5207 goto fail_ready_state;
5208 }
5209
5210 if (instance->is_ventura) {
5211 scratch_pad_3 =
5212 readl(&instance->reg_set->outbound_scratch_pad_3);
5213 instance->max_raid_mapsize = ((scratch_pad_3 >>
5214 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5215 MR_MAX_RAID_MAP_SIZE_MASK);
5216 }
5217
5218 /* Check if MSI-X is supported while in ready state */
5219 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5220 0x4000000) >> 0x1a;
5221 if (msix_enable && !msix_disable) {
5222 int irq_flags = PCI_IRQ_MSIX;
5223
5224 scratch_pad_2 = readl
5225 (&instance->reg_set->outbound_scratch_pad_2);
5226 /* Check max MSI-X vectors */
5227 if (fusion) {
5228 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5229 instance->msix_vectors = (scratch_pad_2
5230 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5231 fw_msix_count = instance->msix_vectors;
5232 } else { /* Invader series supports more than 8 MSI-x vectors*/
5233 instance->msix_vectors = ((scratch_pad_2
5234 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5235 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5236 if (instance->msix_vectors > 16)
5237 instance->msix_combined = true;
5238
5239 if (rdpq_enable)
5240 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5241 1 : 0;
5242 fw_msix_count = instance->msix_vectors;
5243 /* Save reply post index addresses 1-15 to local memory.
5244 * Index 0 is already saved from reg offset
5245 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5246 */
5247 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5248 instance->reply_post_host_index_addr[loop] =
5249 (u32 __iomem *)
5250 ((u8 __iomem *)instance->reg_set +
5251 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5252 + (loop * 0x10));
5253 }
5254 }
5255 if (msix_vectors)
5256 instance->msix_vectors = min(msix_vectors,
5257 instance->msix_vectors);
5258 } else /* MFI adapters */
5259 instance->msix_vectors = 1;
5260 /* Don't bother allocating more MSI-X vectors than cpus */
5261 instance->msix_vectors = min(instance->msix_vectors,
5262 (unsigned int)num_online_cpus());
5263 if (smp_affinity_enable)
5264 irq_flags |= PCI_IRQ_AFFINITY;
5265 i = pci_alloc_irq_vectors(instance->pdev, 1,
5266 instance->msix_vectors, irq_flags);
5267 if (i > 0)
5268 instance->msix_vectors = i;
5269 else
5270 instance->msix_vectors = 0;
5271 }
5272 /*
5273 * MSI-X host index 0 is common for all adapters.
5274 * It is used for all MPT based adapters.
5275 */
5276 if (instance->msix_combined) {
5277 instance->reply_post_host_index_addr[0] =
5278 (u32 *)((u8 *)instance->reg_set +
5279 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5280 } else {
5281 instance->reply_post_host_index_addr[0] =
5282 (u32 *)((u8 *)instance->reg_set +
5283 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5284 }
5285
5286 if (!instance->msix_vectors) {
5287 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5288 if (i < 0)
5289 goto fail_setup_irqs;
5290 }
5291
5292 dev_info(&instance->pdev->dev,
5293 "firmware supports msix\t: (%d)", fw_msix_count);
5294 dev_info(&instance->pdev->dev,
5295 "current msix/online cpus\t: (%d/%d)\n",
5296 instance->msix_vectors, (unsigned int)num_online_cpus());
5297 dev_info(&instance->pdev->dev,
5298 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5299
5300 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5301 (unsigned long)instance);
5302
5303 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5304 GFP_KERNEL);
5305 if (instance->ctrl_info == NULL)
5306 goto fail_init_adapter;
5307
5308 /*
5309 * Below are the default values for legacy firmware
5310 * (non-Fusion based controllers).
5311 */
5312 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5313 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5314 /* Get operational params, sge flags, send init cmd to controller */
5315 if (instance->instancet->init_adapter(instance))
5316 goto fail_init_adapter;
5317
5318 if (instance->is_ventura) {
5319 scratch_pad_4 =
5320 readl(&instance->reg_set->outbound_scratch_pad_4);
5321 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5322 MR_DEFAULT_NVME_PAGE_SHIFT)
5323 instance->nvme_page_size =
5324 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5325
5326 dev_info(&instance->pdev->dev,
5327 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5328 }
5329
5330 if (instance->msix_vectors ?
5331 megasas_setup_irqs_msix(instance, 1) :
5332 megasas_setup_irqs_ioapic(instance))
5333 goto fail_init_adapter;
5334
5335 instance->instancet->enable_intr(instance);
5336
5337 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5338
5339 megasas_setup_jbod_map(instance);
5340
5341 /* For passthrough:
5342 * the following call retrieves the PD list.
5343 */
5344 memset(instance->pd_list, 0,
5345 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5346 if (megasas_get_pd_list(instance) < 0) {
5347 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5348 goto fail_get_ld_pd_list;
5349 }
5350
5351 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5352
5353 /* stream detection initialization */
5354 if (instance->is_ventura && fusion) {
5355 fusion->stream_detect_by_ld =
5356 kzalloc(sizeof(struct LD_STREAM_DETECT *)
5357 * MAX_LOGICAL_DRIVES_EXT,
5358 GFP_KERNEL);
5359 if (!fusion->stream_detect_by_ld) {
5360 dev_err(&instance->pdev->dev,
5361 "unable to allocate stream detection for pool of LDs\n");
5362 goto fail_get_ld_pd_list;
5363 }
5364 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5365 fusion->stream_detect_by_ld[i] =
5366 kmalloc(sizeof(struct LD_STREAM_DETECT),
5367 GFP_KERNEL);
5368 if (!fusion->stream_detect_by_ld[i]) {
5369 dev_err(&instance->pdev->dev,
5370 "unable to allocate stream detect by LD\n ");
5371 for (j = 0; j < i; ++j)
5372 kfree(fusion->stream_detect_by_ld[j]);
5373 kfree(fusion->stream_detect_by_ld);
5374 fusion->stream_detect_by_ld = NULL;
5375 goto fail_get_ld_pd_list;
5376 }
5377 fusion->stream_detect_by_ld[i]->mru_bit_map
5378 = MR_STREAM_BITMAP;
5379 }
5380 }
5381
5382 if (megasas_ld_list_query(instance,
5383 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5384 goto fail_get_ld_pd_list;
5385
5386 /*
5387 * Compute the max allowed sectors per IO: The controller info has two
5388 * limits on max sectors. Driver should use the minimum of these two.
5389 *
5390 * 1 << stripe_sz_ops.min = max sectors per strip
5391 *
5392 * Note that older firmwares ( < FW ver 30) didn't report information
5393 * to calculate max_sectors_1. So the number ended up as zero always.
5394 */
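/*
 * Editorial worked example (hypothetical values, not read from any real
 * controller): with stripe_sz_ops.min = 7 and max_strips_per_io = 42 the
 * first limit is (1 << 7) * 42 = 5376 sectors; with max_request_size = 4096
 * the second limit is smaller, so tmp_sectors = min(5376, 4096) = 4096.
 */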
5395 tmp_sectors = 0;
5396 ctrl_info = instance->ctrl_info;
5397
5398 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5399 le16_to_cpu(ctrl_info->max_strips_per_io);
5400 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5401
5402 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
5403
5404 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5405 instance->passive = ctrl_info->cluster.passive;
5406 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5407 instance->UnevenSpanSupport =
5408 ctrl_info->adapterOperations2.supportUnevenSpans;
5409 if (instance->UnevenSpanSupport) {
5410 struct fusion_context *fusion = instance->ctrl_context;
5411 if (MR_ValidateMapInfo(instance))
5412 fusion->fast_path_io = 1;
5413 else
5414 fusion->fast_path_io = 0;
5415
5416 }
5417 if (ctrl_info->host_interface.SRIOV) {
5418 instance->requestorId = ctrl_info->iov.requestorId;
5419 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5420 if (!ctrl_info->adapterOperations2.activePassive)
5421 instance->PlasmaFW111 = 1;
5422
5423 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5424 instance->PlasmaFW111 ? "1.11" : "new");
5425
5426 if (instance->PlasmaFW111) {
5427 iovPtr = (struct IOV_111 *)
5428 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5429 instance->requestorId = iovPtr->requestorId;
5430 }
5431 }
5432 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5433 instance->requestorId);
5434 }
5435
5436 instance->crash_dump_fw_support =
5437 ctrl_info->adapterOperations3.supportCrashDump;
5438 instance->crash_dump_drv_support =
5439 (instance->crash_dump_fw_support &&
5440 instance->crash_dump_buf);
5441 if (instance->crash_dump_drv_support)
5442 megasas_set_crash_dump_params(instance,
5443 MR_CRASH_BUF_TURN_OFF);
5444
5445 else {
5446 if (instance->crash_dump_buf)
5447 pci_free_consistent(instance->pdev,
5448 CRASH_DMA_BUF_SIZE,
5449 instance->crash_dump_buf,
5450 instance->crash_dump_h);
5451 instance->crash_dump_buf = NULL;
5452 }
5453
5454
5455 dev_info(&instance->pdev->dev,
5456 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5457 le16_to_cpu(ctrl_info->pci.vendor_id),
5458 le16_to_cpu(ctrl_info->pci.device_id),
5459 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5460 le16_to_cpu(ctrl_info->pci.sub_device_id));
5461 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5462 instance->UnevenSpanSupport ? "yes" : "no");
5463 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5464 instance->crash_dump_drv_support ? "yes" : "no");
5465 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5466 instance->use_seqnum_jbod_fp ? "yes" : "no");
5467
5468
5469 instance->max_sectors_per_req = instance->max_num_sge *
5470 SGE_BUFFER_SIZE / 512;
5471 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5472 instance->max_sectors_per_req = tmp_sectors;
5473
5474 /* Check for valid throttlequeuedepth module parameter */
5475 if (throttlequeuedepth &&
5476 throttlequeuedepth <= instance->max_scsi_cmds)
5477 instance->throttlequeuedepth = throttlequeuedepth;
5478 else
5479 instance->throttlequeuedepth =
5480 MEGASAS_THROTTLE_QUEUE_DEPTH;
5481
5482 if ((resetwaittime < 1) ||
5483 (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5484 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5485
5486 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5487 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5488
5489 /* Launch SR-IOV heartbeat timer */
5490 if (instance->requestorId) {
5491 if (!megasas_sriov_start_heartbeat(instance, 1))
5492 megasas_start_timer(instance,
5493 &instance->sriov_heartbeat_timer,
5494 megasas_sriov_heartbeat_handler,
5495 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5496 else
5497 instance->skip_heartbeat_timer_del = 1;
5498 }
5499
5500 return 0;
5501
5502 fail_get_ld_pd_list:
5503 instance->instancet->disable_intr(instance);
5504 fail_init_adapter:
5505 megasas_destroy_irqs(instance);
5506 fail_setup_irqs:
5507 if (instance->msix_vectors)
5508 pci_free_irq_vectors(instance->pdev);
5509 instance->msix_vectors = 0;
5510 fail_ready_state:
5511 kfree(instance->ctrl_info);
5512 instance->ctrl_info = NULL;
5513 iounmap(instance->reg_set);
5514
5515 fail_ioremap:
5516 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5517
5518 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5519 __func__, __LINE__);
5520 return -EINVAL;
5521 }
5522
5523 /**
5524 * megasas_release_mfi - Reverses the FW initialization
5525 * @instance: Adapter soft state
5526 */
5527 static void megasas_release_mfi(struct megasas_instance *instance)
5528 {
5529 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5530
5531 if (instance->reply_queue)
5532 pci_free_consistent(instance->pdev, reply_q_sz,
5533 instance->reply_queue, instance->reply_queue_h);
5534
5535 megasas_free_cmds(instance);
5536
5537 iounmap(instance->reg_set);
5538
5539 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5540 }
5541
5542 /**
5543 * megasas_get_seq_num - Gets latest event sequence numbers
5544 * @instance: Adapter soft state
5545 * @eli: FW event log sequence numbers information
5546 *
5547 * FW maintains a log of all events in a non-volatile area. Upper layers
5548 * typically query the latest event sequence number, the sequence number at
5549 * boot, etc. They "read" all events up to the latest sequence number by
5550 * issuing a direct FW command (DCMD). For future events (beyond the latest
5551 * sequence number), they subscribe to AEN (asynchronous event notification)
5552 * and wait for the events to happen.
5553 */
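/*
 * Typical flow (see megasas_start_aen() further below): the driver issues
 * MR_DCMD_CTRL_EVENT_GET_INFO through this helper to learn newest_seq_num,
 * then calls megasas_register_aen() with newest_seq_num + 1 so that only
 * events occurring after driver load are delivered asynchronously.
 */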
5554 static int
5555 megasas_get_seq_num(struct megasas_instance *instance,
5556 struct megasas_evt_log_info *eli)
5557 {
5558 struct megasas_cmd *cmd;
5559 struct megasas_dcmd_frame *dcmd;
5560 struct megasas_evt_log_info *el_info;
5561 dma_addr_t el_info_h = 0;
5562
5563 cmd = megasas_get_cmd(instance);
5564
5565 if (!cmd) {
5566 return -ENOMEM;
5567 }
5568
5569 dcmd = &cmd->frame->dcmd;
5570 el_info = pci_alloc_consistent(instance->pdev,
5571 sizeof(struct megasas_evt_log_info),
5572 &el_info_h);
5573
5574 if (!el_info) {
5575 megasas_return_cmd(instance, cmd);
5576 return -ENOMEM;
5577 }
5578
5579 memset(el_info, 0, sizeof(*el_info));
5580 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5581
5582 dcmd->cmd = MFI_CMD_DCMD;
5583 dcmd->cmd_status = 0x0;
5584 dcmd->sge_count = 1;
5585 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5586 dcmd->timeout = 0;
5587 dcmd->pad_0 = 0;
5588 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5589 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5590 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5591 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5592
5593 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5594 DCMD_SUCCESS) {
5595 /*
5596 * Copy the data back into the caller's buffer
5597 */
5598 eli->newest_seq_num = el_info->newest_seq_num;
5599 eli->oldest_seq_num = el_info->oldest_seq_num;
5600 eli->clear_seq_num = el_info->clear_seq_num;
5601 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5602 eli->boot_seq_num = el_info->boot_seq_num;
5603 } else
5604 dev_err(&instance->pdev->dev, "DCMD failed "
5605 "from %s\n", __func__);
5606
5607 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5608 el_info, el_info_h);
5609
5610 megasas_return_cmd(instance, cmd);
5611
5612 return 0;
5613 }
5614
5615 /**
5616 * megasas_register_aen - Registers for asynchronous event notification
5617 * @instance: Adapter soft state
5618 * @seq_num: The starting sequence number
5619 * @class_locale_word: Class and locale of the events to subscribe to
5620 *
5621 * This function subscribes for AEN for events beyond @seq_num. It requests
5622 * to be notified if and only if the event matches @class_locale_word.
5623 */
5624 static int
5625 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5626 u32 class_locale_word)
5627 {
5628 int ret_val;
5629 struct megasas_cmd *cmd;
5630 struct megasas_dcmd_frame *dcmd;
5631 union megasas_evt_class_locale curr_aen;
5632 union megasas_evt_class_locale prev_aen;
5633
5634 /*
5635 * If there is an AEN pending already (aen_cmd), check whether the
5636 * class_locale of that pending AEN is inclusive of the new AEN
5637 * request we currently have. If it is, there is nothing to do: every
5638 * event the current request would subscribe to has already been
5639 * subscribed to.
5640 *
5641 * If the old command is _not_ inclusive, we have to abort it, form a
5642 * class_locale that is a superset of both the old and the current
5643 * request, and re-issue the registration to the FW (a worked example
5644 * follows the note on class/locale semantics below).
5645 */
5646
5647 curr_aen.word = class_locale_word;
5648
5649 if (instance->aen_cmd) {
5650
5651 prev_aen.word =
5652 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5653
5654 /*
5655 * A class whose enum value is smaller is inclusive of all higher
5656 * values. If PROGRESS (= -1) was previously registered, then new
5657 * registration requests for higher classes need not be sent to the
5658 * FW; they are automatically included.
5659 *
5660 * Locale numbers don't have such a hierarchy; they are bitmap
5661 * values, so the pending locale must cover every bit of the new
5662 * request for the registration to be considered inclusive.
5663 */
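/*
 * Worked example (hypothetical locale bits): a pending AEN registered with
 * class = -1 (PROGRESS) and locale = 0x0001 already covers a new request
 * for class = 0, locale = 0x0001, so we return early below. A new request
 * for locale = 0x0010 is not covered; the merge path re-registers with
 * locale = 0x0001 | 0x0010 = 0x0011 and the smaller (more inclusive) of
 * the two class values.
 */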
5664 if ((prev_aen.members.class <= curr_aen.members.class) &&
5665 !((prev_aen.members.locale & curr_aen.members.locale) ^
5666 curr_aen.members.locale)) {
5667 /*
5668 * Previously issued event registration includes
5669 * current request. Nothing to do.
5670 */
5671 return 0;
5672 } else {
5673 curr_aen.members.locale |= prev_aen.members.locale;
5674
5675 if (prev_aen.members.class < curr_aen.members.class)
5676 curr_aen.members.class = prev_aen.members.class;
5677
5678 instance->aen_cmd->abort_aen = 1;
5679 ret_val = megasas_issue_blocked_abort_cmd(instance,
5680 instance->
5681 aen_cmd, 30);
5682
5683 if (ret_val) {
5684 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5685 "previous AEN command\n");
5686 return ret_val;
5687 }
5688 }
5689 }
5690
5691 cmd = megasas_get_cmd(instance);
5692
5693 if (!cmd)
5694 return -ENOMEM;
5695
5696 dcmd = &cmd->frame->dcmd;
5697
5698 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5699
5700 /*
5701 * Prepare DCMD for aen registration
5702 */
5703 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5704
5705 dcmd->cmd = MFI_CMD_DCMD;
5706 dcmd->cmd_status = 0x0;
5707 dcmd->sge_count = 1;
5708 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5709 dcmd->timeout = 0;
5710 dcmd->pad_0 = 0;
5711 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5712 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5713 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5714 instance->last_seq_num = seq_num;
5715 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5716 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5717 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5718
5719 if (instance->aen_cmd != NULL) {
5720 megasas_return_cmd(instance, cmd);
5721 return 0;
5722 }
5723
5724 /*
5725 * Store reference to the cmd used to register for AEN. When an
5726 * application wants us to register for AEN, we have to abort this
5727 * cmd and re-register with a new EVENT LOCALE supplied by that app
5728 */
5729 instance->aen_cmd = cmd;
5730
5731 /*
5732 * Issue the aen registration frame
5733 */
5734 instance->instancet->issue_dcmd(instance, cmd);
5735
5736 return 0;
5737 }
5738
5739 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5740 *
5741 * This DCMD fetches a few properties of an LD/system PD defined
5742 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
5743 *
5744 * The driver sends this DCMD whenever a new target is added to the OS.
5745 *
5746 * dcmd.opcode - MR_DCMD_DRV_GET_TARGET_PROP
5747 * dcmd.mbox.b[0] - whether the DCMD is fired for an LD or a system PD.
5748 * 0 = system PD, 1 = LD.
5749 * dcmd.mbox.s[1] - TargetID of the LD/system PD.
5750 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
5751 *
5752 * @instance: Adapter soft state
5753 * @sdev: OS provided scsi device
5754 *
5755 * Returns 0 on success, non-zero on failure.
5756 */
5757 static int
5758 megasas_get_target_prop(struct megasas_instance *instance,
5759 struct scsi_device *sdev)
5760 {
5761 int ret;
5762 struct megasas_cmd *cmd;
5763 struct megasas_dcmd_frame *dcmd;
5764 u16 targetId = (sdev->channel % 2) + sdev->id;
5765
5766 cmd = megasas_get_cmd(instance);
5767
5768 if (!cmd) {
5769 dev_err(&instance->pdev->dev,
5770 "Failed to get cmd %s\n", __func__);
5771 return -ENOMEM;
5772 }
5773
5774 dcmd = &cmd->frame->dcmd;
5775
5776 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5777 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5778 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5779
5780 dcmd->mbox.s[1] = cpu_to_le16(targetId);
5781 dcmd->cmd = MFI_CMD_DCMD;
5782 dcmd->cmd_status = 0xFF;
5783 dcmd->sge_count = 1;
5784 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5785 dcmd->timeout = 0;
5786 dcmd->pad_0 = 0;
5787 dcmd->data_xfer_len =
5788 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5789 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5790 dcmd->sgl.sge32[0].phys_addr =
5791 cpu_to_le32(instance->tgt_prop_h);
5792 dcmd->sgl.sge32[0].length =
5793 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5794
5795 if (instance->ctrl_context && !instance->mask_interrupts)
5796 ret = megasas_issue_blocked_cmd(instance,
5797 cmd, MFI_IO_TIMEOUT_SECS);
5798 else
5799 ret = megasas_issue_polled(instance, cmd);
5800
5801 switch (ret) {
5802 case DCMD_TIMEOUT:
5803 switch (dcmd_timeout_ocr_possible(instance)) {
5804 case INITIATE_OCR:
5805 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5806 megasas_reset_fusion(instance->host,
5807 MFI_IO_TIMEOUT_OCR);
5808 break;
5809 case KILL_ADAPTER:
5810 megaraid_sas_kill_hba(instance);
5811 break;
5812 case IGNORE_TIMEOUT:
5813 dev_info(&instance->pdev->dev,
5814 "Ignore DCMD timeout: %s %d\n",
5815 __func__, __LINE__);
5816 break;
5817 }
5818 break;
5819
5820 default:
5821 megasas_return_cmd(instance, cmd);
5822 }
5823 if (ret != DCMD_SUCCESS)
5824 dev_err(&instance->pdev->dev,
5825 "return from %s %d return value %d\n",
5826 __func__, __LINE__, ret);
5827
5828 return ret;
5829 }
5830
5831 /**
5832 * megasas_start_aen - Subscribes to AEN during driver load time
5833 * @instance: Adapter soft state
5834 */
5835 static int megasas_start_aen(struct megasas_instance *instance)
5836 {
5837 struct megasas_evt_log_info eli;
5838 union megasas_evt_class_locale class_locale;
5839
5840 /*
5841 * Get the latest sequence number from FW
5842 */
5843 memset(&eli, 0, sizeof(eli));
5844
5845 if (megasas_get_seq_num(instance, &eli))
5846 return -1;
5847
5848 /*
5849 * Register AEN with FW for latest sequence number plus 1
5850 */
5851 class_locale.members.reserved = 0;
5852 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5853 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5854
5855 return megasas_register_aen(instance,
5856 le32_to_cpu(eli.newest_seq_num) + 1,
5857 class_locale.word);
5858 }
5859
5860 /**
5861 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5862 * @instance: Adapter soft state
5863 */
5864 static int megasas_io_attach(struct megasas_instance *instance)
5865 {
5866 struct Scsi_Host *host = instance->host;
5867
5868 /*
5869 * Export parameters required by SCSI mid-layer
5870 */
5871 host->unique_id = instance->unique_id;
5872 host->can_queue = instance->max_scsi_cmds;
5873 host->this_id = instance->init_id;
5874 host->sg_tablesize = instance->max_num_sge;
5875
5876 if (instance->fw_support_ieee)
5877 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5878
5879 /*
5880 * Check if the module parameter value for max_sectors can be used
5881 */
5882 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5883 instance->max_sectors_per_req = max_sectors;
5884 else {
5885 if (max_sectors) {
5886 if (((instance->pdev->device ==
5887 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5888 (instance->pdev->device ==
5889 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5890 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5891 instance->max_sectors_per_req = max_sectors;
5892 } else {
5893 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
5894 "and <= %d (or < 1MB for GEN2 controller)\n",
5895 instance->max_sectors_per_req);
5896 }
5897 }
5898 }
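/*
 * Usage note (illustrative): loading the driver with, say,
 * "modprobe megaraid_sas max_sectors=128" caps max_sectors_per_req at 128
 * as long as 128 is below the firmware-derived limit computed earlier;
 * larger values are honoured only on the GEN2 controllers checked above,
 * and anything else falls back to the default with the message above.
 */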
5899
5900 host->max_sectors = instance->max_sectors_per_req;
5901 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5902 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5903 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5904 host->max_lun = MEGASAS_MAX_LUN;
5905 host->max_cmd_len = 16;
5906
5907 /*
5908 * Notify the mid-layer about the new controller
5909 */
5910 if (scsi_add_host(host, &instance->pdev->dev)) {
5911 dev_err(&instance->pdev->dev,
5912 "Failed to add host from %s %d\n",
5913 __func__, __LINE__);
5914 return -ENODEV;
5915 }
5916
5917 return 0;
5918 }
5919
5920 static int
5921 megasas_set_dma_mask(struct pci_dev *pdev)
5922 {
5923 /*
5924 * All our controllers are capable of performing 64-bit DMA
5925 */
5926 if (IS_DMA64) {
5927 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5928
5929 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5930 goto fail_set_dma_mask;
5931 }
5932 } else {
5933 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5934 goto fail_set_dma_mask;
5935 }
5936 /*
5937 * Ensure that all data structures are allocated in 32-bit
5938 * memory.
5939 */
5940 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5941 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
5942 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5943 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5944 dev_info(&pdev->dev, "set 32bit DMA mask "
5945 "and 32 bit consistent mask\n");
5946 else
5947 goto fail_set_dma_mask;
5948 }
5949
5950 return 0;
5951
5952 fail_set_dma_mask:
5953 return 1;
5954 }
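/*
 * Net effect (informal summary): streaming DMA may use 64-bit addresses
 * where the platform supports it, while the coherent mask stays at 32 bits,
 * so buffers obtained from the consistent/coherent allocators (producer,
 * consumer, evt_detail, DCMD payloads, ...) get bus addresses that fit the
 * 32-bit sge32 descriptors used by the MFI frames in this file.
 */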
5955
5956 /**
5957 * megasas_probe_one - PCI hotplug entry point
5958 * @pdev: PCI device structure
5959 * @id: PCI ids of supported hotplugged adapter
5960 */
5961 static int megasas_probe_one(struct pci_dev *pdev,
5962 const struct pci_device_id *id)
5963 {
5964 int rval, pos;
5965 struct Scsi_Host *host;
5966 struct megasas_instance *instance;
5967 u16 control = 0;
5968 struct fusion_context *fusion = NULL;
5969
5970 /* Reset MSI-X in the kdump kernel */
5971 if (reset_devices) {
5972 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5973 if (pos) {
5974 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5975 &control);
5976 if (control & PCI_MSIX_FLAGS_ENABLE) {
5977 dev_info(&pdev->dev, "resetting MSI-X\n");
5978 pci_write_config_word(pdev,
5979 pos + PCI_MSIX_FLAGS,
5980 control &
5981 ~PCI_MSIX_FLAGS_ENABLE);
5982 }
5983 }
5984 }
5985
5986 /*
5987 * PCI prepping: enable the device, set bus mastering and the DMA mask
5988 */
5989 rval = pci_enable_device_mem(pdev);
5990
5991 if (rval) {
5992 return rval;
5993 }
5994
5995 pci_set_master(pdev);
5996
5997 if (megasas_set_dma_mask(pdev))
5998 goto fail_set_dma_mask;
5999
6000 host = scsi_host_alloc(&megasas_template,
6001 sizeof(struct megasas_instance));
6002
6003 if (!host) {
6004 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6005 goto fail_alloc_instance;
6006 }
6007
6008 instance = (struct megasas_instance *)host->hostdata;
6009 memset(instance, 0, sizeof(*instance));
6010 atomic_set(&instance->fw_reset_no_pci_access, 0);
6011 instance->pdev = pdev;
6012
6013 switch (instance->pdev->device) {
6014 case PCI_DEVICE_ID_LSI_VENTURA:
6015 case PCI_DEVICE_ID_LSI_HARPOON:
6016 case PCI_DEVICE_ID_LSI_TOMCAT:
6017 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6018 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6019 instance->is_ventura = true;
6020 case PCI_DEVICE_ID_LSI_FUSION:
6021 case PCI_DEVICE_ID_LSI_PLASMA:
6022 case PCI_DEVICE_ID_LSI_INVADER:
6023 case PCI_DEVICE_ID_LSI_FURY:
6024 case PCI_DEVICE_ID_LSI_INTRUDER:
6025 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6026 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6027 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6028 {
6029 if (megasas_alloc_fusion_context(instance)) {
6030 megasas_free_fusion_context(instance);
6031 goto fail_alloc_dma_buf;
6032 }
6033 fusion = instance->ctrl_context;
6034
6035 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
6036 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
6037 fusion->adapter_type = THUNDERBOLT_SERIES;
6038 else if (instance->is_ventura)
6039 fusion->adapter_type = VENTURA_SERIES;
6040 else
6041 fusion->adapter_type = INVADER_SERIES;
6042 }
6043 break;
6044 default: /* For all other supported controllers */
6045
6046 instance->producer =
6047 pci_alloc_consistent(pdev, sizeof(u32),
6048 &instance->producer_h);
6049 instance->consumer =
6050 pci_alloc_consistent(pdev, sizeof(u32),
6051 &instance->consumer_h);
6052
6053 if (!instance->producer || !instance->consumer) {
6054 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
6055 "memory for producer, consumer\n");
6056 goto fail_alloc_dma_buf;
6057 }
6058
6059 *instance->producer = 0;
6060 *instance->consumer = 0;
6061 break;
6062 }
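/*
 * Summary of the branches above: FUSION and PLASMA map to
 * THUNDERBOLT_SERIES; the Ventura-class IDs (VENTURA, HARPOON, TOMCAT,
 * VENTURA_4PORT, CRUSADER_4PORT) map to VENTURA_SERIES; the remaining
 * Fusion IDs map to INVADER_SERIES. Every other (MFI-only) controller
 * instead gets the legacy producer/consumer index pair allocated in the
 * default case.
 */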
6063
6064 /* Crash dump feature related initialisation*/
6065 instance->drv_buf_index = 0;
6066 instance->drv_buf_alloc = 0;
6067 instance->crash_dump_fw_support = 0;
6068 instance->crash_dump_app_support = 0;
6069 instance->fw_crash_state = UNAVAILABLE;
6070 spin_lock_init(&instance->crashdump_lock);
6071 instance->crash_dump_buf = NULL;
6072
6073 megasas_poll_wait_aen = 0;
6074 instance->flag_ieee = 0;
6075 instance->ev = NULL;
6076 instance->issuepend_done = 1;
6077 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6078 instance->is_imr = 0;
6079
6080 instance->evt_detail = pci_alloc_consistent(pdev,
6081 sizeof(struct
6082 megasas_evt_detail),
6083 &instance->evt_detail_h);
6084
6085 if (!instance->evt_detail) {
6086 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
6087 "event detail structure\n");
6088 goto fail_alloc_dma_buf;
6089 }
6090
6091 if (!reset_devices) {
6092 instance->system_info_buf = pci_zalloc_consistent(pdev,
6093 sizeof(struct MR_DRV_SYSTEM_INFO),
6094 &instance->system_info_h);
6095 if (!instance->system_info_buf)
6096 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
6097
6098 instance->pd_info = pci_alloc_consistent(pdev,
6099 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
6100
6101 if (!instance->pd_info)
6102 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
6103
6104 instance->tgt_prop = pci_alloc_consistent(pdev,
6105 sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6106
6107 if (!instance->tgt_prop)
6108 dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
6109
6110 instance->crash_dump_buf = pci_alloc_consistent(pdev,
6111 CRASH_DMA_BUF_SIZE,
6112 &instance->crash_dump_h);
6113 if (!instance->crash_dump_buf)
6114 dev_err(&pdev->dev, "Can't allocate Firmware "
6115 "crash dump DMA buffer\n");
6116 }
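/*
 * Note: in a kdump kernel (reset_devices set) these optional DMA buffers
 * are not allocated at all, to keep the memory footprint small, and an
 * allocation failure here only logs a message; the probe continues either
 * way.
 */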
6117
6118 /*
6119 * Initialize locks and queues
6120 */
6121 INIT_LIST_HEAD(&instance->cmd_pool);
6122 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6123
6124 atomic_set(&instance->fw_outstanding,0);
6125
6126 init_waitqueue_head(&instance->int_cmd_wait_q);
6127 init_waitqueue_head(&instance->abort_cmd_wait_q);
6128
6129 spin_lock_init(&instance->mfi_pool_lock);
6130 spin_lock_init(&instance->hba_lock);
6131 spin_lock_init(&instance->stream_lock);
6132 spin_lock_init(&instance->completion_lock);
6133
6134 mutex_init(&instance->reset_mutex);
6135 mutex_init(&instance->hba_mutex);
6136
6137 /*
6138 * Initialize PCI related and misc parameters
6139 */
6140 instance->host = host;
6141 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6142 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6143 instance->ctrl_info = NULL;
6144
6145
6146 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6147 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6148 instance->flag_ieee = 1;
6149
6150 megasas_dbg_lvl = 0;
6151 instance->flag = 0;
6152 instance->unload = 1;
6153 instance->last_time = 0;
6154 instance->disableOnlineCtrlReset = 1;
6155 instance->UnevenSpanSupport = 0;
6156
6157 if (instance->ctrl_context) {
6158 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6159 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6160 } else
6161 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6162
6163 /*
6164 * Initialize MFI Firmware
6165 */
6166 if (megasas_init_fw(instance))
6167 goto fail_init_mfi;
6168
6169 if (instance->requestorId) {
6170 if (instance->PlasmaFW111) {
6171 instance->vf_affiliation_111 =
6172 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6173 &instance->vf_affiliation_111_h);
6174 if (!instance->vf_affiliation_111)
6175 dev_warn(&pdev->dev, "Can't allocate "
6176 "memory for VF affiliation buffer\n");
6177 } else {
6178 instance->vf_affiliation =
6179 pci_alloc_consistent(pdev,
6180 (MAX_LOGICAL_DRIVES + 1) *
6181 sizeof(struct MR_LD_VF_AFFILIATION),
6182 &instance->vf_affiliation_h);
6183 if (!instance->vf_affiliation)
6184 dev_warn(&pdev->dev, "Can't allocate "
6185 "memory for VF affiliation buffer\n");
6186 }
6187 }
6188
6189 /*
6190 * Store instance in PCI softstate
6191 */
6192 pci_set_drvdata(pdev, instance);
6193
6194 /*
6195 * Add this controller to megasas_mgmt_info structure so that it
6196 * can be exported to management applications
6197 */
6198 megasas_mgmt_info.count++;
6199 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6200 megasas_mgmt_info.max_index++;
6201
6202 /*
6203 * Register with SCSI mid-layer
6204 */
6205 if (megasas_io_attach(instance))
6206 goto fail_io_attach;
6207
6208 instance->unload = 0;
6209 /*
6210 * Trigger SCSI to scan our drives
6211 */
6212 scsi_scan_host(host);
6213
6214 /*
6215 * Initiate AEN (Asynchronous Event Notification)
6216 */
6217 if (megasas_start_aen(instance)) {
6218 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6219 goto fail_start_aen;
6220 }
6221
6222 /* Get current SR-IOV LD/VF affiliation */
6223 if (instance->requestorId)
6224 megasas_get_ld_vf_affiliation(instance, 1);
6225
6226 return 0;
6227
6228 fail_start_aen:
6229 fail_io_attach:
6230 megasas_mgmt_info.count--;
6231 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6232 megasas_mgmt_info.max_index--;
6233
6234 instance->instancet->disable_intr(instance);
6235 megasas_destroy_irqs(instance);
6236
6237 if (instance->ctrl_context)
6238 megasas_release_fusion(instance);
6239 else
6240 megasas_release_mfi(instance);
6241 if (instance->msix_vectors)
6242 pci_free_irq_vectors(instance->pdev);
6243 fail_init_mfi:
6244 fail_alloc_dma_buf:
6245 if (instance->evt_detail)
6246 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6247 instance->evt_detail,
6248 instance->evt_detail_h);
6249
6250 if (instance->pd_info)
6251 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6252 instance->pd_info,
6253 instance->pd_info_h);
6254 if (instance->tgt_prop)
6255 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6256 instance->tgt_prop,
6257 instance->tgt_prop_h);
6258 if (instance->producer)
6259 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6260 instance->producer_h);
6261 if (instance->consumer)
6262 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6263 instance->consumer_h);
6264 scsi_host_put(host);
6265
6266 fail_alloc_instance:
6267 fail_set_dma_mask:
6268 pci_disable_device(pdev);
6269
6270 return -ENODEV;
6271 }
6272
6273 /**
6274 * megasas_flush_cache - Requests FW to flush all its caches
6275 * @instance: Adapter soft state
6276 */
6277 static void megasas_flush_cache(struct megasas_instance *instance)
6278 {
6279 struct megasas_cmd *cmd;
6280 struct megasas_dcmd_frame *dcmd;
6281
6282 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6283 return;
6284
6285 cmd = megasas_get_cmd(instance);
6286
6287 if (!cmd)
6288 return;
6289
6290 dcmd = &cmd->frame->dcmd;
6291
6292 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6293
6294 dcmd->cmd = MFI_CMD_DCMD;
6295 dcmd->cmd_status = 0x0;
6296 dcmd->sge_count = 0;
6297 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6298 dcmd->timeout = 0;
6299 dcmd->pad_0 = 0;
6300 dcmd->data_xfer_len = 0;
6301 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6302 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6303
6304 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6305 != DCMD_SUCCESS) {
6306 dev_err(&instance->pdev->dev,
6307 "return from %s %d\n", __func__, __LINE__);
6308 return;
6309 }
6310
6311 megasas_return_cmd(instance, cmd);
6312 }
6313
6314 /**
6315 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6316 * @instance: Adapter soft state
6317 * @opcode: Shutdown/Hibernate
6318 */
6319 static void megasas_shutdown_controller(struct megasas_instance *instance,
6320 u32 opcode)
6321 {
6322 struct megasas_cmd *cmd;
6323 struct megasas_dcmd_frame *dcmd;
6324
6325 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6326 return;
6327
6328 cmd = megasas_get_cmd(instance);
6329
6330 if (!cmd)
6331 return;
6332
6333 if (instance->aen_cmd)
6334 megasas_issue_blocked_abort_cmd(instance,
6335 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6336 if (instance->map_update_cmd)
6337 megasas_issue_blocked_abort_cmd(instance,
6338 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6339 if (instance->jbod_seq_cmd)
6340 megasas_issue_blocked_abort_cmd(instance,
6341 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6342
6343 dcmd = &cmd->frame->dcmd;
6344
6345 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6346
6347 dcmd->cmd = MFI_CMD_DCMD;
6348 dcmd->cmd_status = 0x0;
6349 dcmd->sge_count = 0;
6350 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6351 dcmd->timeout = 0;
6352 dcmd->pad_0 = 0;
6353 dcmd->data_xfer_len = 0;
6354 dcmd->opcode = cpu_to_le32(opcode);
6355
6356 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6357 != DCMD_SUCCESS) {
6358 dev_err(&instance->pdev->dev,
6359 "return from %s %d\n", __func__, __LINE__);
6360 return;
6361 }
6362
6363 megasas_return_cmd(instance, cmd);
6364 }
6365
6366 #ifdef CONFIG_PM
6367 /**
6368 * megasas_suspend - driver suspend entry point
6369 * @pdev: PCI device structure
6370 * @state: PCI power state passed to the suspend routine
6371 */
6372 static int
6373 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6374 {
6375 struct Scsi_Host *host;
6376 struct megasas_instance *instance;
6377
6378 instance = pci_get_drvdata(pdev);
6379 host = instance->host;
6380 instance->unload = 1;
6381
6382 /* Shutdown SR-IOV heartbeat timer */
6383 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6384 del_timer_sync(&instance->sriov_heartbeat_timer);
6385
6386 megasas_flush_cache(instance);
6387 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6388
6389 /* cancel the delayed work if it is still queued */
6390 if (instance->ev != NULL) {
6391 struct megasas_aen_event *ev = instance->ev;
6392 cancel_delayed_work_sync(&ev->hotplug_work);
6393 instance->ev = NULL;
6394 }
6395
6396 tasklet_kill(&instance->isr_tasklet);
6397
6398 pci_set_drvdata(instance->pdev, instance);
6399 instance->instancet->disable_intr(instance);
6400
6401 megasas_destroy_irqs(instance);
6402
6403 if (instance->msix_vectors)
6404 pci_free_irq_vectors(instance->pdev);
6405
6406 pci_save_state(pdev);
6407 pci_disable_device(pdev);
6408
6409 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6410
6411 return 0;
6412 }
6413
6414 /**
6415 * megasas_resume - driver resume entry point
6416 * @pdev: PCI device structure
6417 */
6418 static int
6419 megasas_resume(struct pci_dev *pdev)
6420 {
6421 int rval;
6422 struct Scsi_Host *host;
6423 struct megasas_instance *instance;
6424 int irq_flags = PCI_IRQ_LEGACY;
6425
6426 instance = pci_get_drvdata(pdev);
6427 host = instance->host;
6428 pci_set_power_state(pdev, PCI_D0);
6429 pci_enable_wake(pdev, PCI_D0, 0);
6430 pci_restore_state(pdev);
6431
6432 /*
6433 * PCI prepping: enable the device, set bus mastering and the DMA mask
6434 */
6435 rval = pci_enable_device_mem(pdev);
6436
6437 if (rval) {
6438 dev_err(&pdev->dev, "Enable device failed\n");
6439 return rval;
6440 }
6441
6442 pci_set_master(pdev);
6443
6444 if (megasas_set_dma_mask(pdev))
6445 goto fail_set_dma_mask;
6446
6447 /*
6448 * Initialize MFI Firmware
6449 */
6450
6451 atomic_set(&instance->fw_outstanding, 0);
6452
6453 /*
6454 * We expect the FW state to be READY
6455 */
6456 if (megasas_transition_to_ready(instance, 0))
6457 goto fail_ready_state;
6458
6459 /* Now re-enable MSI-X */
6460 if (instance->msix_vectors) {
6461 irq_flags = PCI_IRQ_MSIX;
6462 if (smp_affinity_enable)
6463 irq_flags |= PCI_IRQ_AFFINITY;
6464 }
6465 rval = pci_alloc_irq_vectors(instance->pdev, 1,
6466 instance->msix_vectors ?
6467 instance->msix_vectors : 1, irq_flags);
6468 if (rval < 0)
6469 goto fail_reenable_msix;
6470
6471 if (instance->ctrl_context) {
6472 megasas_reset_reply_desc(instance);
6473 if (megasas_ioc_init_fusion(instance)) {
6474 megasas_free_cmds(instance);
6475 megasas_free_cmds_fusion(instance);
6476 goto fail_init_mfi;
6477 }
6478 if (!megasas_get_map_info(instance))
6479 megasas_sync_map_info(instance);
6480 } else {
6481 *instance->producer = 0;
6482 *instance->consumer = 0;
6483 if (megasas_issue_init_mfi(instance))
6484 goto fail_init_mfi;
6485 }
6486
6487 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6488 (unsigned long)instance);
6489
6490 if (instance->msix_vectors ?
6491 megasas_setup_irqs_msix(instance, 0) :
6492 megasas_setup_irqs_ioapic(instance))
6493 goto fail_init_mfi;
6494
6495 /* Re-launch SR-IOV heartbeat timer */
6496 if (instance->requestorId) {
6497 if (!megasas_sriov_start_heartbeat(instance, 0))
6498 megasas_start_timer(instance,
6499 &instance->sriov_heartbeat_timer,
6500 megasas_sriov_heartbeat_handler,
6501 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6502 else {
6503 instance->skip_heartbeat_timer_del = 1;
6504 goto fail_init_mfi;
6505 }
6506 }
6507
6508 instance->instancet->enable_intr(instance);
6509 megasas_setup_jbod_map(instance);
6510 instance->unload = 0;
6511
6512 /*
6513 * Initiate AEN (Asynchronous Event Notification)
6514 */
6515 if (megasas_start_aen(instance))
6516 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6517
6518 return 0;
6519
6520 fail_init_mfi:
6521 if (instance->evt_detail)
6522 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6523 instance->evt_detail,
6524 instance->evt_detail_h);
6525
6526 if (instance->pd_info)
6527 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6528 instance->pd_info,
6529 instance->pd_info_h);
6530 if (instance->tgt_prop)
6531 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6532 instance->tgt_prop,
6533 instance->tgt_prop_h);
6534 if (instance->producer)
6535 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6536 instance->producer_h);
6537 if (instance->consumer)
6538 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6539 instance->consumer_h);
6540 scsi_host_put(host);
6541
6542 fail_set_dma_mask:
6543 fail_ready_state:
6544 fail_reenable_msix:
6545
6546 pci_disable_device(pdev);
6547
6548 return -ENODEV;
6549 }
6550 #else
6551 #define megasas_suspend NULL
6552 #define megasas_resume NULL
6553 #endif
6554
6555 static inline int
6556 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6557 {
6558 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6559 int i;
6560
6561 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6562 return 1;
6563
6564 for (i = 0; i < wait_time; i++) {
6565 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
6566 break;
6567
6568 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6569 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6570
6571 msleep(1000);
6572 }
6573
6574 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6575 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6576 __func__);
6577 return 1;
6578 }
6579
6580 return 0;
6581 }
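/*
 * Informal summary of the helper above: it polls adprecovery once a second
 * for up to twice MEGASAS_RESET_WAIT_TIME seconds, printing a notice every
 * MEGASAS_RESET_NOTICE_INTERVAL iterations, and returns 0 once the HBA is
 * operational, or 1 on a fatal HW error or timeout. The callers below use
 * it to decide whether it is safe to fire flush/shutdown DCMDs.
 */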
6582
6583 /**
6584 * megasas_detach_one - PCI hot"un"plug entry point
6585 * @pdev: PCI device structure
6586 */
6587 static void megasas_detach_one(struct pci_dev *pdev)
6588 {
6589 int i;
6590 struct Scsi_Host *host;
6591 struct megasas_instance *instance;
6592 struct fusion_context *fusion;
6593 u32 pd_seq_map_sz;
6594
6595 instance = pci_get_drvdata(pdev);
6596 instance->unload = 1;
6597 host = instance->host;
6598 fusion = instance->ctrl_context;
6599
6600 /* Shutdown SR-IOV heartbeat timer */
6601 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6602 del_timer_sync(&instance->sriov_heartbeat_timer);
6603
6604 if (instance->fw_crash_state != UNAVAILABLE)
6605 megasas_free_host_crash_buffer(instance);
6606 scsi_remove_host(instance->host);
6607
6608 if (megasas_wait_for_adapter_operational(instance))
6609 goto skip_firing_dcmds;
6610
6611 megasas_flush_cache(instance);
6612 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6613
6614 skip_firing_dcmds:
6615 /* cancel the delayed work if it is still queued */
6616 if (instance->ev != NULL) {
6617 struct megasas_aen_event *ev = instance->ev;
6618 cancel_delayed_work_sync(&ev->hotplug_work);
6619 instance->ev = NULL;
6620 }
6621
6622 /* cancel all wait events */
6623 wake_up_all(&instance->int_cmd_wait_q);
6624
6625 tasklet_kill(&instance->isr_tasklet);
6626
6627 /*
6628 * Take the instance off the instance array. Note that we will not
6629 * decrement max_index; we let this array be a sparse array.
6630 */
6631 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6632 if (megasas_mgmt_info.instance[i] == instance) {
6633 megasas_mgmt_info.count--;
6634 megasas_mgmt_info.instance[i] = NULL;
6635
6636 break;
6637 }
6638 }
6639
6640 instance->instancet->disable_intr(instance);
6641
6642 megasas_destroy_irqs(instance);
6643
6644 if (instance->msix_vectors)
6645 pci_free_irq_vectors(instance->pdev);
6646
6647 if (instance->is_ventura) {
6648 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6649 kfree(fusion->stream_detect_by_ld[i]);
6650 kfree(fusion->stream_detect_by_ld);
6651 fusion->stream_detect_by_ld = NULL;
6652 }
6653
6654
6655 if (instance->ctrl_context) {
6656 megasas_release_fusion(instance);
6657 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6658 (sizeof(struct MR_PD_CFG_SEQ) *
6659 (MAX_PHYSICAL_DEVICES - 1));
6660 for (i = 0; i < 2 ; i++) {
6661 if (fusion->ld_map[i])
6662 dma_free_coherent(&instance->pdev->dev,
6663 fusion->max_map_sz,
6664 fusion->ld_map[i],
6665 fusion->ld_map_phys[i]);
6666 if (fusion->ld_drv_map[i])
6667 free_pages((ulong)fusion->ld_drv_map[i],
6668 fusion->drv_map_pages);
6669 if (fusion->pd_seq_sync[i])
6670 dma_free_coherent(&instance->pdev->dev,
6671 pd_seq_map_sz,
6672 fusion->pd_seq_sync[i],
6673 fusion->pd_seq_phys[i]);
6674 }
6675 megasas_free_fusion_context(instance);
6676 } else {
6677 megasas_release_mfi(instance);
6678 pci_free_consistent(pdev, sizeof(u32),
6679 instance->producer,
6680 instance->producer_h);
6681 pci_free_consistent(pdev, sizeof(u32),
6682 instance->consumer,
6683 instance->consumer_h);
6684 }
6685
6686 kfree(instance->ctrl_info);
6687
6688 if (instance->evt_detail)
6689 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6690 instance->evt_detail, instance->evt_detail_h);
6691 if (instance->pd_info)
6692 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6693 instance->pd_info,
6694 instance->pd_info_h);
6695 if (instance->tgt_prop)
6696 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6697 instance->tgt_prop,
6698 instance->tgt_prop_h);
6699 if (instance->vf_affiliation)
6700 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6701 sizeof(struct MR_LD_VF_AFFILIATION),
6702 instance->vf_affiliation,
6703 instance->vf_affiliation_h);
6704
6705 if (instance->vf_affiliation_111)
6706 pci_free_consistent(pdev,
6707 sizeof(struct MR_LD_VF_AFFILIATION_111),
6708 instance->vf_affiliation_111,
6709 instance->vf_affiliation_111_h);
6710
6711 if (instance->hb_host_mem)
6712 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6713 instance->hb_host_mem,
6714 instance->hb_host_mem_h);
6715
6716 if (instance->crash_dump_buf)
6717 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6718 instance->crash_dump_buf, instance->crash_dump_h);
6719
6720 if (instance->system_info_buf)
6721 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6722 instance->system_info_buf, instance->system_info_h);
6723
6724 scsi_host_put(host);
6725
6726 pci_disable_device(pdev);
6727 }
6728
6729 /**
6730 * megasas_shutdown - Shutdown entry point
6731 * @pdev: PCI device structure
6732 */
6733 static void megasas_shutdown(struct pci_dev *pdev)
6734 {
6735 struct megasas_instance *instance = pci_get_drvdata(pdev);
6736
6737 instance->unload = 1;
6738
6739 if (megasas_wait_for_adapter_operational(instance))
6740 goto skip_firing_dcmds;
6741
6742 megasas_flush_cache(instance);
6743 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6744
6745 skip_firing_dcmds:
6746 instance->instancet->disable_intr(instance);
6747 megasas_destroy_irqs(instance);
6748
6749 if (instance->msix_vectors)
6750 pci_free_irq_vectors(instance->pdev);
6751 }
6752
6753 /**
6754 * megasas_mgmt_open - char node "open" entry point
6755 */
6756 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6757 {
6758 /*
6759 * Allow only those users with admin rights
6760 */
6761 if (!capable(CAP_SYS_ADMIN))
6762 return -EACCES;
6763
6764 return 0;
6765 }
6766
6767 /**
6768 * megasas_mgmt_fasync - Async notifier registration from applications
6769 *
6770 * This function adds the calling process to a driver global queue. When an
6771 * event occurs, SIGIO will be sent to all processes in this queue.
6772 */
6773 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6774 {
6775 int rc;
6776
6777 mutex_lock(&megasas_async_queue_mutex);
6778
6779 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6780
6781 mutex_unlock(&megasas_async_queue_mutex);
6782
6783 if (rc >= 0) {
6784 /* For sanity check when we get ioctl */
6785 filep->private_data = filep;
6786 return 0;
6787 }
6788
6789 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6790
6791 return rc;
6792 }
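/*
 * Hypothetical user-space usage (standard fasync pattern, not a verbatim
 * excerpt from any management tool): after opening the megasas management
 * character node, an application would do
 *
 *   fcntl(fd, F_SETOWN, getpid());
 *   fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * which reaches megasas_mgmt_fasync() above; the process is then sent
 * SIGIO whenever the driver posts an AEN and typically follows up with
 * the MEGASAS_IOC_GET_AEN ioctl handled later in this file.
 */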
6793
6794 /**
6795 * megasas_mgmt_poll - char node "poll" entry point
6796 */
6797 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6798 {
6799 unsigned int mask;
6800 unsigned long flags;
6801
6802 poll_wait(file, &megasas_poll_wait, wait);
6803 spin_lock_irqsave(&poll_aen_lock, flags);
6804 if (megasas_poll_wait_aen)
6805 mask = (POLLIN | POLLRDNORM);
6806 else
6807 mask = 0;
6808 megasas_poll_wait_aen = 0;
6809 spin_unlock_irqrestore(&poll_aen_lock, flags);
6810 return mask;
6811 }
6812
6813 /*
6814 * megasas_set_crash_dump_params_ioctl:
6815 * Send CRASH_DUMP_MODE DCMD to all controllers
6816 * @cmd: MFI command frame
6817 */
6818
6819 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6820 {
6821 struct megasas_instance *local_instance;
6822 int i, error = 0;
6823 int crash_support;
6824
6825 crash_support = cmd->frame->dcmd.mbox.w[0];
6826
6827 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6828 local_instance = megasas_mgmt_info.instance[i];
6829 if (local_instance && local_instance->crash_dump_drv_support) {
6830 if ((atomic_read(&local_instance->adprecovery) ==
6831 MEGASAS_HBA_OPERATIONAL) &&
6832 !megasas_set_crash_dump_params(local_instance,
6833 crash_support)) {
6834 local_instance->crash_dump_app_support =
6835 crash_support;
6836 dev_info(&local_instance->pdev->dev,
6837 "Application firmware crash "
6838 "dump mode set success\n");
6839 error = 0;
6840 } else {
6841 dev_info(&local_instance->pdev->dev,
6842 "Application firmware crash "
6843 "dump mode set failed\n");
6844 error = -1;
6845 }
6846 }
6847 }
6848 return error;
6849 }
6850
6851 /**
6852 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6853 * @instance: Adapter soft state
6854 * @user_ioc: User's ioctl packet; @ioc: kernel copy of that packet
6855 */
6856 static int
6857 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6858 struct megasas_iocpacket __user * user_ioc,
6859 struct megasas_iocpacket *ioc)
6860 {
6861 struct megasas_sge32 *kern_sge32;
6862 struct megasas_cmd *cmd;
6863 void *kbuff_arr[MAX_IOCTL_SGE];
6864 dma_addr_t buf_handle = 0;
6865 int error = 0, i;
6866 void *sense = NULL;
6867 dma_addr_t sense_handle;
6868 unsigned long *sense_ptr;
6869 u32 opcode;
6870
6871 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6872
6873 if (ioc->sge_count > MAX_IOCTL_SGE) {
6874 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6875 ioc->sge_count, MAX_IOCTL_SGE);
6876 return -EINVAL;
6877 }
6878
6879 cmd = megasas_get_cmd(instance);
6880 if (!cmd) {
6881 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6882 return -ENOMEM;
6883 }
6884
6885 /*
6886 * User's IOCTL packet has 2 frames (maximum). Copy those two
6887 * frames into our cmd's frames. cmd->frame's context will get
6888 * overwritten when we copy from user's frames. So set that value
6889 * alone separately
6890 */
6891 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6892 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6893 cmd->frame->hdr.pad_0 = 0;
6894 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6895 MFI_FRAME_SGL64 |
6896 MFI_FRAME_SENSE64));
6897 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
6898
6899 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
6900 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
6901 megasas_return_cmd(instance, cmd);
6902 return -1;
6903 }
6904 }
6905
6906 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6907 error = megasas_set_crash_dump_params_ioctl(cmd);
6908 megasas_return_cmd(instance, cmd);
6909 return error;
6910 }
6911
6912 /*
6913 * The management interface between applications and the FW uses
6914 * MFI frames. E.g., RAID configuration changes, LD property changes,
6915 * etc. are accomplished through different kinds of MFI frames. The
6916 * driver only needs to take care of substituting user buffers with
6917 * kernel buffers in the SGLs; the location of the SGL is embedded in
6918 * the struct iocpacket itself (a sketch of this follows below).
6919 */
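/*
 * Sketch of the substitution done below, assuming a hypothetical ioctl
 * with sge_count = 2: for each ioc->sgl[i] the driver allocates a
 * DMA-coherent mirror buffer, rewrites kern_sge32[i].phys_addr/length to
 * point at that mirror, copies the user data in before issuing the frame,
 * and copies the results back to ioc->sgl[i].iov_base after completion.
 */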
6920 kern_sge32 = (struct megasas_sge32 *)
6921 ((unsigned long)cmd->frame + ioc->sgl_off);
6922
6923 /*
6924 * For each user buffer, create a mirror buffer and copy in
6925 */
6926 for (i = 0; i < ioc->sge_count; i++) {
6927 if (!ioc->sgl[i].iov_len)
6928 continue;
6929
6930 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6931 ioc->sgl[i].iov_len,
6932 &buf_handle, GFP_KERNEL);
6933 if (!kbuff_arr[i]) {
6934 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6935 "kernel SGL buffer for IOCTL\n");
6936 error = -ENOMEM;
6937 goto out;
6938 }
6939
6940 /*
6941 * We don't change the dma_coherent_mask, so
6942 * dma_alloc_coherent only returns 32bit addresses
6943 */
6944 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6945 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6946
6947 /*
6948 * We created a kernel buffer corresponding to the
6949 * user buffer. Now copy in from the user buffer
6950 */
6951 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6952 (u32) (ioc->sgl[i].iov_len))) {
6953 error = -EFAULT;
6954 goto out;
6955 }
6956 }
6957
6958 if (ioc->sense_len) {
6959 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6960 &sense_handle, GFP_KERNEL);
6961 if (!sense) {
6962 error = -ENOMEM;
6963 goto out;
6964 }
6965
6966 sense_ptr =
6967 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6968 *sense_ptr = cpu_to_le32(sense_handle);
6969 }
6970
6971 /*
6972 * Set the sync_cmd flag so that the ISR knows not to complete this
6973 * cmd to the SCSI mid-layer
6974 */
6975 cmd->sync_cmd = 1;
6976 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6977 cmd->sync_cmd = 0;
6978 dev_err(&instance->pdev->dev,
6979 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6980 __func__, __LINE__, opcode, cmd->cmd_status_drv);
6981 return -EBUSY;
6982 }
6983
6984 cmd->sync_cmd = 0;
6985
6986 if (instance->unload == 1) {
6987 dev_info(&instance->pdev->dev, "Driver unload is in progress, "
6988 "don't submit data to application\n");
6989 goto out;
6990 }
6991 /*
6992 * copy out the kernel buffers to user buffers
6993 */
6994 for (i = 0; i < ioc->sge_count; i++) {
6995 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
6996 ioc->sgl[i].iov_len)) {
6997 error = -EFAULT;
6998 goto out;
6999 }
7000 }
7001
7002 /*
7003 * copy out the sense
7004 */
7005 if (ioc->sense_len) {
7006 /*
7007 * sense_ptr points to the location that has the user
7008 * sense buffer address
7009 */
7010 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7011 ioc->sense_off);
7012
7013 if (copy_to_user((void __user *)((unsigned long)
7014 get_unaligned((unsigned long *)sense_ptr)),
7015 sense, ioc->sense_len)) {
7016 dev_err(&instance->pdev->dev, "Failed to copy out to user "
7017 "sense data\n");
7018 error = -EFAULT;
7019 goto out;
7020 }
7021 }
7022
7023 /*
7024 * copy the status codes returned by the fw
7025 */
7026 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7027 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7028 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7029 error = -EFAULT;
7030 }
7031
7032 out:
7033 if (sense) {
7034 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7035 sense, sense_handle);
7036 }
7037
7038 for (i = 0; i < ioc->sge_count; i++) {
7039 if (kbuff_arr[i]) {
7040 dma_free_coherent(&instance->pdev->dev,
7041 le32_to_cpu(kern_sge32[i].length),
7042 kbuff_arr[i],
7043 le32_to_cpu(kern_sge32[i].phys_addr));
7044 kbuff_arr[i] = NULL;
7045 }
7046 }
7047
7048 megasas_return_cmd(instance, cmd);
7049 return error;
7050 }
7051
7052 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7053 {
7054 struct megasas_iocpacket __user *user_ioc =
7055 (struct megasas_iocpacket __user *)arg;
7056 struct megasas_iocpacket *ioc;
7057 struct megasas_instance *instance;
7058 int error;
7059 int i;
7060 unsigned long flags;
7061 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7062
7063 ioc = memdup_user(user_ioc, sizeof(*ioc));
7064 if (IS_ERR(ioc))
7065 return PTR_ERR(ioc);
7066
7067 instance = megasas_lookup_instance(ioc->host_no);
7068 if (!instance) {
7069 error = -ENODEV;
7070 goto out_kfree_ioc;
7071 }
7072
7073 /* Adjust ioctl wait time for VF mode */
7074 if (instance->requestorId)
7075 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7076
7077 /* Block ioctls in VF mode */
7078 if (instance->requestorId && !allow_vf_ioctls) {
7079 error = -ENODEV;
7080 goto out_kfree_ioc;
7081 }
7082
7083 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7084 dev_err(&instance->pdev->dev, "Controller in crit error\n");
7085 error = -ENODEV;
7086 goto out_kfree_ioc;
7087 }
7088
7089 if (instance->unload == 1) {
7090 error = -ENODEV;
7091 goto out_kfree_ioc;
7092 }
7093
7094 if (down_interruptible(&instance->ioctl_sem)) {
7095 error = -ERESTARTSYS;
7096 goto out_kfree_ioc;
7097 }
7098
7099 for (i = 0; i < wait_time; i++) {
7100
7101 spin_lock_irqsave(&instance->hba_lock, flags);
7102 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7103 spin_unlock_irqrestore(&instance->hba_lock, flags);
7104 break;
7105 }
7106 spin_unlock_irqrestore(&instance->hba_lock, flags);
7107
7108 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7109 dev_notice(&instance->pdev->dev, "waiting "
7110 "for controller reset to finish\n");
7111 }
7112
7113 msleep(1000);
7114 }
7115
7116 spin_lock_irqsave(&instance->hba_lock, flags);
7117 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7118 spin_unlock_irqrestore(&instance->hba_lock, flags);
7119
7120 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7121 error = -ENODEV;
7122 goto out_up;
7123 }
7124 spin_unlock_irqrestore(&instance->hba_lock, flags);
7125
7126 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7127 out_up:
7128 up(&instance->ioctl_sem);
7129
7130 out_kfree_ioc:
7131 kfree(ioc);
7132 return error;
7133 }
7134
7135 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7136 {
7137 struct megasas_instance *instance;
7138 struct megasas_aen aen;
7139 int error;
7140 int i;
7141 unsigned long flags;
7142 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7143
7144 if (file->private_data != file) {
7145 printk(KERN_DEBUG "megasas: fasync_helper was not "
7146 "called first\n");
7147 return -EINVAL;
7148 }
7149
7150 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7151 return -EFAULT;
7152
7153 instance = megasas_lookup_instance(aen.host_no);
7154
7155 if (!instance)
7156 return -ENODEV;
7157
7158 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7159 return -ENODEV;
7160 }
7161
7162 if (instance->unload == 1) {
7163 return -ENODEV;
7164 }
7165
7166 for (i = 0; i < wait_time; i++) {
7167
7168 spin_lock_irqsave(&instance->hba_lock, flags);
7169 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7170 spin_unlock_irqrestore(&instance->hba_lock,
7171 flags);
7172 break;
7173 }
7174
7175 spin_unlock_irqrestore(&instance->hba_lock, flags);
7176
7177 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7178 dev_notice(&instance->pdev->dev, "waiting for "
7179 "controller reset to finish\n");
7180 }
7181
7182 msleep(1000);
7183 }
7184
7185 spin_lock_irqsave(&instance->hba_lock, flags);
7186 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7187 spin_unlock_irqrestore(&instance->hba_lock, flags);
7188 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7189 return -ENODEV;
7190 }
7191 spin_unlock_irqrestore(&instance->hba_lock, flags);
7192
7193 mutex_lock(&instance->reset_mutex);
7194 error = megasas_register_aen(instance, aen.seq_num,
7195 aen.class_locale_word);
7196 mutex_unlock(&instance->reset_mutex);
7197 return error;
7198 }
7199
7200 /**
7201 * megasas_mgmt_ioctl - char node ioctl entry point
7202 */
7203 static long
7204 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7205 {
7206 switch (cmd) {
7207 case MEGASAS_IOC_FIRMWARE:
7208 return megasas_mgmt_ioctl_fw(file, arg);
7209
7210 case MEGASAS_IOC_GET_AEN:
7211 return megasas_mgmt_ioctl_aen(file, arg);
7212 }
7213
7214 return -ENOTTY;
7215 }
7216
7217 #ifdef CONFIG_COMPAT
7218 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7219 {
7220 struct compat_megasas_iocpacket __user *cioc =
7221 (struct compat_megasas_iocpacket __user *)arg;
7222 struct megasas_iocpacket __user *ioc =
7223 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7224 int i;
7225 int error = 0;
7226 compat_uptr_t ptr;
7227 u32 local_sense_off;
7228 u32 local_sense_len;
7229 u32 user_sense_off;
7230
7231 if (clear_user(ioc, sizeof(*ioc)))
7232 return -EFAULT;
7233
7234 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7235 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7236 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7237 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7238 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7239 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7240 return -EFAULT;
7241
7242 /*
7243 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7244 * sense_len is not null, so prepare the 64bit value under
7245 * the same condition.
7246 */
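/*
 * Illustrative note: a 32-bit application stores a 4-byte user pointer at
 * frame.raw + sense_off in its iocpacket. The fixup below reads that
 * compat pointer, widens it with compat_ptr() and stores the native
 * 64-bit pointer at the same offset in the shadow iocpacket, so that
 * megasas_mgmt_fw_ioctl() can treat 32-bit and native callers identically.
 */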
7247 if (get_user(local_sense_off, &ioc->sense_off) ||
7248 get_user(local_sense_len, &ioc->sense_len) ||
7249 get_user(user_sense_off, &cioc->sense_off))
7250 return -EFAULT;
7251
7252 if (local_sense_len) {
7253 void __user **sense_ioc_ptr =
7254 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7255 compat_uptr_t *sense_cioc_ptr =
7256 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7257 if (get_user(ptr, sense_cioc_ptr) ||
7258 put_user(compat_ptr(ptr), sense_ioc_ptr))
7259 return -EFAULT;
7260 }
7261
7262 for (i = 0; i < MAX_IOCTL_SGE; i++) {
7263 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7264 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7265 copy_in_user(&ioc->sgl[i].iov_len,
7266 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7267 return -EFAULT;
7268 }
7269
7270 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7271
7272 if (copy_in_user(&cioc->frame.hdr.cmd_status,
7273 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7274 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7275 return -EFAULT;
7276 }
7277 return error;
7278 }
7279
7280 static long
7281 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7282 unsigned long arg)
7283 {
7284 switch (cmd) {
7285 case MEGASAS_IOC_FIRMWARE32:
7286 return megasas_mgmt_compat_ioctl_fw(file, arg);
7287 case MEGASAS_IOC_GET_AEN:
7288 return megasas_mgmt_ioctl_aen(file, arg);
7289 }
7290
7291 return -ENOTTY;
7292 }
7293 #endif
7294
7295 /*
7296 * File operations structure for management interface
7297 */
7298 static const struct file_operations megasas_mgmt_fops = {
7299 .owner = THIS_MODULE,
7300 .open = megasas_mgmt_open,
7301 .fasync = megasas_mgmt_fasync,
7302 .unlocked_ioctl = megasas_mgmt_ioctl,
7303 .poll = megasas_mgmt_poll,
7304 #ifdef CONFIG_COMPAT
7305 .compat_ioctl = megasas_mgmt_compat_ioctl,
7306 #endif
7307 .llseek = noop_llseek,
7308 };
7309
7310 /*
7311 * PCI hotplug support registration structure
7312 */
7313 static struct pci_driver megasas_pci_driver = {
7314
7315 .name = "megaraid_sas",
7316 .id_table = megasas_pci_table,
7317 .probe = megasas_probe_one,
7318 .remove = megasas_detach_one,
7319 .suspend = megasas_suspend,
7320 .resume = megasas_resume,
7321 .shutdown = megasas_shutdown,
7322 };
7323
7324 /*
7325 * Sysfs driver attributes
7326 */
7327 static ssize_t version_show(struct device_driver *dd, char *buf)
7328 {
7329 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7330 MEGASAS_VERSION);
7331 }
7332 static DRIVER_ATTR_RO(version);
7333
7334 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7335 {
7336 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7337 MEGASAS_RELDATE);
7338 }
7339 static DRIVER_ATTR_RO(release_date);
7340
7341 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7342 {
7343 return sprintf(buf, "%u\n", support_poll_for_event);
7344 }
7345 static DRIVER_ATTR_RO(support_poll_for_event);
7346
7347 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7348 {
7349 return sprintf(buf, "%u\n", support_device_change);
7350 }
7351 static DRIVER_ATTR_RO(support_device_change);
7352
7353 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7354 {
7355 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7356 }
7357
7358 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7359 size_t count)
7360 {
7361 int retval = count;
7362
7363 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7364 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7365 retval = -EINVAL;
7366 }
7367 return retval;
7368 }
7369 static DRIVER_ATTR_RW(dbg_lvl);
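/*
 * Hypothetical usage from user space, assuming the standard sysfs layout
 * for PCI driver attributes:
 *
 *   cat /sys/bus/pci/drivers/megaraid_sas/version
 *   echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */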
7370
7371 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7372 {
7373 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7374 scsi_remove_device(sdev);
7375 scsi_device_put(sdev);
7376 }
7377
7378 static void
7379 megasas_aen_polling(struct work_struct *work)
7380 {
7381 struct megasas_aen_event *ev =
7382 container_of(work, struct megasas_aen_event, hotplug_work.work);
7383 struct megasas_instance *instance = ev->instance;
7384 union megasas_evt_class_locale class_locale;
7385 struct Scsi_Host *host;
7386 struct scsi_device *sdev1;
7387 u16 pd_index = 0;
7388 u16 ld_index = 0;
7389 int i, j, doscan = 0;
7390 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7391 int error;
7392 u8 dcmd_ret = DCMD_SUCCESS;
7393
7394 if (!instance) {
7395 printk(KERN_ERR "megasas: invalid instance!\n");
7396 kfree(ev);
7397 return;
7398 }
7399
7400 /* Adjust event workqueue thread wait time for VF mode */
7401 if (instance->requestorId)
7402 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7403
7404 /* Don't run the event workqueue thread if OCR is running */
7405 mutex_lock(&instance->reset_mutex);
7406
7407 instance->ev = NULL;
7408 host = instance->host;
7409 if (instance->evt_detail) {
7410 megasas_decode_evt(instance);
7411
7412 switch (le32_to_cpu(instance->evt_detail->code)) {
7413
7414 case MR_EVT_PD_INSERTED:
7415 case MR_EVT_PD_REMOVED:
7416 dcmd_ret = megasas_get_pd_list(instance);
7417 if (dcmd_ret == DCMD_SUCCESS)
7418 doscan = SCAN_PD_CHANNEL;
7419 break;
7420
7421 case MR_EVT_LD_OFFLINE:
7422 case MR_EVT_CFG_CLEARED:
7423 case MR_EVT_LD_DELETED:
7424 case MR_EVT_LD_CREATED:
7425 if (!instance->requestorId ||
7426     megasas_get_ld_vf_affiliation(instance, 0))
7427 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7428
7429 if (dcmd_ret == DCMD_SUCCESS)
7430 doscan = SCAN_VD_CHANNEL;
7431
7432 break;
7433
7434 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7435 case MR_EVT_FOREIGN_CFG_IMPORTED:
7436 case MR_EVT_LD_STATE_CHANGE:
7437 dcmd_ret = megasas_get_pd_list(instance);
7438
7439 if (dcmd_ret != DCMD_SUCCESS)
7440 break;
7441
7442 if (!instance->requestorId ||
7443     megasas_get_ld_vf_affiliation(instance, 0))
7444 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7445
7446 if (dcmd_ret != DCMD_SUCCESS)
7447 break;
7448
7449 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7450 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7451 instance->host->host_no);
7452 break;
7453
7454 case MR_EVT_CTRL_PROP_CHANGED:
7455 dcmd_ret = megasas_get_ctrl_info(instance);
7456 break;
7457 default:
7458 doscan = 0;
7459 break;
7460 }
7461 } else {
7462 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7463 mutex_unlock(&instance->reset_mutex);
7464 kfree(ev);
7465 return;
7466 }
7467
7468 mutex_unlock(&instance->reset_mutex);
7469
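/*
 * Sync SCSI devices on the physical-drive channels with the pd_list
 * fetched from firmware: add system PDs that have no sdev yet and
 * remove sdevs whose PD has gone away.
 */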
7470 if (doscan & SCAN_PD_CHANNEL) {
7471 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7472 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7473 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7474 sdev1 = scsi_device_lookup(host, i, j, 0);
7475 if (instance->pd_list[pd_index].driveState ==
7476 MR_PD_STATE_SYSTEM) {
7477 if (!sdev1)
7478 scsi_add_device(host, i, j, 0);
7479 else
7480 scsi_device_put(sdev1);
7481 } else {
7482 if (sdev1)
7483 megasas_remove_scsi_device(sdev1);
7484 }
7485 }
7486 }
7487 }
7488
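/*
 * Likewise for the virtual-drive channels: an ld_ids entry of 0xff
 * marks a logical drive that no longer exists.
 */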
7489 if (doscan & SCAN_VD_CHANNEL) {
7490 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7491 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7492 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7493 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7494 if (instance->ld_ids[ld_index] != 0xff) {
7495 if (!sdev1)
7496 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7497 else
7498 scsi_device_put(sdev1);
7499 } else {
7500 if (sdev1)
7501 megasas_remove_scsi_device(sdev1);
7502 }
7503 }
7504 }
7505 }
7506
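/*
 * If the DCMDs above failed, fall back to the last known sequence
 * number so that no events are missed when the AEN is re-registered.
 */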
7507 if (dcmd_ret == DCMD_SUCCESS)
7508 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7509 else
7510 seq_num = instance->last_seq_num;
7511
7512 /* Register AEN with FW for latest sequence number plus 1 */
7513 class_locale.members.reserved = 0;
7514 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7515 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7516
7517 if (instance->aen_cmd != NULL) {
7518 kfree(ev);
7519 return;
7520 }
7521
7522 mutex_lock(&instance->reset_mutex);
7523 error = megasas_register_aen(instance, seq_num,
7524 class_locale.word);
7525 if (error)
7526 dev_err(&instance->pdev->dev,
7527 "register aen failed error %x\n", error);
7528
7529 mutex_unlock(&instance->reset_mutex);
7530 kfree(ev);
7531 }
7532
7533 /**
7534 * megasas_init - Driver load entry point
7535 */
7536 static int __init megasas_init(void)
7537 {
7538 int rval;
7539
7540 /*
7541 * When booted in a kdump kernel, minimize the memory footprint by
7542 * disabling a few features
7543 */
7544 if (reset_devices) {
7545 msix_vectors = 1;
7546 rdpq_enable = 0;
7547 dual_qdepth_disable = 1;
7548 }
7549
7550 /*
7551 * Announce driver version and other information
7552 */
7553 pr_info("megasas: %s\n", MEGASAS_VERSION);
7554
7555 spin_lock_init(&poll_aen_lock);
7556
7557 support_poll_for_event = 2;
7558 support_device_change = 1;
7559
7560 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7561
7562 /*
7563 * Register character device node
7564 */
7565 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7566
7567 if (rval < 0) {
7568 printk(KERN_DEBUG "megasas: failed to register character device node\n");
7569 return rval;
7570 }
7571
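/* register_chrdev(0, ...) returned a dynamically allocated major number */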
7572 megasas_mgmt_majorno = rval;
7573
7574 /*
7575 * Register ourselves as PCI hotplug module
7576 */
7577 rval = pci_register_driver(&megasas_pci_driver);
7578
7579 if (rval) {
7580 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7581 goto err_pcidrv;
7582 }
7583
7584 rval = driver_create_file(&megasas_pci_driver.driver,
7585 &driver_attr_version);
7586 if (rval)
7587 goto err_dcf_attr_ver;
7588
7589 rval = driver_create_file(&megasas_pci_driver.driver,
7590 &driver_attr_release_date);
7591 if (rval)
7592 goto err_dcf_rel_date;
7593
7594 rval = driver_create_file(&megasas_pci_driver.driver,
7595 &driver_attr_support_poll_for_event);
7596 if (rval)
7597 goto err_dcf_support_poll_for_event;
7598
7599 rval = driver_create_file(&megasas_pci_driver.driver,
7600 &driver_attr_dbg_lvl);
7601 if (rval)
7602 goto err_dcf_dbg_lvl;
7603 rval = driver_create_file(&megasas_pci_driver.driver,
7604 &driver_attr_support_device_change);
7605 if (rval)
7606 goto err_dcf_support_device_change;
7607
7608 return rval;
7609
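/* Unwind in reverse order of creation on failure */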
7610 err_dcf_support_device_change:
7611 driver_remove_file(&megasas_pci_driver.driver,
7612 &driver_attr_dbg_lvl);
7613 err_dcf_dbg_lvl:
7614 driver_remove_file(&megasas_pci_driver.driver,
7615 &driver_attr_support_poll_for_event);
7616 err_dcf_support_poll_for_event:
7617 driver_remove_file(&megasas_pci_driver.driver,
7618 &driver_attr_release_date);
7619 err_dcf_rel_date:
7620 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7621 err_dcf_attr_ver:
7622 pci_unregister_driver(&megasas_pci_driver);
7623 err_pcidrv:
7624 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7625 return rval;
7626 }
7627
7628 /**
7629 * megasas_exit - Driver unload entry point
7630 */
7631 static void __exit megasas_exit(void)
7632 {
7633 driver_remove_file(&megasas_pci_driver.driver,
7634 &driver_attr_dbg_lvl);
7635 driver_remove_file(&megasas_pci_driver.driver,
7636 &driver_attr_support_poll_for_event);
7637 driver_remove_file(&megasas_pci_driver.driver,
7638 &driver_attr_support_device_change);
7639 driver_remove_file(&megasas_pci_driver.driver,
7640 &driver_attr_release_date);
7641 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7642
7643 pci_unregister_driver(&megasas_pci_driver);
7644 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7645 }
7646
7647 module_init(megasas_init);
7648 module_exit(megasas_exit);