1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2003-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * Authors: Avago Technologies
21 * Sreenivas Bagalkote
22 * Sumant Patro
23 * Bo Yang
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/slab.h>
45 #include <linux/uaccess.h>
46 #include <asm/unaligned.h>
47 #include <linux/fs.h>
48 #include <linux/compat.h>
49 #include <linux/blkdev.h>
50 #include <linux/mutex.h>
51 #include <linux/poll.h>
52
53 #include <scsi/scsi.h>
54 #include <scsi/scsi_cmnd.h>
55 #include <scsi/scsi_device.h>
56 #include <scsi/scsi_host.h>
57 #include <scsi/scsi_tcq.h>
58 #include "megaraid_sas_fusion.h"
59 #include "megaraid_sas.h"
60
61 /*
62 * Number of sectors per IO command
63 * Will be set in megasas_init_mfi if user does not provide
64 */
65 static unsigned int max_sectors;
66 module_param_named(max_sectors, max_sectors, int, 0);
67 MODULE_PARM_DESC(max_sectors,
68 "Maximum number of sectors per IO command");
69
70 static int msix_disable;
71 module_param(msix_disable, int, S_IRUGO);
72 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
73
74 static unsigned int msix_vectors;
75 module_param(msix_vectors, int, S_IRUGO);
76 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
77
78 static int allow_vf_ioctls;
79 module_param(allow_vf_ioctls, int, S_IRUGO);
80 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
81
82 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
83 module_param(throttlequeuedepth, int, S_IRUGO);
84 MODULE_PARM_DESC(throttlequeuedepth,
85 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
86
87 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
88 module_param(resetwaittime, int, S_IRUGO);
89 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
90 "before resetting adapter. Default: 180");
91
92 int smp_affinity_enable = 1;
93 module_param(smp_affinity_enable, int, S_IRUGO);
94 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
95
96 int rdpq_enable = 1;
97 module_param(rdpq_enable, int, S_IRUGO);
98 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable. Default: enable(1)");
99
100 unsigned int dual_qdepth_disable;
101 module_param(dual_qdepth_disable, int, S_IRUGO);
102 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
103
104 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
105 module_param(scmd_timeout, int, S_IRUGO);
106 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
107
108 MODULE_LICENSE("GPL");
109 MODULE_VERSION(MEGASAS_VERSION);
110 MODULE_AUTHOR("megaraidlinux.pdl@avagotech.com");
111 MODULE_DESCRIPTION("Avago MegaRAID SAS Driver");
112
113 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
114 static int megasas_get_pd_list(struct megasas_instance *instance);
115 static int megasas_ld_list_query(struct megasas_instance *instance,
116 u8 query_type);
117 static int megasas_issue_init_mfi(struct megasas_instance *instance);
118 static int megasas_register_aen(struct megasas_instance *instance,
119 u32 seq_num, u32 class_locale_word);
120 static void megasas_get_pd_info(struct megasas_instance *instance,
121 struct scsi_device *sdev);
122 static int megasas_get_target_prop(struct megasas_instance *instance,
123 struct scsi_device *sdev);
124 /*
125 * PCI ID table for all supported controllers
126 */
127 static struct pci_device_id megasas_pci_table[] = {
128
129 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
130 /* xscale IOP */
131 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
132 /* ppc IOP */
133 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
134 /* ppc IOP */
135 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
136 /* gen2*/
137 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
138 /* gen2*/
139 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
140 /* skinny*/
141 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
142 /* skinny*/
143 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
144 /* xscale IOP, vega */
145 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
146 /* xscale IOP */
147 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
148 /* Fusion */
149 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
150 /* Plasma */
151 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
152 /* Invader */
153 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
154 /* Fury */
155 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
156 /* Intruder */
157 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
158 /* Intruder 24 port*/
159 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
161 /* VENTURA */
162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
163 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
165 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
167 {}
168 };
169
170 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
171
172 static int megasas_mgmt_majorno;
173 struct megasas_mgmt_info megasas_mgmt_info;
174 static struct fasync_struct *megasas_async_queue;
175 static DEFINE_MUTEX(megasas_async_queue_mutex);
176
177 static int megasas_poll_wait_aen;
178 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
179 static u32 support_poll_for_event;
180 u32 megasas_dbg_lvl;
181 static u32 support_device_change;
182
183 /* define lock for aen poll */
184 spinlock_t poll_aen_lock;
185
186 void
187 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
188 u8 alt_status);
189 static u32
190 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs);
191 static int
192 megasas_adp_reset_gen2(struct megasas_instance *instance,
193 struct megasas_register_set __iomem *reg_set);
194 static irqreturn_t megasas_isr(int irq, void *devp);
195 static u32
196 megasas_init_adapter_mfi(struct megasas_instance *instance);
197 u32
198 megasas_build_and_issue_cmd(struct megasas_instance *instance,
199 struct scsi_cmnd *scmd);
200 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
201 int
202 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
203 int seconds);
204 void megasas_fusion_ocr_wq(struct work_struct *work);
205 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
206 int initial);
207
208 void
209 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
210 {
211 instance->instancet->fire_cmd(instance,
212 cmd->frame_phys_addr, 0, instance->reg_set);
213 return;
214 }
215
216 /**
217 * megasas_get_cmd - Get a command from the free pool
218 * @instance: Adapter soft state
219 *
220 * Returns a free command from the pool
221 */
222 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
223 *instance)
224 {
225 unsigned long flags;
226 struct megasas_cmd *cmd = NULL;
227
228 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
229
230 if (!list_empty(&instance->cmd_pool)) {
231 cmd = list_entry((&instance->cmd_pool)->next,
232 struct megasas_cmd, list);
233 list_del_init(&cmd->list);
234 } else {
235 dev_err(&instance->pdev->dev, "Command pool empty!\n");
236 }
237
238 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
239 return cmd;
240 }
241
242 /**
243 * megasas_return_cmd - Return a cmd to free command pool
244 * @instance: Adapter soft state
245 * @cmd: Command packet to be returned to free command pool
246 */
247 inline void
248 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
249 {
250 unsigned long flags;
251 u32 blk_tags;
252 struct megasas_cmd_fusion *cmd_fusion;
253 struct fusion_context *fusion = instance->ctrl_context;
254
255 /* This flag is used only for fusion adapter.
256 * Wait for Interrupt for Polled mode DCMD
257 */
258 if (cmd->flags & DRV_DCMD_POLLED_MODE)
259 return;
260
261 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
262
263 if (fusion) {
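		/*
		 * On Fusion adapters each MFI command is paired with a fusion
		 * command slot placed after the SCSI tag range; return that
		 * slot to the fusion pool as well.
		 */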
264 blk_tags = instance->max_scsi_cmds + cmd->index;
265 cmd_fusion = fusion->cmd_list[blk_tags];
266 megasas_return_cmd_fusion(instance, cmd_fusion);
267 }
268 cmd->scmd = NULL;
269 cmd->frame_count = 0;
270 cmd->flags = 0;
271 memset(cmd->frame, 0, instance->mfi_frame_size);
272 cmd->frame->io.context = cpu_to_le32(cmd->index);
273 if (!fusion && reset_devices)
274 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
275 list_add(&cmd->list, (&instance->cmd_pool)->next);
276
277 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
278
279 }
280
281 static const char *
282 format_timestamp(uint32_t timestamp)
283 {
284 static char buffer[32];
285
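	/*
	 * A high byte of 0xff marks a boot-relative timestamp; the low
	 * 24 bits then hold seconds since boot.
	 */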
286 if ((timestamp & 0xff000000) == 0xff000000)
287 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
288 0x00ffffff);
289 else
290 snprintf(buffer, sizeof(buffer), "%us", timestamp);
291 return buffer;
292 }
293
294 static const char *
295 format_class(int8_t class)
296 {
297 static char buffer[6];
298
299 switch (class) {
300 case MFI_EVT_CLASS_DEBUG:
301 return "debug";
302 case MFI_EVT_CLASS_PROGRESS:
303 return "progress";
304 case MFI_EVT_CLASS_INFO:
305 return "info";
306 case MFI_EVT_CLASS_WARNING:
307 return "WARN";
308 case MFI_EVT_CLASS_CRITICAL:
309 return "CRIT";
310 case MFI_EVT_CLASS_FATAL:
311 return "FATAL";
312 case MFI_EVT_CLASS_DEAD:
313 return "DEAD";
314 default:
315 snprintf(buffer, sizeof(buffer), "%d", class);
316 return buffer;
317 }
318 }
319
320 /**
321 * megasas_decode_evt: Decode FW AEN event and print critical event
322 * for information.
323 * @instance: Adapter soft state
324 */
325 static void
326 megasas_decode_evt(struct megasas_instance *instance)
327 {
328 struct megasas_evt_detail *evt_detail = instance->evt_detail;
329 union megasas_evt_class_locale class_locale;
330 class_locale.word = le32_to_cpu(evt_detail->cl.word);
331
332 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
333 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
334 le32_to_cpu(evt_detail->seq_num),
335 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
336 (class_locale.members.locale),
337 format_class(class_locale.members.class),
338 evt_detail->description);
339 }
340
341 /**
342 * The following functions are defined for xscale
343 * (deviceid : 1064R, PERC5) controllers
344 */
345
346 /**
347 * megasas_enable_intr_xscale - Enables interrupts
348 * @regs: MFI register set
349 */
350 static inline void
351 megasas_enable_intr_xscale(struct megasas_instance *instance)
352 {
353 struct megasas_register_set __iomem *regs;
354
355 regs = instance->reg_set;
356 writel(0, &(regs)->outbound_intr_mask);
357
358 /* Dummy readl to force pci flush */
359 readl(&regs->outbound_intr_mask);
360 }
361
362 /**
363 * megasas_disable_intr_xscale -Disables interrupt
364 * @regs: MFI register set
365 */
366 static inline void
367 megasas_disable_intr_xscale(struct megasas_instance *instance)
368 {
369 struct megasas_register_set __iomem *regs;
370 u32 mask = 0x1f;
371
372 regs = instance->reg_set;
373 writel(mask, &regs->outbound_intr_mask);
374 /* Dummy readl to force pci flush */
375 readl(&regs->outbound_intr_mask);
376 }
377
378 /**
379 * megasas_read_fw_status_reg_xscale - returns the current FW status value
380 * @regs: MFI register set
381 */
382 static u32
383 megasas_read_fw_status_reg_xscale(struct megasas_register_set __iomem * regs)
384 {
385 return readl(&(regs)->outbound_msg_0);
386 }
387 /**
388 * megasas_clear_intr_xscale - Check & clear interrupt
389 * @regs: MFI register set
390 */
391 static int
392 megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
393 {
394 u32 status;
395 u32 mfiStatus = 0;
396
397 /*
398 * Check if it is our interrupt
399 */
400 status = readl(&regs->outbound_intr_status);
401
402 if (status & MFI_OB_INTR_STATUS_MASK)
403 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
404 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
405 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
406
407 /*
408 * Clear the interrupt by writing back the same value
409 */
410 if (mfiStatus)
411 writel(status, &regs->outbound_intr_status);
412
413 /* Dummy readl to force pci flush */
414 readl(&regs->outbound_intr_status);
415
416 return mfiStatus;
417 }
418
419 /**
420 * megasas_fire_cmd_xscale - Sends command to the FW
421 * @frame_phys_addr : Physical address of cmd
422 * @frame_count : Number of frames for the command
423 * @regs : MFI register set
424 */
425 static inline void
426 megasas_fire_cmd_xscale(struct megasas_instance *instance,
427 dma_addr_t frame_phys_addr,
428 u32 frame_count,
429 struct megasas_register_set __iomem *regs)
430 {
431 unsigned long flags;
432
433 spin_lock_irqsave(&instance->hba_lock, flags);
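	/*
	 * Pack the frame address (shifted right by 3) and the frame count
	 * into a single inbound queue port write.
	 */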
434 writel((frame_phys_addr >> 3)|(frame_count),
435 &(regs)->inbound_queue_port);
436 spin_unlock_irqrestore(&instance->hba_lock, flags);
437 }
438
439 /**
440 * megasas_adp_reset_xscale - For controller reset
441 * @regs: MFI register set
442 */
443 static int
444 megasas_adp_reset_xscale(struct megasas_instance *instance,
445 struct megasas_register_set __iomem *regs)
446 {
447 u32 i;
448 u32 pcidata;
449
450 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
451
452 for (i = 0; i < 3; i++)
453 msleep(1000); /* sleep for 3 secs */
454 pcidata = 0;
455 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
456 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
457 if (pcidata & 0x2) {
458 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
459 pcidata &= ~0x2;
460 pci_write_config_dword(instance->pdev,
461 MFI_1068_PCSR_OFFSET, pcidata);
462
463 for (i = 0; i < 2; i++)
464 msleep(1000); /* need to wait 2 secs again */
465
466 pcidata = 0;
467 pci_read_config_dword(instance->pdev,
468 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
469 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
470 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
471 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
472 pcidata = 0;
473 pci_write_config_dword(instance->pdev,
474 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
475 }
476 }
477 return 0;
478 }
479
480 /**
481 * megasas_check_reset_xscale - For controller reset check
482 * @regs: MFI register set
483 */
484 static int
485 megasas_check_reset_xscale(struct megasas_instance *instance,
486 struct megasas_register_set __iomem *regs)
487 {
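	/*
	 * A reset is in progress when the HBA is not operational and the
	 * consumer index carries the adapter-reset-in-progress signature.
	 */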
488 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
489 (le32_to_cpu(*instance->consumer) ==
490 MEGASAS_ADPRESET_INPROG_SIGN))
491 return 1;
492 return 0;
493 }
494
495 static struct megasas_instance_template megasas_instance_template_xscale = {
496
497 .fire_cmd = megasas_fire_cmd_xscale,
498 .enable_intr = megasas_enable_intr_xscale,
499 .disable_intr = megasas_disable_intr_xscale,
500 .clear_intr = megasas_clear_intr_xscale,
501 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
502 .adp_reset = megasas_adp_reset_xscale,
503 .check_reset = megasas_check_reset_xscale,
504 .service_isr = megasas_isr,
505 .tasklet = megasas_complete_cmd_dpc,
506 .init_adapter = megasas_init_adapter_mfi,
507 .build_and_issue_cmd = megasas_build_and_issue_cmd,
508 .issue_dcmd = megasas_issue_dcmd,
509 };
510
511 /**
512 * This is the end of set of functions & definitions specific
513 * to xscale (deviceid : 1064R, PERC5) controllers
514 */
515
516 /**
517 * The following functions are defined for ppc (deviceid : 0x60)
518 * controllers
519 */
520
521 /**
522 * megasas_enable_intr_ppc - Enables interrupts
523 * @regs: MFI register set
524 */
525 static inline void
526 megasas_enable_intr_ppc(struct megasas_instance *instance)
527 {
528 struct megasas_register_set __iomem *regs;
529
530 regs = instance->reg_set;
531 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
532
533 writel(~0x80000000, &(regs)->outbound_intr_mask);
534
535 /* Dummy readl to force pci flush */
536 readl(&regs->outbound_intr_mask);
537 }
538
539 /**
540 * megasas_disable_intr_ppc - Disable interrupt
541 * @regs: MFI register set
542 */
543 static inline void
544 megasas_disable_intr_ppc(struct megasas_instance *instance)
545 {
546 struct megasas_register_set __iomem *regs;
547 u32 mask = 0xFFFFFFFF;
548
549 regs = instance->reg_set;
550 writel(mask, &regs->outbound_intr_mask);
551 /* Dummy readl to force pci flush */
552 readl(&regs->outbound_intr_mask);
553 }
554
555 /**
556 * megasas_read_fw_status_reg_ppc - returns the current FW status value
557 * @regs: MFI register set
558 */
559 static u32
560 megasas_read_fw_status_reg_ppc(struct megasas_register_set __iomem * regs)
561 {
562 return readl(&(regs)->outbound_scratch_pad);
563 }
564
565 /**
566 * megasas_clear_intr_ppc - Check & clear interrupt
567 * @regs: MFI register set
568 */
569 static int
570 megasas_clear_intr_ppc(struct megasas_register_set __iomem * regs)
571 {
572 u32 status, mfiStatus = 0;
573
574 /*
575 * Check if it is our interrupt
576 */
577 status = readl(&regs->outbound_intr_status);
578
579 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
580 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
581
582 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
583 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
584
585 /*
586 * Clear the interrupt by writing back the same value
587 */
588 writel(status, &regs->outbound_doorbell_clear);
589
590 /* Dummy readl to force pci flush */
591 readl(&regs->outbound_doorbell_clear);
592
593 return mfiStatus;
594 }
595
596 /**
597 * megasas_fire_cmd_ppc - Sends command to the FW
598 * @frame_phys_addr : Physical address of cmd
599 * @frame_count : Number of frames for the command
600 * @regs : MFI register set
601 */
602 static inline void
603 megasas_fire_cmd_ppc(struct megasas_instance *instance,
604 dma_addr_t frame_phys_addr,
605 u32 frame_count,
606 struct megasas_register_set __iomem *regs)
607 {
608 unsigned long flags;
609
610 spin_lock_irqsave(&instance->hba_lock, flags);
611 writel((frame_phys_addr | (frame_count<<1))|1,
612 &(regs)->inbound_queue_port);
613 spin_unlock_irqrestore(&instance->hba_lock, flags);
614 }
615
616 /**
617 * megasas_check_reset_ppc - For controller reset check
618 * @regs: MFI register set
619 */
620 static int
621 megasas_check_reset_ppc(struct megasas_instance *instance,
622 struct megasas_register_set __iomem *regs)
623 {
624 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
625 return 1;
626
627 return 0;
628 }
629
630 static struct megasas_instance_template megasas_instance_template_ppc = {
631
632 .fire_cmd = megasas_fire_cmd_ppc,
633 .enable_intr = megasas_enable_intr_ppc,
634 .disable_intr = megasas_disable_intr_ppc,
635 .clear_intr = megasas_clear_intr_ppc,
636 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
637 .adp_reset = megasas_adp_reset_xscale,
638 .check_reset = megasas_check_reset_ppc,
639 .service_isr = megasas_isr,
640 .tasklet = megasas_complete_cmd_dpc,
641 .init_adapter = megasas_init_adapter_mfi,
642 .build_and_issue_cmd = megasas_build_and_issue_cmd,
643 .issue_dcmd = megasas_issue_dcmd,
644 };
645
646 /**
647 * megasas_enable_intr_skinny - Enables interrupts
648 * @regs: MFI register set
649 */
650 static inline void
651 megasas_enable_intr_skinny(struct megasas_instance *instance)
652 {
653 struct megasas_register_set __iomem *regs;
654
655 regs = instance->reg_set;
656 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
657
658 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
659
660 /* Dummy readl to force pci flush */
661 readl(&regs->outbound_intr_mask);
662 }
663
664 /**
665 * megasas_disable_intr_skinny - Disables interrupt
666 * @regs: MFI register set
667 */
668 static inline void
669 megasas_disable_intr_skinny(struct megasas_instance *instance)
670 {
671 struct megasas_register_set __iomem *regs;
672 u32 mask = 0xFFFFFFFF;
673
674 regs = instance->reg_set;
675 writel(mask, &regs->outbound_intr_mask);
676 /* Dummy readl to force pci flush */
677 readl(&regs->outbound_intr_mask);
678 }
679
680 /**
681 * megasas_read_fw_status_reg_skinny - returns the current FW status value
682 * @regs: MFI register set
683 */
684 static u32
685 megasas_read_fw_status_reg_skinny(struct megasas_register_set __iomem *regs)
686 {
687 return readl(&(regs)->outbound_scratch_pad);
688 }
689
690 /**
691 * megasas_clear_intr_skinny - Check & clear interrupt
692 * @regs: MFI register set
693 */
694 static int
695 megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
696 {
697 u32 status;
698 u32 mfiStatus = 0;
699
700 /*
701 * Check if it is our interrupt
702 */
703 status = readl(&regs->outbound_intr_status);
704
705 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
706 return 0;
707 }
708
709 /*
710 * Check whether the firmware has faulted
711 */
712 if ((megasas_read_fw_status_reg_skinny(regs) & MFI_STATE_MASK) ==
713 MFI_STATE_FAULT) {
714 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
715 } else
716 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
717
718 /*
719 * Clear the interrupt by writing back the same value
720 */
721 writel(status, &regs->outbound_intr_status);
722
723 /*
724 * dummy read to flush PCI
725 */
726 readl(&regs->outbound_intr_status);
727
728 return mfiStatus;
729 }
730
731 /**
732 * megasas_fire_cmd_skinny - Sends command to the FW
733 * @frame_phys_addr : Physical address of cmd
734 * @frame_count : Number of frames for the command
735 * @regs : MFI register set
736 */
737 static inline void
738 megasas_fire_cmd_skinny(struct megasas_instance *instance,
739 dma_addr_t frame_phys_addr,
740 u32 frame_count,
741 struct megasas_register_set __iomem *regs)
742 {
743 unsigned long flags;
744
745 spin_lock_irqsave(&instance->hba_lock, flags);
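	/*
	 * The 64-bit frame address is split across the high and low queue
	 * ports; the low word also encodes the frame count, with bit 0 set.
	 */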
746 writel(upper_32_bits(frame_phys_addr),
747 &(regs)->inbound_high_queue_port);
748 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
749 &(regs)->inbound_low_queue_port);
750 mmiowb();
751 spin_unlock_irqrestore(&instance->hba_lock, flags);
752 }
753
754 /**
755 * megasas_check_reset_skinny - For controller reset check
756 * @regs: MFI register set
757 */
758 static int
759 megasas_check_reset_skinny(struct megasas_instance *instance,
760 struct megasas_register_set __iomem *regs)
761 {
762 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
763 return 1;
764
765 return 0;
766 }
767
768 static struct megasas_instance_template megasas_instance_template_skinny = {
769
770 .fire_cmd = megasas_fire_cmd_skinny,
771 .enable_intr = megasas_enable_intr_skinny,
772 .disable_intr = megasas_disable_intr_skinny,
773 .clear_intr = megasas_clear_intr_skinny,
774 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
775 .adp_reset = megasas_adp_reset_gen2,
776 .check_reset = megasas_check_reset_skinny,
777 .service_isr = megasas_isr,
778 .tasklet = megasas_complete_cmd_dpc,
779 .init_adapter = megasas_init_adapter_mfi,
780 .build_and_issue_cmd = megasas_build_and_issue_cmd,
781 .issue_dcmd = megasas_issue_dcmd,
782 };
783
784
785 /**
786 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
787 * controllers
788 */
789
790 /**
791 * megasas_enable_intr_gen2 - Enables interrupts
792 * @regs: MFI register set
793 */
794 static inline void
795 megasas_enable_intr_gen2(struct megasas_instance *instance)
796 {
797 struct megasas_register_set __iomem *regs;
798
799 regs = instance->reg_set;
800 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
801
802 /* write ~0x00000005 (4 | 1) to the intr mask */
803 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
804
805 /* Dummy readl to force pci flush */
806 readl(&regs->outbound_intr_mask);
807 }
808
809 /**
810 * megasas_disable_intr_gen2 - Disables interrupt
811 * @regs: MFI register set
812 */
813 static inline void
814 megasas_disable_intr_gen2(struct megasas_instance *instance)
815 {
816 struct megasas_register_set __iomem *regs;
817 u32 mask = 0xFFFFFFFF;
818
819 regs = instance->reg_set;
820 writel(mask, &regs->outbound_intr_mask);
821 /* Dummy readl to force pci flush */
822 readl(&regs->outbound_intr_mask);
823 }
824
825 /**
826 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
827 * @regs: MFI register set
828 */
829 static u32
830 megasas_read_fw_status_reg_gen2(struct megasas_register_set __iomem *regs)
831 {
832 return readl(&(regs)->outbound_scratch_pad);
833 }
834
835 /**
836 * megasas_clear_intr_gen2 - Check & clear interrupt
837 * @regs: MFI register set
838 */
839 static int
840 megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
841 {
842 u32 status;
843 u32 mfiStatus = 0;
844
845 /*
846 * Check if it is our interrupt
847 */
848 status = readl(&regs->outbound_intr_status);
849
850 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
851 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
852 }
853 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
854 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
855 }
856
857 /*
858 * Clear the interrupt by writing back the same value
859 */
860 if (mfiStatus)
861 writel(status, &regs->outbound_doorbell_clear);
862
863 /* Dummy readl to force pci flush */
864 readl(&regs->outbound_intr_status);
865
866 return mfiStatus;
867 }
868 /**
869 * megasas_fire_cmd_gen2 - Sends command to the FW
870 * @frame_phys_addr : Physical address of cmd
871 * @frame_count : Number of frames for the command
872 * @regs : MFI register set
873 */
874 static inline void
875 megasas_fire_cmd_gen2(struct megasas_instance *instance,
876 dma_addr_t frame_phys_addr,
877 u32 frame_count,
878 struct megasas_register_set __iomem *regs)
879 {
880 unsigned long flags;
881
882 spin_lock_irqsave(&instance->hba_lock, flags);
883 writel((frame_phys_addr | (frame_count<<1))|1,
884 &(regs)->inbound_queue_port);
885 spin_unlock_irqrestore(&instance->hba_lock, flags);
886 }
887
888 /**
889 * megasas_adp_reset_gen2 - For controller reset
890 * @regs: MFI register set
891 */
892 static int
893 megasas_adp_reset_gen2(struct megasas_instance *instance,
894 struct megasas_register_set __iomem *reg_set)
895 {
896 u32 retry = 0;
897 u32 HostDiag;
898 u32 __iomem *seq_offset = &reg_set->seq_offset;
899 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
900
901 if (instance->instancet == &megasas_instance_template_skinny) {
902 seq_offset = &reg_set->fusion_seq_offset;
903 hostdiag_offset = &reg_set->fusion_host_diag;
904 }
905
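	/*
	 * Write the diag write-enable key sequence; the loop below waits
	 * for DIAG_WRITE_ENABLE before the reset can be requested.
	 */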
906 writel(0, seq_offset);
907 writel(4, seq_offset);
908 writel(0xb, seq_offset);
909 writel(2, seq_offset);
910 writel(7, seq_offset);
911 writel(0xd, seq_offset);
912
913 msleep(1000);
914
915 HostDiag = (u32)readl(hostdiag_offset);
916
917 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
918 msleep(100);
919 HostDiag = (u32)readl(hostdiag_offset);
920 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
921 retry, HostDiag);
922
923 if (retry++ >= 100)
924 return 1;
925
926 }
927
928 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
929
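	/* Write enable granted: request the reset and wait for the reset bit to self-clear */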
930 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
931
932 ssleep(10);
933
934 HostDiag = (u32)readl(hostdiag_offset);
935 while (HostDiag & DIAG_RESET_ADAPTER) {
936 msleep(100);
937 HostDiag = (u32)readl(hostdiag_offset);
938 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
939 retry, HostDiag);
940
941 if (retry++ >= 1000)
942 return 1;
943
944 }
945 return 0;
946 }
947
948 /**
949 * megasas_check_reset_gen2 - For controller reset check
950 * @regs: MFI register set
951 */
952 static int
953 megasas_check_reset_gen2(struct megasas_instance *instance,
954 struct megasas_register_set __iomem *regs)
955 {
956 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
957 return 1;
958
959 return 0;
960 }
961
962 static struct megasas_instance_template megasas_instance_template_gen2 = {
963
964 .fire_cmd = megasas_fire_cmd_gen2,
965 .enable_intr = megasas_enable_intr_gen2,
966 .disable_intr = megasas_disable_intr_gen2,
967 .clear_intr = megasas_clear_intr_gen2,
968 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
969 .adp_reset = megasas_adp_reset_gen2,
970 .check_reset = megasas_check_reset_gen2,
971 .service_isr = megasas_isr,
972 .tasklet = megasas_complete_cmd_dpc,
973 .init_adapter = megasas_init_adapter_mfi,
974 .build_and_issue_cmd = megasas_build_and_issue_cmd,
975 .issue_dcmd = megasas_issue_dcmd,
976 };
977
978 /**
979 * This is the end of set of functions & definitions
980 * specific to gen2 (deviceid : 0x78, 0x79) controllers
981 */
982
983 /*
984 * Template added for TB (Fusion)
985 */
986 extern struct megasas_instance_template megasas_instance_template_fusion;
987
988 /**
989 * megasas_issue_polled - Issues a polling command
990 * @instance: Adapter soft state
991 * @cmd: Command packet to be issued
992 *
993 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
994 */
995 int
996 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
997 {
998 struct megasas_header *frame_hdr = &cmd->frame->hdr;
999
1000 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1001 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1002
1003 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1004 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1005 __func__, __LINE__);
1006 return DCMD_NOT_FIRED;
1007 }
1008
1009 instance->instancet->issue_dcmd(instance, cmd);
1010
1011 return wait_and_poll(instance, cmd, instance->requestorId ?
1012 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1013 }
1014
1015 /**
1016 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1017 * @instance: Adapter soft state
1018 * @cmd: Command to be issued
1019 * @timeout: Timeout in seconds
1020 *
1021 * This function waits on an event for the command to be returned from ISR.
1022 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1023 * Used to issue ioctl commands.
1024 */
1025 int
1026 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1027 struct megasas_cmd *cmd, int timeout)
1028 {
1029 int ret = 0;
1030 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1031
1032 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1033 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1034 __func__, __LINE__);
1035 return DCMD_NOT_FIRED;
1036 }
1037
1038 instance->instancet->issue_dcmd(instance, cmd);
1039
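	/*
	 * The completion path updates cmd_status_drv and wakes
	 * int_cmd_wait_q; wait here, bounded by the caller's timeout if any.
	 */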
1040 if (timeout) {
1041 ret = wait_event_timeout(instance->int_cmd_wait_q,
1042 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1043 if (!ret) {
1044 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1045 __func__, __LINE__);
1046 return DCMD_TIMEOUT;
1047 }
1048 } else
1049 wait_event(instance->int_cmd_wait_q,
1050 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1051
1052 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1053 DCMD_SUCCESS : DCMD_FAILED;
1054 }
1055
1056 /**
1057 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1058 * @instance: Adapter soft state
1059 * @cmd_to_abort: Previously issued cmd to be aborted
1060 * @timeout: Timeout in seconds
1061 *
1062 * MFI firmware can abort a previously issued AEN command (automatic event
1063 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1064 * cmd and waits for return status.
1065 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1066 */
1067 static int
1068 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1069 struct megasas_cmd *cmd_to_abort, int timeout)
1070 {
1071 struct megasas_cmd *cmd;
1072 struct megasas_abort_frame *abort_fr;
1073 int ret = 0;
1074
1075 cmd = megasas_get_cmd(instance);
1076
1077 if (!cmd)
1078 return -1;
1079
1080 abort_fr = &cmd->frame->abort;
1081
1082 /*
1083 * Prepare and issue the abort frame
1084 */
1085 abort_fr->cmd = MFI_CMD_ABORT;
1086 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1087 abort_fr->flags = cpu_to_le16(0);
1088 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1089 abort_fr->abort_mfi_phys_addr_lo =
1090 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1091 abort_fr->abort_mfi_phys_addr_hi =
1092 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1093
1094 cmd->sync_cmd = 1;
1095 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1096
1097 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1098 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1099 __func__, __LINE__);
1100 return DCMD_NOT_FIRED;
1101 }
1102
1103 instance->instancet->issue_dcmd(instance, cmd);
1104
1105 if (timeout) {
1106 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1107 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1108 if (!ret) {
1109 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1110 __func__, __LINE__);
1111 return DCMD_TIMEOUT;
1112 }
1113 } else
1114 wait_event(instance->abort_cmd_wait_q,
1115 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1116
1117 cmd->sync_cmd = 0;
1118
1119 megasas_return_cmd(instance, cmd);
1120 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1121 DCMD_SUCCESS : DCMD_FAILED;
1122 }
1123
1124 /**
1125 * megasas_make_sgl32 - Prepares 32-bit SGL
1126 * @instance: Adapter soft state
1127 * @scp: SCSI command from the mid-layer
1128 * @mfi_sgl: SGL to be filled in
1129 *
1130 * If successful, this function returns the number of SG elements. Otherwise,
1131 * it returns -1.
1132 */
1133 static int
1134 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1135 union megasas_sgl *mfi_sgl)
1136 {
1137 int i;
1138 int sge_count;
1139 struct scatterlist *os_sgl;
1140
1141 sge_count = scsi_dma_map(scp);
1142 BUG_ON(sge_count < 0);
1143
1144 if (sge_count) {
1145 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1146 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1147 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1148 }
1149 }
1150 return sge_count;
1151 }
1152
1153 /**
1154 * megasas_make_sgl64 - Prepares 64-bit SGL
1155 * @instance: Adapter soft state
1156 * @scp: SCSI command from the mid-layer
1157 * @mfi_sgl: SGL to be filled in
1158 *
1159 * If successful, this function returns the number of SG elements. Otherwise,
1160 * it returns -1.
1161 */
1162 static int
1163 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1164 union megasas_sgl *mfi_sgl)
1165 {
1166 int i;
1167 int sge_count;
1168 struct scatterlist *os_sgl;
1169
1170 sge_count = scsi_dma_map(scp);
1171 BUG_ON(sge_count < 0);
1172
1173 if (sge_count) {
1174 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1175 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1176 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1177 }
1178 }
1179 return sge_count;
1180 }
1181
1182 /**
1183 * megasas_make_sgl_skinny - Prepares IEEE SGL
1184 * @instance: Adapter soft state
1185 * @scp: SCSI command from the mid-layer
1186 * @mfi_sgl: SGL to be filled in
1187 *
1188 * If successful, this function returns the number of SG elements. Otherwise,
1189 * it returns -1.
1190 */
1191 static int
1192 megasas_make_sgl_skinny(struct megasas_instance *instance,
1193 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1194 {
1195 int i;
1196 int sge_count;
1197 struct scatterlist *os_sgl;
1198
1199 sge_count = scsi_dma_map(scp);
1200
1201 if (sge_count) {
1202 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1203 mfi_sgl->sge_skinny[i].length =
1204 cpu_to_le32(sg_dma_len(os_sgl));
1205 mfi_sgl->sge_skinny[i].phys_addr =
1206 cpu_to_le64(sg_dma_address(os_sgl));
1207 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1208 }
1209 }
1210 return sge_count;
1211 }
1212
1213 /**
1214 * megasas_get_frame_count - Computes the number of frames
1215 * @frame_type : type of frame- io or pthru frame
1216 * @sge_count : number of sg elements
1217 *
1218 * Returns the number of frames required for the number of SG elements (sge_count)
1219 */
1220
1221 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1222 u8 sge_count, u8 frame_type)
1223 {
1224 int num_cnt;
1225 int sge_bytes;
1226 u32 sge_sz;
1227 u32 frame_count = 0;
1228
1229 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1230 sizeof(struct megasas_sge32);
1231
1232 if (instance->flag_ieee) {
1233 sge_sz = sizeof(struct megasas_sge_skinny);
1234 }
1235
1236 /*
1237 * Main frame can contain 2 SGEs for 64-bit SGLs and
1238 * 3 SGEs for 32-bit SGLs for ldio &
1239 * 1 SGEs for 64-bit SGLs and
1240 * 2 SGEs for 32-bit SGLs for pthru frame
1241 */
1242 if (unlikely(frame_type == PTHRU_FRAME)) {
1243 if (instance->flag_ieee == 1) {
1244 num_cnt = sge_count - 1;
1245 } else if (IS_DMA64)
1246 num_cnt = sge_count - 1;
1247 else
1248 num_cnt = sge_count - 2;
1249 } else {
1250 if (instance->flag_ieee == 1) {
1251 num_cnt = sge_count - 1;
1252 } else if (IS_DMA64)
1253 num_cnt = sge_count - 2;
1254 else
1255 num_cnt = sge_count - 3;
1256 }
1257
1258 if (num_cnt > 0) {
1259 sge_bytes = sge_sz * num_cnt;
1260
1261 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1262 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1263 }
1264 /* Main frame */
1265 frame_count += 1;
1266
1267 if (frame_count > 7)
1268 frame_count = 8;
1269 return frame_count;
1270 }
1271
1272 /**
1273 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1274 * @instance: Adapter soft state
1275 * @scp: SCSI command
1276 * @cmd: Command to be prepared in
1277 *
1278 * This function prepares CDB commands. These are typically pass-through
1279 * commands to the devices.
1280 */
1281 static int
1282 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1283 struct megasas_cmd *cmd)
1284 {
1285 u32 is_logical;
1286 u32 device_id;
1287 u16 flags = 0;
1288 struct megasas_pthru_frame *pthru;
1289
1290 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1291 device_id = MEGASAS_DEV_INDEX(scp);
1292 pthru = (struct megasas_pthru_frame *)cmd->frame;
1293
1294 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1295 flags = MFI_FRAME_DIR_WRITE;
1296 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1297 flags = MFI_FRAME_DIR_READ;
1298 else if (scp->sc_data_direction == PCI_DMA_NONE)
1299 flags = MFI_FRAME_DIR_NONE;
1300
1301 if (instance->flag_ieee == 1) {
1302 flags |= MFI_FRAME_IEEE;
1303 }
1304
1305 /*
1306 * Prepare the DCDB frame
1307 */
1308 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1309 pthru->cmd_status = 0x0;
1310 pthru->scsi_status = 0x0;
1311 pthru->target_id = device_id;
1312 pthru->lun = scp->device->lun;
1313 pthru->cdb_len = scp->cmd_len;
1314 pthru->timeout = 0;
1315 pthru->pad_0 = 0;
1316 pthru->flags = cpu_to_le16(flags);
1317 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1318
1319 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1320
1321 /*
1322 * If the command is for the tape device, set the
1323 * pthru timeout to the os layer timeout value.
1324 */
1325 if (scp->device->type == TYPE_TAPE) {
1326 if ((scp->request->timeout / HZ) > 0xFFFF)
1327 pthru->timeout = cpu_to_le16(0xFFFF);
1328 else
1329 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1330 }
1331
1332 /*
1333 * Construct SGL
1334 */
1335 if (instance->flag_ieee == 1) {
1336 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1337 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1338 &pthru->sgl);
1339 } else if (IS_DMA64) {
1340 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1341 pthru->sge_count = megasas_make_sgl64(instance, scp,
1342 &pthru->sgl);
1343 } else
1344 pthru->sge_count = megasas_make_sgl32(instance, scp,
1345 &pthru->sgl);
1346
1347 if (pthru->sge_count > instance->max_num_sge) {
1348 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1349 pthru->sge_count);
1350 return 0;
1351 }
1352
1353 /*
1354 * Sense info specific
1355 */
1356 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1357 pthru->sense_buf_phys_addr_hi =
1358 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1359 pthru->sense_buf_phys_addr_lo =
1360 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1361
1362 /*
1363 * Compute the total number of frames this command consumes. FW uses
1364 * this number to pull sufficient number of frames from host memory.
1365 */
1366 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1367 PTHRU_FRAME);
1368
1369 return cmd->frame_count;
1370 }
1371
1372 /**
1373 * megasas_build_ldio - Prepares IOs to logical devices
1374 * @instance: Adapter soft state
1375 * @scp: SCSI command
1376 * @cmd: Command to be prepared
1377 *
1378 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1379 */
1380 static int
1381 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1382 struct megasas_cmd *cmd)
1383 {
1384 u32 device_id;
1385 u8 sc = scp->cmnd[0];
1386 u16 flags = 0;
1387 struct megasas_io_frame *ldio;
1388
1389 device_id = MEGASAS_DEV_INDEX(scp);
1390 ldio = (struct megasas_io_frame *)cmd->frame;
1391
1392 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
1393 flags = MFI_FRAME_DIR_WRITE;
1394 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1395 flags = MFI_FRAME_DIR_READ;
1396
1397 if (instance->flag_ieee == 1) {
1398 flags |= MFI_FRAME_IEEE;
1399 }
1400
1401 /*
1402 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1403 */
1404 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1405 ldio->cmd_status = 0x0;
1406 ldio->scsi_status = 0x0;
1407 ldio->target_id = device_id;
1408 ldio->timeout = 0;
1409 ldio->reserved_0 = 0;
1410 ldio->pad_0 = 0;
1411 ldio->flags = cpu_to_le16(flags);
1412 ldio->start_lba_hi = 0;
1413 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1414
1415 /*
1416 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1417 */
1418 if (scp->cmd_len == 6) {
1419 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1420 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1421 ((u32) scp->cmnd[2] << 8) |
1422 (u32) scp->cmnd[3]);
1423
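		/* A 6-byte CDB carries only a 21-bit LBA; mask off the reserved high bits */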
1424 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1425 }
1426
1427 /*
1428 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1429 */
1430 else if (scp->cmd_len == 10) {
1431 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1432 ((u32) scp->cmnd[7] << 8));
1433 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1434 ((u32) scp->cmnd[3] << 16) |
1435 ((u32) scp->cmnd[4] << 8) |
1436 (u32) scp->cmnd[5]);
1437 }
1438
1439 /*
1440 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1441 */
1442 else if (scp->cmd_len == 12) {
1443 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1444 ((u32) scp->cmnd[7] << 16) |
1445 ((u32) scp->cmnd[8] << 8) |
1446 (u32) scp->cmnd[9]);
1447
1448 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1449 ((u32) scp->cmnd[3] << 16) |
1450 ((u32) scp->cmnd[4] << 8) |
1451 (u32) scp->cmnd[5]);
1452 }
1453
1454 /*
1455 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1456 */
1457 else if (scp->cmd_len == 16) {
1458 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1459 ((u32) scp->cmnd[11] << 16) |
1460 ((u32) scp->cmnd[12] << 8) |
1461 (u32) scp->cmnd[13]);
1462
1463 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1464 ((u32) scp->cmnd[7] << 16) |
1465 ((u32) scp->cmnd[8] << 8) |
1466 (u32) scp->cmnd[9]);
1467
1468 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1469 ((u32) scp->cmnd[3] << 16) |
1470 ((u32) scp->cmnd[4] << 8) |
1471 (u32) scp->cmnd[5]);
1472
1473 }
1474
1475 /*
1476 * Construct SGL
1477 */
1478 if (instance->flag_ieee) {
1479 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1480 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1481 &ldio->sgl);
1482 } else if (IS_DMA64) {
1483 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1484 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1485 } else
1486 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1487
1488 if (ldio->sge_count > instance->max_num_sge) {
1489 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1490 ldio->sge_count);
1491 return 0;
1492 }
1493
1494 /*
1495 * Sense info specific
1496 */
1497 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1498 ldio->sense_buf_phys_addr_hi = 0;
1499 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1500
1501 /*
1502 * Compute the total number of frames this command consumes. FW uses
1503 * this number to pull sufficient number of frames from host memory.
1504 */
1505 cmd->frame_count = megasas_get_frame_count(instance,
1506 ldio->sge_count, IO_FRAME);
1507
1508 return cmd->frame_count;
1509 }
1510
1511 /**
1512 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1513 * and whether it's RW or non RW
1514 * @scmd: SCSI command
1515 *
1516 */
1517 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1518 {
1519 int ret;
1520
1521 switch (cmd->cmnd[0]) {
1522 case READ_10:
1523 case WRITE_10:
1524 case READ_12:
1525 case WRITE_12:
1526 case READ_6:
1527 case WRITE_6:
1528 case READ_16:
1529 case WRITE_16:
1530 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1531 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1532 break;
1533 default:
1534 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1535 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1536 }
1537 return ret;
1538 }
1539
1540 /**
1541 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1542 * in FW
1543 * @instance: Adapter soft state
1544 */
1545 static inline void
1546 megasas_dump_pending_frames(struct megasas_instance *instance)
1547 {
1548 struct megasas_cmd *cmd;
1549 int i,n;
1550 union megasas_sgl *mfi_sgl;
1551 struct megasas_io_frame *ldio;
1552 struct megasas_pthru_frame *pthru;
1553 u32 sgcount;
1554 u16 max_cmd = instance->max_fw_cmds;
1555
1556 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1557 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1558 if (IS_DMA64)
1559 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1560 else
1561 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1562
1563 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1564 for (i = 0; i < max_cmd; i++) {
1565 cmd = instance->cmd_list[i];
1566 if (!cmd->scmd)
1567 continue;
1568 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1569 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1570 ldio = (struct megasas_io_frame *)cmd->frame;
1571 mfi_sgl = &ldio->sgl;
1572 sgcount = ldio->sge_count;
1573 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1574 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1575 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1576 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1577 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1578 } else {
1579 pthru = (struct megasas_pthru_frame *) cmd->frame;
1580 mfi_sgl = &pthru->sgl;
1581 sgcount = pthru->sge_count;
1582 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1583 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1584 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1585 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1586 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1587 }
1588 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1589 for (n = 0; n < sgcount; n++) {
1590 if (IS_DMA64)
1591 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1592 le32_to_cpu(mfi_sgl->sge64[n].length),
1593 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1594 else
1595 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1596 le32_to_cpu(mfi_sgl->sge32[n].length),
1597 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1598 }
1599 }
1600 } /*for max_cmd*/
1601 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1602 for (i = 0; i < max_cmd; i++) {
1603
1604 cmd = instance->cmd_list[i];
1605
1606 if (cmd->sync_cmd == 1)
1607 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1608 }
1609 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1610 }
1611
1612 u32
1613 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1614 struct scsi_cmnd *scmd)
1615 {
1616 struct megasas_cmd *cmd;
1617 u32 frame_count;
1618
1619 cmd = megasas_get_cmd(instance);
1620 if (!cmd)
1621 return SCSI_MLQUEUE_HOST_BUSY;
1622
1623 /*
1624 * Logical drive command
1625 */
1626 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1627 frame_count = megasas_build_ldio(instance, scmd, cmd);
1628 else
1629 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1630
1631 if (!frame_count)
1632 goto out_return_cmd;
1633
1634 cmd->scmd = scmd;
1635 scmd->SCp.ptr = (char *)cmd;
1636
1637 /*
1638 * Issue the command to the FW
1639 */
1640 atomic_inc(&instance->fw_outstanding);
1641
1642 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1643 cmd->frame_count-1, instance->reg_set);
1644
1645 return 0;
1646 out_return_cmd:
1647 megasas_return_cmd(instance, cmd);
1648 return SCSI_MLQUEUE_HOST_BUSY;
1649 }
1650
1651
1652 /**
1653 * megasas_queue_command - Queue entry point
1654 * @shost: SCSI host to which the command is queued
1655 * @scmd: SCSI command to be queued
1656 */
1657 static int
1658 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1659 {
1660 struct megasas_instance *instance;
1661 struct MR_PRIV_DEVICE *mr_device_priv_data;
1662
1663 instance = (struct megasas_instance *)
1664 scmd->device->host->hostdata;
1665
1666 if (instance->unload == 1) {
1667 scmd->result = DID_NO_CONNECT << 16;
1668 scmd->scsi_done(scmd);
1669 return 0;
1670 }
1671
1672 if (instance->issuepend_done == 0)
1673 return SCSI_MLQUEUE_HOST_BUSY;
1674
1675
1676 /* Check for an mpio path and adjust behavior */
1677 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1678 if (megasas_check_mpio_paths(instance, scmd) ==
1679 (DID_REQUEUE << 16)) {
1680 return SCSI_MLQUEUE_HOST_BUSY;
1681 } else {
1682 scmd->result = DID_NO_CONNECT << 16;
1683 scmd->scsi_done(scmd);
1684 return 0;
1685 }
1686 }
1687
1688 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1689 scmd->result = DID_NO_CONNECT << 16;
1690 scmd->scsi_done(scmd);
1691 return 0;
1692 }
1693
1694 mr_device_priv_data = scmd->device->hostdata;
1695 if (!mr_device_priv_data) {
1696 scmd->result = DID_NO_CONNECT << 16;
1697 scmd->scsi_done(scmd);
1698 return 0;
1699 }
1700
1701 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1702 return SCSI_MLQUEUE_HOST_BUSY;
1703
1704 if (mr_device_priv_data->tm_busy)
1705 return SCSI_MLQUEUE_DEVICE_BUSY;
1706
1707
1708 scmd->result = 0;
1709
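	/* Reject LDs beyond the firmware-supported VD count or with a non-zero LUN */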
1710 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1711 (scmd->device->id >= instance->fw_supported_vd_count ||
1712 scmd->device->lun)) {
1713 scmd->result = DID_BAD_TARGET << 16;
1714 goto out_done;
1715 }
1716
1717 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1718 MEGASAS_IS_LOGICAL(scmd->device) &&
1719 (!instance->fw_sync_cache_support)) {
1720 scmd->result = DID_OK << 16;
1721 goto out_done;
1722 }
1723
1724 return instance->instancet->build_and_issue_cmd(instance, scmd);
1725
1726 out_done:
1727 scmd->scsi_done(scmd);
1728 return 0;
1729 }
1730
1731 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1732 {
1733 int i;
1734
1735 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1736
1737 if ((megasas_mgmt_info.instance[i]) &&
1738 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1739 return megasas_mgmt_info.instance[i];
1740 }
1741
1742 return NULL;
1743 }
1744
1745 /*
1746 * megasas_set_dynamic_target_properties -
1747 * Device properties set by the driver may not be static and must be
1748 * refreshed after an OCR (online controller reset):
1749 *
1750 * set tm_capable.
1751 * set dma alignment (only for EEDP-protection-enabled VDs).
1752 *
1753 * @sdev: OS provided scsi device
1754 *
1755 * Returns void
1756 */
1757 void megasas_set_dynamic_target_properties(struct scsi_device *sdev)
1758 {
1759 u16 pd_index = 0, ld;
1760 u32 device_id;
1761 struct megasas_instance *instance;
1762 struct fusion_context *fusion;
1763 struct MR_PRIV_DEVICE *mr_device_priv_data;
1764 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1765 struct MR_LD_RAID *raid;
1766 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1767
1768 instance = megasas_lookup_instance(sdev->host->host_no);
1769 fusion = instance->ctrl_context;
1770 mr_device_priv_data = sdev->hostdata;
1771
1772 if (!fusion || !mr_device_priv_data)
1773 return;
1774
1775 if (MEGASAS_IS_LOGICAL(sdev)) {
1776 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1777 + sdev->id;
1778 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1779 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1780 if (ld >= instance->fw_supported_vd_count)
1781 return;
1782 raid = MR_LdRaidGet(ld, local_map_ptr);
1783
1784 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1785 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1786
1787 mr_device_priv_data->is_tm_capable =
1788 raid->capability.tmCapable;
1789 } else if (instance->use_seqnum_jbod_fp) {
1790 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1791 sdev->id;
1792 pd_sync = (void *)fusion->pd_seq_sync
1793 [(instance->pd_seq_map_id - 1) & 1];
1794 mr_device_priv_data->is_tm_capable =
1795 pd_sync->seq[pd_index].capability.tmCapable;
1796 }
1797 }
1798
1799 /*
1800 * megasas_set_nvme_device_properties -
1801 * set nomerges=2
1802 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1803 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1804 *
1805 * MR firmware provides the value in KB. The caller of this function
1806 * converts KB into bytes.
1807 *
1808 * e.g. MDTS=5 means 2^5 * NVMe page size. In case of a 4K page size,
1809 * MR firmware provides the value 128, i.e. (32 * 4K) = 128K.
1810 *
1811 * @sdev: scsi device
1812 * @max_io_size: maximum io transfer size
1813 *
1814 */
1815 static inline void
1816 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1817 {
1818 struct megasas_instance *instance;
1819 u32 mr_nvme_pg_size;
1820
1821 instance = (struct megasas_instance *)sdev->host->hostdata;
1822 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1823 MR_DEFAULT_NVME_PAGE_SIZE);
1824
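	/* max_io_size is in bytes; the block layer expects the limit in 512-byte sectors */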
1825 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1826
1827 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1828 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1829 }
1830
1831
1832 /*
1833 * megasas_set_static_target_properties -
1834 * Device properties set by the driver are static and do not need to be
1835 * refreshed after an OCR.
1836 *
1837 * set io timeout
1838 * set device queue depth
1839 * set nvme device properties. see - megasas_set_nvme_device_properties
1840 *
1841 * @sdev: scsi device
1842 * @is_target_prop: true, if FW provided target properties.
1843 */
1844 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1845 bool is_target_prop)
1846 {
1847 u16 target_index = 0;
1848 u8 interface_type;
1849 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1850 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1851 u32 tgt_device_qd;
1852 struct megasas_instance *instance;
1853 struct MR_PRIV_DEVICE *mr_device_priv_data;
1854
1855 instance = megasas_lookup_instance(sdev->host->host_no);
1856 mr_device_priv_data = sdev->hostdata;
1857 interface_type = mr_device_priv_data->interface_type;
1858
1859 /*
1860 * The RAID firmware may require extended timeouts.
1861 */
1862 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1863
1864 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1865
1866 switch (interface_type) {
1867 case SAS_PD:
1868 device_qd = MEGASAS_SAS_QD;
1869 break;
1870 case SATA_PD:
1871 device_qd = MEGASAS_SATA_QD;
1872 break;
1873 case NVME_PD:
1874 device_qd = MEGASAS_NVME_QD;
1875 break;
1876 }
1877
1878 if (is_target_prop) {
1879 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1880 if (tgt_device_qd &&
1881 (tgt_device_qd <= instance->host->can_queue))
1882 device_qd = tgt_device_qd;
1883
1884 /* max_io_size_kb will be set to non-zero for
1885 * NVMe based VDs and system PDs.
1886 */
1887 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1888 }
1889
1890 if (instance->nvme_page_size && max_io_size_kb)
1891 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1892
1893 scsi_change_queue_depth(sdev, device_qd);
1894
1895 }
1896
1897
1898 static int megasas_slave_configure(struct scsi_device *sdev)
1899 {
1900 u16 pd_index = 0;
1901 struct megasas_instance *instance;
1902 int ret_target_prop = DCMD_FAILED;
1903 bool is_target_prop = false;
1904
1905 instance = megasas_lookup_instance(sdev->host->host_no);
1906 if (instance->pd_list_not_supported) {
1907 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1908 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1909 sdev->id;
1910 if (instance->pd_list[pd_index].driveState !=
1911 MR_PD_STATE_SYSTEM)
1912 return -ENXIO;
1913 }
1914 }
1915
1916 mutex_lock(&instance->hba_mutex);
1917 /* Send DCMD to Firmware and cache the information */
1918 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1919 megasas_get_pd_info(instance, sdev);
1920
1921 /* Some Ventura firmware may not have instance->nvme_page_size set.
1922 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1923 */
1924 if ((instance->tgt_prop) && (instance->nvme_page_size))
1925 ret_target_prop = megasas_get_target_prop(instance, sdev);
1926
1927 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
1928 megasas_set_static_target_properties(sdev, is_target_prop);
1929
1930 mutex_unlock(&instance->hba_mutex);
1931
1932 /* This sdev property may change post OCR */
1933 megasas_set_dynamic_target_properties(sdev);
1934
1935 return 0;
1936 }
1937
1938 static int megasas_slave_alloc(struct scsi_device *sdev)
1939 {
1940 u16 pd_index = 0;
1941 struct megasas_instance *instance ;
1942 struct MR_PRIV_DEVICE *mr_device_priv_data;
1943
1944 instance = megasas_lookup_instance(sdev->host->host_no);
1945 if (!MEGASAS_IS_LOGICAL(sdev)) {
1946 /*
1947 * Expose the device to the OS scan only if it is a SYSTEM PD
1948 */
1949 pd_index =
1950 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1951 sdev->id;
1952 if ((instance->pd_list_not_supported ||
1953 instance->pd_list[pd_index].driveState ==
1954 MR_PD_STATE_SYSTEM)) {
1955 goto scan_target;
1956 }
1957 return -ENXIO;
1958 }
1959
1960 scan_target:
1961 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
1962 GFP_KERNEL);
1963 if (!mr_device_priv_data)
1964 return -ENOMEM;
1965 sdev->hostdata = mr_device_priv_data;
1966 return 0;
1967 }
1968
1969 static void megasas_slave_destroy(struct scsi_device *sdev)
1970 {
1971 kfree(sdev->hostdata);
1972 sdev->hostdata = NULL;
1973 }
1974
1975 /*
1976 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after the
1977 * adapter has been killed
1978 * @instance: Adapter soft state
1979 *
1980 */
1981 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
1982 {
1983 int i;
1984 struct megasas_cmd *cmd_mfi;
1985 struct megasas_cmd_fusion *cmd_fusion;
1986 struct fusion_context *fusion = instance->ctrl_context;
1987
1988 /* Find all outstanding ioctls */
1989 if (fusion) {
1990 for (i = 0; i < instance->max_fw_cmds; i++) {
1991 cmd_fusion = fusion->cmd_list[i];
1992 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
1993 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
1994 if (cmd_mfi->sync_cmd &&
1995 cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
1996 megasas_complete_cmd(instance,
1997 cmd_mfi, DID_OK);
1998 }
1999 }
2000 } else {
2001 for (i = 0; i < instance->max_fw_cmds; i++) {
2002 cmd_mfi = instance->cmd_list[i];
2003 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2004 MFI_CMD_ABORT)
2005 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2006 }
2007 }
2008 }
2009
2010
2011 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2012 {
2013 /* Set critical error to block I/O & ioctls in case caller didn't */
2014 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2015 /* Wait 1 second to ensure IO or ioctls in build have posted */
2016 msleep(1000);
2017 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2018 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2019 (instance->ctrl_context)) {
2020 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2021 /* Flush */
2022 readl(&instance->reg_set->doorbell);
2023 if (instance->requestorId && instance->peerIsPresent)
2024 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2025 } else {
2026 writel(MFI_STOP_ADP,
2027 &instance->reg_set->inbound_doorbell);
2028 }
2029 /* Complete outstanding ioctls when adapter is killed */
2030 megasas_complete_outstanding_ioctls(instance);
2031 }
2032
2033 /**
2034 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2035 * restored to max value
2036 * @instance: Adapter soft state
2037 *
2038 */
2039 void
2040 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2041 {
2042 unsigned long flags;
2043
2044 if (instance->flag & MEGASAS_FW_BUSY
2045 && time_after(jiffies, instance->last_time + 5 * HZ)
2046 && atomic_read(&instance->fw_outstanding) <
2047 instance->throttlequeuedepth + 1) {
2048
2049 spin_lock_irqsave(instance->host->host_lock, flags);
2050 instance->flag &= ~MEGASAS_FW_BUSY;
2051
2052 instance->host->can_queue = instance->cur_can_queue;
2053 spin_unlock_irqrestore(instance->host->host_lock, flags);
2054 }
2055 }
2056
2057 /**
2058 * megasas_complete_cmd_dpc - Completes commands posted by FW in the reply queue
2059 * @instance_addr: Address of adapter soft state
2060 *
2061 * Tasklet to complete cmds
2062 */
2063 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2064 {
2065 u32 producer;
2066 u32 consumer;
2067 u32 context;
2068 struct megasas_cmd *cmd;
2069 struct megasas_instance *instance =
2070 (struct megasas_instance *)instance_addr;
2071 unsigned long flags;
2072
2073 /* If we have already declared adapter dead, do not complete cmds */
2074 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2075 return;
2076
2077 spin_lock_irqsave(&instance->completion_lock, flags);
2078
2079 producer = le32_to_cpu(*instance->producer);
2080 consumer = le32_to_cpu(*instance->consumer);
2081
2082 while (consumer != producer) {
2083 context = le32_to_cpu(instance->reply_queue[consumer]);
2084 if (context >= instance->max_fw_cmds) {
2085 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2086 context);
2087 BUG();
2088 }
2089
2090 cmd = instance->cmd_list[context];
2091
2092 megasas_complete_cmd(instance, cmd, DID_OK);
2093
2094 consumer++;
2095 if (consumer == (instance->max_fw_cmds + 1)) {
2096 consumer = 0;
2097 }
2098 }
2099
2100 *instance->consumer = cpu_to_le32(producer);
2101
2102 spin_unlock_irqrestore(&instance->completion_lock, flags);
2103
2104 /*
2105 * Check if we can restore can_queue
2106 */
2107 megasas_check_and_restore_queue_depth(instance);
2108 }
2109
2110 /**
2111 * megasas_start_timer - Initializes a timer object
2112 * @instance: Adapter soft state
2113 * @timer: timer object to be initialized
2114 * @fn: timer function
2115 * @interval: time after which the timer function is called
2116 *
2117 */
2118 void megasas_start_timer(struct megasas_instance *instance,
2119 struct timer_list *timer,
2120 void *fn, unsigned long interval)
2121 {
2122 init_timer(timer);
2123 timer->expires = jiffies + interval;
2124 timer->data = (unsigned long)instance;
2125 timer->function = fn;
2126 add_timer(timer);
2127 }
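/*
 * Usage sketch (illustrative): the SR-IOV heartbeat path arms this one-shot
 * timer roughly as below; megasas_sriov_heartbeat_handler() then re-arms it
 * with mod_timer() for as long as the FW keeps updating its counter:
 *
 *	megasas_start_timer(instance, &instance->sriov_heartbeat_timer,
 *			    megasas_sriov_heartbeat_handler,
 *			    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
 */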
2128
2129 static void
2130 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2131
2132 static void
2133 process_fw_state_change_wq(struct work_struct *work);
2134
2135 void megasas_do_ocr(struct megasas_instance *instance)
2136 {
2137 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2138 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2139 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2140 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2141 }
2142 instance->instancet->disable_intr(instance);
2143 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2144 instance->issuepend_done = 0;
2145
2146 atomic_set(&instance->fw_outstanding, 0);
2147 megasas_internal_reset_defer_cmds(instance);
2148 process_fw_state_change_wq(&instance->work_init);
2149 }
2150
2151 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2152 int initial)
2153 {
2154 struct megasas_cmd *cmd;
2155 struct megasas_dcmd_frame *dcmd;
2156 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2157 dma_addr_t new_affiliation_111_h;
2158 int ld, retval = 0;
2159 u8 thisVf;
2160
2161 cmd = megasas_get_cmd(instance);
2162
2163 if (!cmd) {
2164 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2165 "Failed to get cmd for scsi%d\n",
2166 instance->host->host_no);
2167 return -ENOMEM;
2168 }
2169
2170 dcmd = &cmd->frame->dcmd;
2171
2172 if (!instance->vf_affiliation_111) {
2173 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2174 "affiliation for scsi%d\n", instance->host->host_no);
2175 megasas_return_cmd(instance, cmd);
2176 return -ENOMEM;
2177 }
2178
2179 if (initial)
2180 memset(instance->vf_affiliation_111, 0,
2181 sizeof(struct MR_LD_VF_AFFILIATION_111));
2182 else {
2183 new_affiliation_111 =
2184 pci_alloc_consistent(instance->pdev,
2185 sizeof(struct MR_LD_VF_AFFILIATION_111),
2186 &new_affiliation_111_h);
2187 if (!new_affiliation_111) {
2188 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2189 "memory for new affiliation for scsi%d\n",
2190 instance->host->host_no);
2191 megasas_return_cmd(instance, cmd);
2192 return -ENOMEM;
2193 }
2194 memset(new_affiliation_111, 0,
2195 sizeof(struct MR_LD_VF_AFFILIATION_111));
2196 }
2197
2198 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2199
2200 dcmd->cmd = MFI_CMD_DCMD;
2201 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2202 dcmd->sge_count = 1;
2203 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2204 dcmd->timeout = 0;
2205 dcmd->pad_0 = 0;
2206 dcmd->data_xfer_len =
2207 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2208 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2209
2210 if (initial)
2211 dcmd->sgl.sge32[0].phys_addr =
2212 cpu_to_le32(instance->vf_affiliation_111_h);
2213 else
2214 dcmd->sgl.sge32[0].phys_addr =
2215 cpu_to_le32(new_affiliation_111_h);
2216
2217 dcmd->sgl.sge32[0].length = cpu_to_le32(
2218 sizeof(struct MR_LD_VF_AFFILIATION_111));
2219
2220 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2221 "scsi%d\n", instance->host->host_no);
2222
2223 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2224 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2225 " failed with status 0x%x for scsi%d\n",
2226 dcmd->cmd_status, instance->host->host_no);
2227 retval = 1; /* Do a scan if we couldn't get affiliation */
2228 goto out;
2229 }
2230
2231 if (!initial) {
2232 thisVf = new_affiliation_111->thisVf;
2233 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2234 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2235 new_affiliation_111->map[ld].policy[thisVf]) {
2236 dev_warn(&instance->pdev->dev, "SR-IOV: "
2237 "Got new LD/VF affiliation for scsi%d\n",
2238 instance->host->host_no);
2239 memcpy(instance->vf_affiliation_111,
2240 new_affiliation_111,
2241 sizeof(struct MR_LD_VF_AFFILIATION_111));
2242 retval = 1;
2243 goto out;
2244 }
2245 }
2246 out:
2247 if (new_affiliation_111) {
2248 pci_free_consistent(instance->pdev,
2249 sizeof(struct MR_LD_VF_AFFILIATION_111),
2250 new_affiliation_111,
2251 new_affiliation_111_h);
2252 }
2253
2254 megasas_return_cmd(instance, cmd);
2255
2256 return retval;
2257 }
2258
2259 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2260 int initial)
2261 {
2262 struct megasas_cmd *cmd;
2263 struct megasas_dcmd_frame *dcmd;
2264 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2265 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2266 dma_addr_t new_affiliation_h;
2267 int i, j, retval = 0, found = 0, doscan = 0;
2268 u8 thisVf;
2269
2270 cmd = megasas_get_cmd(instance);
2271
2272 if (!cmd) {
2273 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2274 "Failed to get cmd for scsi%d\n",
2275 instance->host->host_no);
2276 return -ENOMEM;
2277 }
2278
2279 dcmd = &cmd->frame->dcmd;
2280
2281 if (!instance->vf_affiliation) {
2282 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2283 "affiliation for scsi%d\n", instance->host->host_no);
2284 megasas_return_cmd(instance, cmd);
2285 return -ENOMEM;
2286 }
2287
2288 if (initial)
2289 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2290 sizeof(struct MR_LD_VF_AFFILIATION));
2291 else {
2292 new_affiliation =
2293 pci_alloc_consistent(instance->pdev,
2294 (MAX_LOGICAL_DRIVES + 1) *
2295 sizeof(struct MR_LD_VF_AFFILIATION),
2296 &new_affiliation_h);
2297 if (!new_affiliation) {
2298 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2299 "memory for new affiliation for scsi%d\n",
2300 instance->host->host_no);
2301 megasas_return_cmd(instance, cmd);
2302 return -ENOMEM;
2303 }
2304 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2305 sizeof(struct MR_LD_VF_AFFILIATION));
2306 }
2307
2308 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2309
2310 dcmd->cmd = MFI_CMD_DCMD;
2311 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2312 dcmd->sge_count = 1;
2313 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2314 dcmd->timeout = 0;
2315 dcmd->pad_0 = 0;
2316 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2317 sizeof(struct MR_LD_VF_AFFILIATION));
2318 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2319
2320 if (initial)
2321 dcmd->sgl.sge32[0].phys_addr =
2322 cpu_to_le32(instance->vf_affiliation_h);
2323 else
2324 dcmd->sgl.sge32[0].phys_addr =
2325 cpu_to_le32(new_affiliation_h);
2326
2327 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2328 sizeof(struct MR_LD_VF_AFFILIATION));
2329
2330 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2331 "scsi%d\n", instance->host->host_no);
2332
2333
2334 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2335 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2336 " failed with status 0x%x for scsi%d\n",
2337 dcmd->cmd_status, instance->host->host_no);
2338 retval = 1; /* Do a scan if we couldn't get affiliation */
2339 goto out;
2340 }
2341
2342 if (!initial) {
2343 if (!new_affiliation->ldCount) {
2344 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2345 "affiliation for passive path for scsi%d\n",
2346 instance->host->host_no);
2347 retval = 1;
2348 goto out;
2349 }
2350 newmap = new_affiliation->map;
2351 savedmap = instance->vf_affiliation->map;
2352 thisVf = new_affiliation->thisVf;
2353 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2354 found = 0;
2355 for (j = 0; j < instance->vf_affiliation->ldCount;
2356 j++) {
2357 if (newmap->ref.targetId ==
2358 savedmap->ref.targetId) {
2359 found = 1;
2360 if (newmap->policy[thisVf] !=
2361 savedmap->policy[thisVf]) {
2362 doscan = 1;
2363 goto out;
2364 }
2365 }
2366 savedmap = (struct MR_LD_VF_MAP *)
2367 ((unsigned char *)savedmap +
2368 savedmap->size);
2369 }
2370 if (!found && newmap->policy[thisVf] !=
2371 MR_LD_ACCESS_HIDDEN) {
2372 doscan = 1;
2373 goto out;
2374 }
2375 newmap = (struct MR_LD_VF_MAP *)
2376 ((unsigned char *)newmap + newmap->size);
2377 }
2378
2379 newmap = new_affiliation->map;
2380 savedmap = instance->vf_affiliation->map;
2381
2382 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2383 found = 0;
2384 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2385 if (savedmap->ref.targetId ==
2386 newmap->ref.targetId) {
2387 found = 1;
2388 if (savedmap->policy[thisVf] !=
2389 newmap->policy[thisVf]) {
2390 doscan = 1;
2391 goto out;
2392 }
2393 }
2394 newmap = (struct MR_LD_VF_MAP *)
2395 ((unsigned char *)newmap +
2396 newmap->size);
2397 }
2398 if (!found && savedmap->policy[thisVf] !=
2399 MR_LD_ACCESS_HIDDEN) {
2400 doscan = 1;
2401 goto out;
2402 }
2403 savedmap = (struct MR_LD_VF_MAP *)
2404 ((unsigned char *)savedmap +
2405 savedmap->size);
2406 }
2407 }
2408 out:
2409 if (doscan) {
2410 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2411 "affiliation for scsi%d\n", instance->host->host_no);
2412 memcpy(instance->vf_affiliation, new_affiliation,
2413 new_affiliation->size);
2414 retval = 1;
2415 }
2416
2417 if (new_affiliation)
2418 pci_free_consistent(instance->pdev,
2419 (MAX_LOGICAL_DRIVES + 1) *
2420 sizeof(struct MR_LD_VF_AFFILIATION),
2421 new_affiliation, new_affiliation_h);
2422 megasas_return_cmd(instance, cmd);
2423
2424 return retval;
2425 }
2426
2427 /* This function will get the current SR-IOV LD/VF affiliation */
2428 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2429 int initial)
2430 {
2431 int retval;
2432
2433 if (instance->PlasmaFW111)
2434 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2435 else
2436 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2437 return retval;
2438 }
2439
2440 /* This function will tell FW to start the SR-IOV heartbeat */
2441 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2442 int initial)
2443 {
2444 struct megasas_cmd *cmd;
2445 struct megasas_dcmd_frame *dcmd;
2446 int retval = 0;
2447
2448 cmd = megasas_get_cmd(instance);
2449
2450 if (!cmd) {
2451 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2452 "Failed to get cmd for scsi%d\n",
2453 instance->host->host_no);
2454 return -ENOMEM;
2455 }
2456
2457 dcmd = &cmd->frame->dcmd;
2458
2459 if (initial) {
2460 instance->hb_host_mem =
2461 pci_zalloc_consistent(instance->pdev,
2462 sizeof(struct MR_CTRL_HB_HOST_MEM),
2463 &instance->hb_host_mem_h);
2464 if (!instance->hb_host_mem) {
2465 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2466 " memory for heartbeat host memory for scsi%d\n",
2467 instance->host->host_no);
2468 retval = -ENOMEM;
2469 goto out;
2470 }
2471 }
2472
2473 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2474
2475 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2476 dcmd->cmd = MFI_CMD_DCMD;
2477 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2478 dcmd->sge_count = 1;
2479 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2480 dcmd->timeout = 0;
2481 dcmd->pad_0 = 0;
2482 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2483 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2484 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
2485 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2486
2487 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2488 instance->host->host_no);
2489
2490 if (instance->ctrl_context && !instance->mask_interrupts)
2491 retval = megasas_issue_blocked_cmd(instance, cmd,
2492 MEGASAS_ROUTINE_WAIT_TIME_VF);
2493 else
2494 retval = megasas_issue_polled(instance, cmd);
2495
2496 if (retval) {
2497 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2498 "_MEM_ALLOC DCMD %s for scsi%d\n",
2499 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2500 "timed out" : "failed", instance->host->host_no);
2501 retval = 1;
2502 }
2503
2504 out:
2505 megasas_return_cmd(instance, cmd);
2506
2507 return retval;
2508 }
2509
2510 /* Handler for SR-IOV heartbeat */
2511 void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
2512 {
2513 struct megasas_instance *instance =
2514 (struct megasas_instance *)instance_addr;
2515
2516 if (instance->hb_host_mem->HB.fwCounter !=
2517 instance->hb_host_mem->HB.driverCounter) {
2518 instance->hb_host_mem->HB.driverCounter =
2519 instance->hb_host_mem->HB.fwCounter;
2520 mod_timer(&instance->sriov_heartbeat_timer,
2521 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2522 } else {
2523 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2524 "completed for scsi%d\n", instance->host->host_no);
2525 schedule_work(&instance->work_init);
2526 }
2527 }
2528
2529 /**
2530 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2531 * @instance: Adapter soft state
2532 *
2533 * This function waits for up to resetwaittime seconds (default
2534 * MEGASAS_RESET_WAIT_TIME) for FW to complete all its outstanding commands.
2535 * Returns an error if IOs are still pending, and may also kill the adapter.
2536 */
2537 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2538 {
2539 int i, sl, outstanding;
2540 u32 reset_index;
2541 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2542 unsigned long flags;
2543 struct list_head clist_local;
2544 struct megasas_cmd *reset_cmd;
2545 u32 fw_state;
2546
2547 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2548 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2549 __func__, __LINE__);
2550 return FAILED;
2551 }
2552
2553 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2554
2555 INIT_LIST_HEAD(&clist_local);
2556 spin_lock_irqsave(&instance->hba_lock, flags);
2557 list_splice_init(&instance->internal_reset_pending_q,
2558 &clist_local);
2559 spin_unlock_irqrestore(&instance->hba_lock, flags);
2560
2561 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2562 for (i = 0; i < wait_time; i++) {
2563 msleep(1000);
2564 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2565 break;
2566 }
2567
2568 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2569 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2570 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2571 return FAILED;
2572 }
2573
2574 reset_index = 0;
2575 while (!list_empty(&clist_local)) {
2576 reset_cmd = list_entry((&clist_local)->next,
2577 struct megasas_cmd, list);
2578 list_del_init(&reset_cmd->list);
2579 if (reset_cmd->scmd) {
2580 reset_cmd->scmd->result = DID_REQUEUE << 16;
2581 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2582 reset_index, reset_cmd,
2583 reset_cmd->scmd->cmnd[0]);
2584
2585 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2586 megasas_return_cmd(instance, reset_cmd);
2587 } else if (reset_cmd->sync_cmd) {
2588 dev_notice(&instance->pdev->dev, "%p synch cmd on the "
2589 "reset queue\n",
2590 reset_cmd);
2591
2592 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2593 instance->instancet->fire_cmd(instance,
2594 reset_cmd->frame_phys_addr,
2595 0, instance->reg_set);
2596 } else {
2597 dev_notice(&instance->pdev->dev, "%p unexpected "
2598 "cmd on the reset list\n",
2599 reset_cmd);
2600 }
2601 reset_index++;
2602 }
2603
2604 return SUCCESS;
2605 }
2606
2607 for (i = 0; i < resetwaittime; i++) {
2608 outstanding = atomic_read(&instance->fw_outstanding);
2609
2610 if (!outstanding)
2611 break;
2612
2613 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2614 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2615 "commands to complete\n", i, outstanding);
2616 /*
2617 * Call the cmd completion routine. Cmds are
2618 * completed directly without depending on the isr.
2619 */
2620 megasas_complete_cmd_dpc((unsigned long)instance);
2621 }
2622
2623 msleep(1000);
2624 }
2625
2626 i = 0;
2627 outstanding = atomic_read(&instance->fw_outstanding);
2628 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2629
2630 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2631 goto no_outstanding;
2632
2633 if (instance->disableOnlineCtrlReset)
2634 goto kill_hba_and_failed;
2635 do {
2636 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2637 dev_info(&instance->pdev->dev,
2638 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, oustanding 0x%x\n",
2639 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2640 if (i == 3)
2641 goto kill_hba_and_failed;
2642 megasas_do_ocr(instance);
2643
2644 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2645 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2646 __func__, __LINE__);
2647 return FAILED;
2648 }
2649 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2650 __func__, __LINE__);
2651
2652 for (sl = 0; sl < 10; sl++)
2653 msleep(500);
2654
2655 outstanding = atomic_read(&instance->fw_outstanding);
2656
2657 fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
2658 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2659 goto no_outstanding;
2660 }
2661 i++;
2662 } while (i <= 3);
2663
2664 no_outstanding:
2665
2666 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2667 __func__, __LINE__);
2668 return SUCCESS;
2669
2670 kill_hba_and_failed:
2671
2672 /* Reset not supported, kill adapter */
2673 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2674 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2675 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2676 atomic_read(&instance->fw_outstanding));
2677 megasas_dump_pending_frames(instance);
2678 megaraid_sas_kill_hba(instance);
2679
2680 return FAILED;
2681 }
2682
2683 /**
2684 * megasas_generic_reset - Generic reset routine
2685 * @scmd: Mid-layer SCSI command
2686 *
2687 * This routine implements a generic reset handler for device, bus and host
2688 * reset requests. Device, bus and host specific reset handlers can use this
2689 * function after they do their specific tasks.
2690 */
2691 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2692 {
2693 int ret_val;
2694 struct megasas_instance *instance;
2695
2696 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2697
2698 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2699 scmd->cmnd[0], scmd->retries);
2700
2701 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2702 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2703 return FAILED;
2704 }
2705
2706 ret_val = megasas_wait_for_outstanding(instance);
2707 if (ret_val == SUCCESS)
2708 dev_notice(&instance->pdev->dev, "reset successful\n");
2709 else
2710 dev_err(&instance->pdev->dev, "failed to do reset\n");
2711
2712 return ret_val;
2713 }
2714
2715 /**
2716 * megasas_reset_timer - quiesce the adapter if required
2717 * @scmd: scsi cmnd
2718 *
2719 * Sets the FW busy flag and reduces the host->can_queue if the
2720 * cmd has not been completed within the timeout period.
2721 */
2722 static enum
2723 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2724 {
2725 struct megasas_instance *instance;
2726 unsigned long flags;
2727
2728 if (time_after(jiffies, scmd->jiffies_at_alloc +
2729 (scmd_timeout * 2) * HZ)) {
2730 return BLK_EH_NOT_HANDLED;
2731 }
2732
2733 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2734 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2735 /* A cmd has timed out, so assume FW is busy and throttle IO */
2736 spin_lock_irqsave(instance->host->host_lock, flags);
2737
2738 instance->host->can_queue = instance->throttlequeuedepth;
2739 instance->last_time = jiffies;
2740 instance->flag |= MEGASAS_FW_BUSY;
2741
2742 spin_unlock_irqrestore(instance->host->host_lock, flags);
2743 }
2744 return BLK_EH_RESET_TIMER;
2745 }
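/*
 * Throttling sketch (illustrative): when a cmd times out here, can_queue is
 * reduced to throttlequeuedepth and MEGASAS_FW_BUSY is set; once the FW
 * outstanding count drops below that depth and roughly 5 seconds have
 * elapsed, megasas_check_and_restore_queue_depth() restores cur_can_queue.
 */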
2746
2747 /**
2748 * megasas_dump_frame - This function will dump MPT/MFI frame
2749 */
2750 static inline void
2751 megasas_dump_frame(void *mpi_request, int sz)
2752 {
2753 int i;
2754 __le32 *mfp = (__le32 *)mpi_request;
2755
2756 printk(KERN_INFO "IO request frame:\n\t");
2757 for (i = 0; i < sz / sizeof(__le32); i++) {
2758 if (i && ((i % 8) == 0))
2759 printk("\n\t");
2760 printk("%08x ", le32_to_cpu(mfp[i]));
2761 }
2762 printk("\n");
2763 }
2764
2765 /**
2766 * megasas_reset_bus_host - Bus & host reset handler entry point
2767 */
2768 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2769 {
2770 int ret;
2771 struct megasas_instance *instance;
2772
2773 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2774
2775 scmd_printk(KERN_INFO, scmd,
2776 "Controller reset is requested due to IO timeout\n"
2777 "SCSI command pointer: (%p)\t SCSI host state: %d\t"
2778 " SCSI host busy: %d\t FW outstanding: %d\n",
2779 scmd, scmd->device->host->shost_state,
2780 atomic_read((atomic_t *)&scmd->device->host->host_busy),
2781 atomic_read(&instance->fw_outstanding));
2782
2783 /*
2784 * First wait for all commands to complete
2785 */
2786 if (instance->ctrl_context) {
2787 struct megasas_cmd_fusion *cmd;
2788 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2789 if (cmd)
2790 megasas_dump_frame(cmd->io_request,
2791 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
2792 ret = megasas_reset_fusion(scmd->device->host,
2793 SCSIIO_TIMEOUT_OCR);
2794 } else
2795 ret = megasas_generic_reset(scmd);
2796
2797 return ret;
2798 }
2799
2800 /**
2801 * megasas_task_abort - Issues task abort request to firmware
2802 * (supported only for fusion adapters)
2803 * @scmd: SCSI command pointer
2804 */
2805 static int megasas_task_abort(struct scsi_cmnd *scmd)
2806 {
2807 int ret;
2808 struct megasas_instance *instance;
2809
2810 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2811
2812 if (instance->ctrl_context)
2813 ret = megasas_task_abort_fusion(scmd);
2814 else {
2815 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2816 ret = FAILED;
2817 }
2818
2819 return ret;
2820 }
2821
2822 /**
2823 * megasas_reset_target: Issues target reset request to firmware
2824 * (supported only for fusion adapters)
2825 * @scmd: SCSI command pointer
2826 */
2827 static int megasas_reset_target(struct scsi_cmnd *scmd)
2828 {
2829 int ret;
2830 struct megasas_instance *instance;
2831
2832 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2833
2834 if (instance->ctrl_context)
2835 ret = megasas_reset_target_fusion(scmd);
2836 else {
2837 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2838 ret = FAILED;
2839 }
2840
2841 return ret;
2842 }
2843
2844 /**
2845 * megasas_bios_param - Returns disk geometry for a disk
2846 * @sdev: device handle
2847 * @bdev: block device
2848 * @capacity: drive capacity
2849 * @geom: geometry parameters
2850 */
2851 static int
2852 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2853 sector_t capacity, int geom[])
2854 {
2855 int heads;
2856 int sectors;
2857 sector_t cylinders;
2858 unsigned long tmp;
2859
2860 /* Default heads (64) & sectors (32) */
2861 heads = 64;
2862 sectors = 32;
2863
2864 tmp = heads * sectors;
2865 cylinders = capacity;
2866
2867 sector_div(cylinders, tmp);
2868
2869 /*
2870 * Handle extended translation size for logical drives > 1Gb
2871 */
2872
2873 if (capacity >= 0x200000) {
2874 heads = 255;
2875 sectors = 63;
2876 tmp = heads*sectors;
2877 cylinders = capacity;
2878 sector_div(cylinders, tmp);
2879 }
2880
2881 geom[0] = heads;
2882 geom[1] = sectors;
2883 geom[2] = cylinders;
2884
2885 return 0;
2886 }
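/*
 * Worked example (illustrative): a 500 GB drive has roughly 976773168
 * 512-byte sectors, which is above the 0x200000 (1 GB) threshold, so the
 * extended translation applies: heads = 255, sectors = 63, and
 * cylinders = 976773168 / (255 * 63) = 60801 (integer division).
 */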
2887
2888 static void megasas_aen_polling(struct work_struct *work);
2889
2890 /**
2891 * megasas_service_aen - Processes an event notification
2892 * @instance: Adapter soft state
2893 * @cmd: AEN command completed by the ISR
2894 *
2895 * For AEN, driver sends a command down to FW that is held by the FW till an
2896 * event occurs. When an event of interest occurs, FW completes the command
2897 * that it was previously holding.
2898 *
2899 * This routine sends a SIGIO signal to processes that have registered with the
2900 * driver for AEN.
2901 */
2902 static void
2903 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2904 {
2905 unsigned long flags;
2906
2907 /*
2908 * Don't signal app if it is just an aborted previously registered aen
2909 */
2910 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2911 spin_lock_irqsave(&poll_aen_lock, flags);
2912 megasas_poll_wait_aen = 1;
2913 spin_unlock_irqrestore(&poll_aen_lock, flags);
2914 wake_up(&megasas_poll_wait);
2915 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2916 }
2917 else
2918 cmd->abort_aen = 0;
2919
2920 instance->aen_cmd = NULL;
2921
2922 megasas_return_cmd(instance, cmd);
2923
2924 if ((instance->unload == 0) &&
2925 ((instance->issuepend_done == 1))) {
2926 struct megasas_aen_event *ev;
2927
2928 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
2929 if (!ev) {
2930 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
2931 } else {
2932 ev->instance = instance;
2933 instance->ev = ev;
2934 INIT_DELAYED_WORK(&ev->hotplug_work,
2935 megasas_aen_polling);
2936 schedule_delayed_work(&ev->hotplug_work, 0);
2937 }
2938 }
2939 }
2940
2941 static ssize_t
2942 megasas_fw_crash_buffer_store(struct device *cdev,
2943 struct device_attribute *attr, const char *buf, size_t count)
2944 {
2945 struct Scsi_Host *shost = class_to_shost(cdev);
2946 struct megasas_instance *instance =
2947 (struct megasas_instance *) shost->hostdata;
2948 int val = 0;
2949 unsigned long flags;
2950
2951 if (kstrtoint(buf, 0, &val) != 0)
2952 return -EINVAL;
2953
2954 spin_lock_irqsave(&instance->crashdump_lock, flags);
2955 instance->fw_crash_buffer_offset = val;
2956 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2957 return strlen(buf);
2958 }
2959
2960 static ssize_t
2961 megasas_fw_crash_buffer_show(struct device *cdev,
2962 struct device_attribute *attr, char *buf)
2963 {
2964 struct Scsi_Host *shost = class_to_shost(cdev);
2965 struct megasas_instance *instance =
2966 (struct megasas_instance *) shost->hostdata;
2967 u32 size;
2968 unsigned long buff_addr;
2969 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2970 unsigned long src_addr;
2971 unsigned long flags;
2972 u32 buff_offset;
2973
2974 spin_lock_irqsave(&instance->crashdump_lock, flags);
2975 buff_offset = instance->fw_crash_buffer_offset;
2976 if (!instance->crash_dump_buf &&
2977 !((instance->fw_crash_state == AVAILABLE) ||
2978 (instance->fw_crash_state == COPYING))) {
2979 dev_err(&instance->pdev->dev,
2980 "Firmware crash dump is not available\n");
2981 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2982 return -EINVAL;
2983 }
2984
2985 buff_addr = (unsigned long) buf;
2986
2987 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
2988 dev_err(&instance->pdev->dev,
2989 "Firmware crash dump offset is out of range\n");
2990 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2991 return 0;
2992 }
2993
2994 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2995 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2996
2997 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2998 (buff_offset % dmachunk);
2999 memcpy(buf, (void *)src_addr, size);
3000 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3001
3002 return size;
3003 }
3004
3005 static ssize_t
3006 megasas_fw_crash_buffer_size_show(struct device *cdev,
3007 struct device_attribute *attr, char *buf)
3008 {
3009 struct Scsi_Host *shost = class_to_shost(cdev);
3010 struct megasas_instance *instance =
3011 (struct megasas_instance *) shost->hostdata;
3012
3013 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3014 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3015 }
3016
3017 static ssize_t
3018 megasas_fw_crash_state_store(struct device *cdev,
3019 struct device_attribute *attr, const char *buf, size_t count)
3020 {
3021 struct Scsi_Host *shost = class_to_shost(cdev);
3022 struct megasas_instance *instance =
3023 (struct megasas_instance *) shost->hostdata;
3024 int val = 0;
3025 unsigned long flags;
3026
3027 if (kstrtoint(buf, 0, &val) != 0)
3028 return -EINVAL;
3029
3030 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3031 dev_err(&instance->pdev->dev, "application updates invalid "
3032 "firmware crash state\n");
3033 return -EINVAL;
3034 }
3035
3036 instance->fw_crash_state = val;
3037
3038 if ((val == COPIED) || (val == COPY_ERROR)) {
3039 spin_lock_irqsave(&instance->crashdump_lock, flags);
3040 megasas_free_host_crash_buffer(instance);
3041 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3042 if (val == COPY_ERROR)
3043 dev_info(&instance->pdev->dev, "application failed to "
3044 "copy Firmware crash dump\n");
3045 else
3046 dev_info(&instance->pdev->dev, "Firmware crash dump "
3047 "copied successfully\n");
3048 }
3049 return strlen(buf);
3050 }
3051
3052 static ssize_t
3053 megasas_fw_crash_state_show(struct device *cdev,
3054 struct device_attribute *attr, char *buf)
3055 {
3056 struct Scsi_Host *shost = class_to_shost(cdev);
3057 struct megasas_instance *instance =
3058 (struct megasas_instance *) shost->hostdata;
3059
3060 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3061 }
3062
3063 static ssize_t
3064 megasas_page_size_show(struct device *cdev,
3065 struct device_attribute *attr, char *buf)
3066 {
3067 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3068 }
3069
3070 static ssize_t
3071 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3072 char *buf)
3073 {
3074 struct Scsi_Host *shost = class_to_shost(cdev);
3075 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3076
3077 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3078 }
3079
3080 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3081 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3082 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3083 megasas_fw_crash_buffer_size_show, NULL);
3084 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3085 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3086 static DEVICE_ATTR(page_size, S_IRUGO,
3087 megasas_page_size_show, NULL);
3088 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3089 megasas_ldio_outstanding_show, NULL);
3090
3091 struct device_attribute *megaraid_host_attrs[] = {
3092 &dev_attr_fw_crash_buffer_size,
3093 &dev_attr_fw_crash_buffer,
3094 &dev_attr_fw_crash_state,
3095 &dev_attr_page_size,
3096 &dev_attr_ldio_outstanding,
3097 NULL,
3098 };
3099
3100 /*
3101 * Scsi host template for megaraid_sas driver
3102 */
3103 static struct scsi_host_template megasas_template = {
3104
3105 .module = THIS_MODULE,
3106 .name = "Avago SAS based MegaRAID driver",
3107 .proc_name = "megaraid_sas",
3108 .slave_configure = megasas_slave_configure,
3109 .slave_alloc = megasas_slave_alloc,
3110 .slave_destroy = megasas_slave_destroy,
3111 .queuecommand = megasas_queue_command,
3112 .eh_target_reset_handler = megasas_reset_target,
3113 .eh_abort_handler = megasas_task_abort,
3114 .eh_host_reset_handler = megasas_reset_bus_host,
3115 .eh_timed_out = megasas_reset_timer,
3116 .shost_attrs = megaraid_host_attrs,
3117 .bios_param = megasas_bios_param,
3118 .use_clustering = ENABLE_CLUSTERING,
3119 .change_queue_depth = scsi_change_queue_depth,
3120 .no_write_same = 1,
3121 };
3122
3123 /**
3124 * megasas_complete_int_cmd - Completes an internal command
3125 * @instance: Adapter soft state
3126 * @cmd: Command to be completed
3127 *
3128 * The megasas_issue_blocked_cmd() function waits for a command to complete
3129 * after it issues a command. This function wakes up that waiting routine by
3130 * calling wake_up() on the wait queue.
3131 */
3132 static void
3133 megasas_complete_int_cmd(struct megasas_instance *instance,
3134 struct megasas_cmd *cmd)
3135 {
3136 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3137 wake_up(&instance->int_cmd_wait_q);
3138 }
3139
3140 /**
3141 * megasas_complete_abort - Completes aborting a command
3142 * @instance: Adapter soft state
3143 * @cmd: Cmd that was issued to abort another cmd
3144 *
3145 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3146 * after it issues an abort on a previously issued command. This function
3147 * wakes up all functions waiting on the same wait queue.
3148 */
3149 static void
3150 megasas_complete_abort(struct megasas_instance *instance,
3151 struct megasas_cmd *cmd)
3152 {
3153 if (cmd->sync_cmd) {
3154 cmd->sync_cmd = 0;
3155 cmd->cmd_status_drv = 0;
3156 wake_up(&instance->abort_cmd_wait_q);
3157 }
3158 }
3159
3160 /**
3161 * megasas_complete_cmd - Completes a command
3162 * @instance: Adapter soft state
3163 * @cmd: Command to be completed
3164 * @alt_status: If non-zero, use this value as status to
3165 * SCSI mid-layer instead of the value returned
3166 * by the FW. This should be used if caller wants
3167 * an alternate status (as in the case of aborted
3168 * commands)
3169 */
3170 void
3171 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3172 u8 alt_status)
3173 {
3174 int exception = 0;
3175 struct megasas_header *hdr = &cmd->frame->hdr;
3176 unsigned long flags;
3177 struct fusion_context *fusion = instance->ctrl_context;
3178 u32 opcode, status;
3179
3180 /* flag for the retry reset */
3181 cmd->retry_for_fw_reset = 0;
3182
3183 if (cmd->scmd)
3184 cmd->scmd->SCp.ptr = NULL;
3185
3186 switch (hdr->cmd) {
3187 case MFI_CMD_INVALID:
3188 /* Some older 1068 controller FW may keep a pended
3189 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3190 when booting the kdump kernel. Ignore this command to
3191 prevent a kernel panic on shutdown of the kdump kernel. */
3192 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3193 "completed\n");
3194 dev_warn(&instance->pdev->dev, "If you have a controller "
3195 "other than PERC5, please upgrade your firmware\n");
3196 break;
3197 case MFI_CMD_PD_SCSI_IO:
3198 case MFI_CMD_LD_SCSI_IO:
3199
3200 /*
3201 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3202 * issued either through an IO path or an IOCTL path. If it
3203 * was via IOCTL, we will send it to internal completion.
3204 */
3205 if (cmd->sync_cmd) {
3206 cmd->sync_cmd = 0;
3207 megasas_complete_int_cmd(instance, cmd);
3208 break;
3209 }
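/* fall through - IO-path SCSI cmds are completed below */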
3210
3211 case MFI_CMD_LD_READ:
3212 case MFI_CMD_LD_WRITE:
3213
3214 if (alt_status) {
3215 cmd->scmd->result = alt_status << 16;
3216 exception = 1;
3217 }
3218
3219 if (exception) {
3220
3221 atomic_dec(&instance->fw_outstanding);
3222
3223 scsi_dma_unmap(cmd->scmd);
3224 cmd->scmd->scsi_done(cmd->scmd);
3225 megasas_return_cmd(instance, cmd);
3226
3227 break;
3228 }
3229
3230 switch (hdr->cmd_status) {
3231
3232 case MFI_STAT_OK:
3233 cmd->scmd->result = DID_OK << 16;
3234 break;
3235
3236 case MFI_STAT_SCSI_IO_FAILED:
3237 case MFI_STAT_LD_INIT_IN_PROGRESS:
3238 cmd->scmd->result =
3239 (DID_ERROR << 16) | hdr->scsi_status;
3240 break;
3241
3242 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3243
3244 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3245
3246 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3247 memset(cmd->scmd->sense_buffer, 0,
3248 SCSI_SENSE_BUFFERSIZE);
3249 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3250 hdr->sense_len);
3251
3252 cmd->scmd->result |= DRIVER_SENSE << 24;
3253 }
3254
3255 break;
3256
3257 case MFI_STAT_LD_OFFLINE:
3258 case MFI_STAT_DEVICE_NOT_FOUND:
3259 cmd->scmd->result = DID_BAD_TARGET << 16;
3260 break;
3261
3262 default:
3263 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3264 hdr->cmd_status);
3265 cmd->scmd->result = DID_ERROR << 16;
3266 break;
3267 }
3268
3269 atomic_dec(&instance->fw_outstanding);
3270
3271 scsi_dma_unmap(cmd->scmd);
3272 cmd->scmd->scsi_done(cmd->scmd);
3273 megasas_return_cmd(instance, cmd);
3274
3275 break;
3276
3277 case MFI_CMD_SMP:
3278 case MFI_CMD_STP:
3279 case MFI_CMD_DCMD:
3280 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3281 /* Check for LD map update */
3282 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3283 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3284 fusion->fast_path_io = 0;
3285 spin_lock_irqsave(instance->host->host_lock, flags);
3286 instance->map_update_cmd = NULL;
3287 if (cmd->frame->hdr.cmd_status != 0) {
3288 if (cmd->frame->hdr.cmd_status !=
3289 MFI_STAT_NOT_FOUND)
3290 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3291 cmd->frame->hdr.cmd_status);
3292 else {
3293 megasas_return_cmd(instance, cmd);
3294 spin_unlock_irqrestore(
3295 instance->host->host_lock,
3296 flags);
3297 break;
3298 }
3299 } else
3300 instance->map_id++;
3301 megasas_return_cmd(instance, cmd);
3302
3303 /*
3304 * Set fast path IO to ZERO.
3305 * Validate Map will set proper value.
3306 * Meanwhile all IOs will go as LD IO.
3307 */
3308 if (MR_ValidateMapInfo(instance))
3309 fusion->fast_path_io = 1;
3310 else
3311 fusion->fast_path_io = 0;
3312 megasas_sync_map_info(instance);
3313 spin_unlock_irqrestore(instance->host->host_lock,
3314 flags);
3315 break;
3316 }
3317 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3318 opcode == MR_DCMD_CTRL_EVENT_GET) {
3319 spin_lock_irqsave(&poll_aen_lock, flags);
3320 megasas_poll_wait_aen = 0;
3321 spin_unlock_irqrestore(&poll_aen_lock, flags);
3322 }
3323
3324 /* FW has an updated PD sequence */
3325 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3326 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3327
3328 spin_lock_irqsave(instance->host->host_lock, flags);
3329 status = cmd->frame->hdr.cmd_status;
3330 instance->jbod_seq_cmd = NULL;
3331 megasas_return_cmd(instance, cmd);
3332
3333 if (status == MFI_STAT_OK) {
3334 instance->pd_seq_map_id++;
3335 /* Re-register a pd sync seq num cmd */
3336 if (megasas_sync_pd_seq_num(instance, true))
3337 instance->use_seqnum_jbod_fp = false;
3338 } else
3339 instance->use_seqnum_jbod_fp = false;
3340
3341 spin_unlock_irqrestore(instance->host->host_lock, flags);
3342 break;
3343 }
3344
3345 /*
3346 * See if got an event notification
3347 */
3348 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3349 megasas_service_aen(instance, cmd);
3350 else
3351 megasas_complete_int_cmd(instance, cmd);
3352
3353 break;
3354
3355 case MFI_CMD_ABORT:
3356 /*
3357 * Cmd issued to abort another cmd returned
3358 */
3359 megasas_complete_abort(instance, cmd);
3360 break;
3361
3362 default:
3363 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3364 hdr->cmd);
3365 break;
3366 }
3367 }
3368
3369 /**
3370 * megasas_issue_pending_cmds_again - issue all pending cmds
3371 * to the FW again after an FW reset
3372 * @instance: Adapter soft state
3373 */
3374 static inline void
3375 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3376 {
3377 struct megasas_cmd *cmd;
3378 struct list_head clist_local;
3379 union megasas_evt_class_locale class_locale;
3380 unsigned long flags;
3381 u32 seq_num;
3382
3383 INIT_LIST_HEAD(&clist_local);
3384 spin_lock_irqsave(&instance->hba_lock, flags);
3385 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3386 spin_unlock_irqrestore(&instance->hba_lock, flags);
3387
3388 while (!list_empty(&clist_local)) {
3389 cmd = list_entry((&clist_local)->next,
3390 struct megasas_cmd, list);
3391 list_del_init(&cmd->list);
3392
3393 if (cmd->sync_cmd || cmd->scmd) {
3394 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3395 "detected to be pending during HBA reset\n",
3396 cmd, cmd->scmd, cmd->sync_cmd);
3397
3398 cmd->retry_for_fw_reset++;
3399
3400 if (cmd->retry_for_fw_reset == 3) {
3401 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3402 "was tried multiple times during reset. "
3403 "Shutting down the HBA\n",
3404 cmd, cmd->scmd, cmd->sync_cmd);
3405 instance->instancet->disable_intr(instance);
3406 atomic_set(&instance->fw_reset_no_pci_access, 1);
3407 megaraid_sas_kill_hba(instance);
3408 return;
3409 }
3410 }
3411
3412 if (cmd->sync_cmd == 1) {
3413 if (cmd->scmd) {
3414 dev_notice(&instance->pdev->dev, "unexpected "
3415 "cmd attached to internal command!\n");
3416 }
3417 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3418 "on the internal reset queue, "
3419 "issue it again.\n", cmd);
3420 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3421 instance->instancet->fire_cmd(instance,
3422 cmd->frame_phys_addr,
3423 0, instance->reg_set);
3424 } else if (cmd->scmd) {
3425 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3426 "detected on the internal queue, issue again.\n",
3427 cmd, cmd->scmd->cmnd[0]);
3428
3429 atomic_inc(&instance->fw_outstanding);
3430 instance->instancet->fire_cmd(instance,
3431 cmd->frame_phys_addr,
3432 cmd->frame_count-1, instance->reg_set);
3433 } else {
3434 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3435 "internal reset defer list while re-issue!!\n",
3436 cmd);
3437 }
3438 }
3439
3440 if (instance->aen_cmd) {
3441 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3442 megasas_return_cmd(instance, instance->aen_cmd);
3443
3444 instance->aen_cmd = NULL;
3445 }
3446
3447 /*
3448 * Initiate AEN (Asynchronous Event Notification)
3449 */
3450 seq_num = instance->last_seq_num;
3451 class_locale.members.reserved = 0;
3452 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3453 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3454
3455 megasas_register_aen(instance, seq_num, class_locale.word);
3456 }
3457
3458 /**
3459 * Move the internal reset pending commands to a deferred queue.
3460 *
3461 * We move the commands pending at internal reset time to a
3462 * pending queue. This queue is flushed after successful
3463 * completion of the internal reset sequence. If the internal reset
3464 * does not complete in time, the kernel reset handler flushes
3465 * these commands.
3466 **/
3467 static void
3468 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3469 {
3470 struct megasas_cmd *cmd;
3471 int i;
3472 u16 max_cmd = instance->max_fw_cmds;
3473 u32 defer_index;
3474 unsigned long flags;
3475
3476 defer_index = 0;
3477 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3478 for (i = 0; i < max_cmd; i++) {
3479 cmd = instance->cmd_list[i];
3480 if (cmd->sync_cmd == 1 || cmd->scmd) {
3481 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3482 "on the defer queue as internal\n",
3483 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3484
3485 if (!list_empty(&cmd->list)) {
3486 dev_notice(&instance->pdev->dev, "ERROR while"
3487 " moving this cmd:%p, %d %p, it was"
3488 "discovered on some list?\n",
3489 cmd, cmd->sync_cmd, cmd->scmd);
3490
3491 list_del_init(&cmd->list);
3492 }
3493 defer_index++;
3494 list_add_tail(&cmd->list,
3495 &instance->internal_reset_pending_q);
3496 }
3497 }
3498 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3499 }
3500
3501
3502 static void
3503 process_fw_state_change_wq(struct work_struct *work)
3504 {
3505 struct megasas_instance *instance =
3506 container_of(work, struct megasas_instance, work_init);
3507 u32 wait;
3508 unsigned long flags;
3509
3510 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3511 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3512 atomic_read(&instance->adprecovery));
3513 return ;
3514 }
3515
3516 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3517 dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3518 "state, restarting it...\n");
3519
3520 instance->instancet->disable_intr(instance);
3521 atomic_set(&instance->fw_outstanding, 0);
3522
3523 atomic_set(&instance->fw_reset_no_pci_access, 1);
3524 instance->instancet->adp_reset(instance, instance->reg_set);
3525 atomic_set(&instance->fw_reset_no_pci_access, 0);
3526
3527 dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3528 "initiating next stage...\n");
3529
3530 dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3531 "state 2 starting...\n");
3532
3533 /* wait for about 30 seconds before starting the second stage of init */
3534 for (wait = 0; wait < 30; wait++) {
3535 msleep(1000);
3536 }
3537
3538 if (megasas_transition_to_ready(instance, 1)) {
3539 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3540
3541 atomic_set(&instance->fw_reset_no_pci_access, 1);
3542 megaraid_sas_kill_hba(instance);
3543 return ;
3544 }
3545
3546 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3547 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3548 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3549 ) {
3550 *instance->consumer = *instance->producer;
3551 } else {
3552 *instance->consumer = 0;
3553 *instance->producer = 0;
3554 }
3555
3556 megasas_issue_init_mfi(instance);
3557
3558 spin_lock_irqsave(&instance->hba_lock, flags);
3559 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3560 spin_unlock_irqrestore(&instance->hba_lock, flags);
3561 instance->instancet->enable_intr(instance);
3562
3563 megasas_issue_pending_cmds_again(instance);
3564 instance->issuepend_done = 1;
3565 }
3566 }
3567
3568 /**
3569 * megasas_deplete_reply_queue - Processes all completed commands
3570 * @instance: Adapter soft state
3571 * @alt_status: Alternate status to be returned to
3572 * SCSI mid-layer instead of the status
3573 * returned by the FW
3574 * Note: this must be called with hba lock held
3575 */
3576 static int
3577 megasas_deplete_reply_queue(struct megasas_instance *instance,
3578 u8 alt_status)
3579 {
3580 u32 mfiStatus;
3581 u32 fw_state;
3582
3583 if ((mfiStatus = instance->instancet->check_reset(instance,
3584 instance->reg_set)) == 1) {
3585 return IRQ_HANDLED;
3586 }
3587
3588 if ((mfiStatus = instance->instancet->clear_intr(
3589 instance->reg_set)
3590 ) == 0) {
3591 /* Hardware may not set outbound_intr_status in MSI-X mode */
3592 if (!instance->msix_vectors)
3593 return IRQ_NONE;
3594 }
3595
3596 instance->mfiStatus = mfiStatus;
3597
3598 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3599 fw_state = instance->instancet->read_fw_status_reg(
3600 instance->reg_set) & MFI_STATE_MASK;
3601
3602 if (fw_state != MFI_STATE_FAULT) {
3603 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3604 fw_state);
3605 }
3606
3607 if ((fw_state == MFI_STATE_FAULT) &&
3608 (instance->disableOnlineCtrlReset == 0)) {
3609 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3610
3611 if ((instance->pdev->device ==
3612 PCI_DEVICE_ID_LSI_SAS1064R) ||
3613 (instance->pdev->device ==
3614 PCI_DEVICE_ID_DELL_PERC5) ||
3615 (instance->pdev->device ==
3616 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3617
3618 *instance->consumer =
3619 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3620 }
3621
3622
3623 instance->instancet->disable_intr(instance);
3624 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3625 instance->issuepend_done = 0;
3626
3627 atomic_set(&instance->fw_outstanding, 0);
3628 megasas_internal_reset_defer_cmds(instance);
3629
3630 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3631 fw_state, atomic_read(&instance->adprecovery));
3632
3633 schedule_work(&instance->work_init);
3634 return IRQ_HANDLED;
3635
3636 } else {
3637 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3638 fw_state, instance->disableOnlineCtrlReset);
3639 }
3640 }
3641
3642 tasklet_schedule(&instance->isr_tasklet);
3643 return IRQ_HANDLED;
3644 }
3645 /**
3646 * megasas_isr - isr entry point
3647 */
3648 static irqreturn_t megasas_isr(int irq, void *devp)
3649 {
3650 struct megasas_irq_context *irq_context = devp;
3651 struct megasas_instance *instance = irq_context->instance;
3652 unsigned long flags;
3653 irqreturn_t rc;
3654
3655 if (atomic_read(&instance->fw_reset_no_pci_access))
3656 return IRQ_HANDLED;
3657
3658 spin_lock_irqsave(&instance->hba_lock, flags);
3659 rc = megasas_deplete_reply_queue(instance, DID_OK);
3660 spin_unlock_irqrestore(&instance->hba_lock, flags);
3661
3662 return rc;
3663 }
3664
3665 /**
3666 * megasas_transition_to_ready - Move the FW to READY state
3667 * @instance: Adapter soft state
3668 *
3669 * During initialization, the FW can be in any one of several possible
3670 * states. If the FW is in the operational or waiting-for-handshake states,
3671 * the driver must take steps to bring it to the ready state. Otherwise, the
3672 * driver has to wait for the FW to reach the ready state on its own.
3673 */
3674 int
3675 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3676 {
3677 int i;
3678 u8 max_wait;
3679 u32 fw_state;
3680 u32 cur_state;
3681 u32 abs_state, curr_abs_state;
3682
3683 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
3684 fw_state = abs_state & MFI_STATE_MASK;
3685
3686 if (fw_state != MFI_STATE_READY)
3687 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3688 " state\n");
3689
3690 while (fw_state != MFI_STATE_READY) {
3691
3692 switch (fw_state) {
3693
3694 case MFI_STATE_FAULT:
3695 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3696 if (ocr) {
3697 max_wait = MEGASAS_RESET_WAIT_TIME;
3698 cur_state = MFI_STATE_FAULT;
3699 break;
3700 } else
3701 return -ENODEV;
3702
3703 case MFI_STATE_WAIT_HANDSHAKE:
3704 /*
3705 * Set the CLR bit in inbound doorbell
3706 */
3707 if ((instance->pdev->device ==
3708 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3709 (instance->pdev->device ==
3710 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3711 (instance->ctrl_context))
3712 writel(
3713 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3714 &instance->reg_set->doorbell);
3715 else
3716 writel(
3717 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3718 &instance->reg_set->inbound_doorbell);
3719
3720 max_wait = MEGASAS_RESET_WAIT_TIME;
3721 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3722 break;
3723
3724 case MFI_STATE_BOOT_MESSAGE_PENDING:
3725 if ((instance->pdev->device ==
3726 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3727 (instance->pdev->device ==
3728 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3729 (instance->ctrl_context))
3730 writel(MFI_INIT_HOTPLUG,
3731 &instance->reg_set->doorbell);
3732 else
3733 writel(MFI_INIT_HOTPLUG,
3734 &instance->reg_set->inbound_doorbell);
3735
3736 max_wait = MEGASAS_RESET_WAIT_TIME;
3737 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3738 break;
3739
3740 case MFI_STATE_OPERATIONAL:
3741 /*
3742 * Bring it to READY state; assuming max wait 10 secs
3743 */
3744 instance->instancet->disable_intr(instance);
3745 if ((instance->pdev->device ==
3746 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3747 (instance->pdev->device ==
3748 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3749 (instance->ctrl_context)) {
3750 writel(MFI_RESET_FLAGS,
3751 &instance->reg_set->doorbell);
3752
3753 if (instance->ctrl_context) {
3754 for (i = 0; i < (10 * 1000); i += 20) {
3755 if (readl(
3756 &instance->
3757 reg_set->
3758 doorbell) & 1)
3759 msleep(20);
3760 else
3761 break;
3762 }
3763 }
3764 } else
3765 writel(MFI_RESET_FLAGS,
3766 &instance->reg_set->inbound_doorbell);
3767
3768 max_wait = MEGASAS_RESET_WAIT_TIME;
3769 cur_state = MFI_STATE_OPERATIONAL;
3770 break;
3771
3772 case MFI_STATE_UNDEFINED:
3773 /*
3774 * This state should not last for more than 2 seconds
3775 */
3776 max_wait = MEGASAS_RESET_WAIT_TIME;
3777 cur_state = MFI_STATE_UNDEFINED;
3778 break;
3779
3780 case MFI_STATE_BB_INIT:
3781 max_wait = MEGASAS_RESET_WAIT_TIME;
3782 cur_state = MFI_STATE_BB_INIT;
3783 break;
3784
3785 case MFI_STATE_FW_INIT:
3786 max_wait = MEGASAS_RESET_WAIT_TIME;
3787 cur_state = MFI_STATE_FW_INIT;
3788 break;
3789
3790 case MFI_STATE_FW_INIT_2:
3791 max_wait = MEGASAS_RESET_WAIT_TIME;
3792 cur_state = MFI_STATE_FW_INIT_2;
3793 break;
3794
3795 case MFI_STATE_DEVICE_SCAN:
3796 max_wait = MEGASAS_RESET_WAIT_TIME;
3797 cur_state = MFI_STATE_DEVICE_SCAN;
3798 break;
3799
3800 case MFI_STATE_FLUSH_CACHE:
3801 max_wait = MEGASAS_RESET_WAIT_TIME;
3802 cur_state = MFI_STATE_FLUSH_CACHE;
3803 break;
3804
3805 default:
3806 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3807 fw_state);
3808 return -ENODEV;
3809 }
3810
3811 /*
3812 * The cur_state should not last for more than max_wait secs
3813 */
3814 for (i = 0; i < (max_wait * 1000); i++) {
3815 curr_abs_state = instance->instancet->
3816 read_fw_status_reg(instance->reg_set);
3817
3818 if (abs_state == curr_abs_state) {
3819 msleep(1);
3820 } else
3821 break;
3822 }
3823
3824 /*
3825 * Return error if fw_state hasn't changed after max_wait
3826 */
3827 if (curr_abs_state == abs_state) {
3828 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3829 "in %d secs\n", fw_state, max_wait);
3830 return -ENODEV;
3831 }
3832
3833 abs_state = curr_abs_state;
3834 fw_state = curr_abs_state & MFI_STATE_MASK;
3835 }
3836 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3837
3838 return 0;
3839 }
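/*
 * Illustrative sketch (compiled out, not part of the driver): how callers
 * such as megasas_init_fw() typically use megasas_transition_to_ready() --
 * try once and, if the FW is wedged, issue an adapter reset and retry after
 * a delay.  The helper name example_bring_fw_to_ready() is hypothetical.
 */
#if 0
static int example_bring_fw_to_ready(struct megasas_instance *instance)
{
	if (!megasas_transition_to_ready(instance, 0))
		return 0;

	/* FW did not reach READY: reset the adapter and try once more */
	instance->instancet->adp_reset(instance, instance->reg_set);
	ssleep(30);

	return megasas_transition_to_ready(instance, 0);
}
#endif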
3840
3841 /**
3842 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3843 * @instance: Adapter soft state
3844 */
3845 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3846 {
3847 int i;
3848 u16 max_cmd = instance->max_mfi_cmds;
3849 struct megasas_cmd *cmd;
3850
3851 if (!instance->frame_dma_pool)
3852 return;
3853
3854 /*
3855 * Return all frames to pool
3856 */
3857 for (i = 0; i < max_cmd; i++) {
3858
3859 cmd = instance->cmd_list[i];
3860
3861 if (cmd->frame)
3862 pci_pool_free(instance->frame_dma_pool, cmd->frame,
3863 cmd->frame_phys_addr);
3864
3865 if (cmd->sense)
3866 pci_pool_free(instance->sense_dma_pool, cmd->sense,
3867 cmd->sense_phys_addr);
3868 }
3869
3870 /*
3871 * Now destroy the pool itself
3872 */
3873 pci_pool_destroy(instance->frame_dma_pool);
3874 pci_pool_destroy(instance->sense_dma_pool);
3875
3876 instance->frame_dma_pool = NULL;
3877 instance->sense_dma_pool = NULL;
3878 }
3879
3880 /**
3881 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3882 * @instance: Adapter soft state
3883 *
3884 * Each command packet has an embedded DMA memory buffer that is used for
3885 * filling MFI frame and the SG list that immediately follows the frame. This
3886 * function creates those DMA memory buffers for each command packet by using
3887 * PCI pool facility.
3888 */
3889 static int megasas_create_frame_pool(struct megasas_instance *instance)
3890 {
3891 int i;
3892 u16 max_cmd;
3893 u32 sge_sz;
3894 u32 frame_count;
3895 struct megasas_cmd *cmd;
3896
3897 max_cmd = instance->max_mfi_cmds;
3898
3899 /*
3900 * Size of our frame is 64 bytes for MFI frame, followed by max SG
3901 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
3902 */
3903 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
3904 sizeof(struct megasas_sge32);
3905
3906 if (instance->flag_ieee)
3907 sge_sz = sizeof(struct megasas_sge_skinny);
3908
3909 /*
3910 * For MFI controllers:
3911 * max_num_sge = 60
3912 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
3913 * Total 960 bytes (15 MFI frames of 64 bytes each)
3914 *
3915 * Fusion adapters require only 3 extra frames:
3916 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
3917 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
3918 * Total 192 bytes (3 MFI frames of 64 bytes each)
3919 */
3920 frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
3921 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
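/*
 * Worked example (assuming MEGAMFI_FRAME_SIZE is the 64-byte MFI frame):
 * Fusion: (3 + 1) * 64 = 256 bytes per command; MFI: (15 + 1) * 64 = 1024
 * bytes per command for the frame plus its SGL area.
 */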
3922 /*
3923 * Use DMA pool facility provided by PCI layer
3924 */
3925 instance->frame_dma_pool = pci_pool_create("megasas frame pool",
3926 instance->pdev, instance->mfi_frame_size,
3927 256, 0);
3928
3929 if (!instance->frame_dma_pool) {
3930 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
3931 return -ENOMEM;
3932 }
3933
3934 instance->sense_dma_pool = pci_pool_create("megasas sense pool",
3935 instance->pdev, 128, 4, 0);
3936
3937 if (!instance->sense_dma_pool) {
3938 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
3939
3940 pci_pool_destroy(instance->frame_dma_pool);
3941 instance->frame_dma_pool = NULL;
3942
3943 return -ENOMEM;
3944 }
3945
3946 /*
3947 * Allocate and attach a frame to each of the commands in cmd_list.
3948 * By making cmd->index as the context instead of the &cmd, we can
3949 * always use 32bit context regardless of the architecture
3950 */
3951 for (i = 0; i < max_cmd; i++) {
3952
3953 cmd = instance->cmd_list[i];
3954
3955 cmd->frame = pci_pool_alloc(instance->frame_dma_pool,
3956 GFP_KERNEL, &cmd->frame_phys_addr);
3957
3958 cmd->sense = pci_pool_alloc(instance->sense_dma_pool,
3959 GFP_KERNEL, &cmd->sense_phys_addr);
3960
3961 /*
3962 * megasas_teardown_frame_pool() takes care of freeing
3963 * whatever has been allocated
3964 */
3965 if (!cmd->frame || !cmd->sense) {
3966 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
3967 megasas_teardown_frame_pool(instance);
3968 return -ENOMEM;
3969 }
3970
3971 memset(cmd->frame, 0, instance->mfi_frame_size);
3972 cmd->frame->io.context = cpu_to_le32(cmd->index);
3973 cmd->frame->io.pad_0 = 0;
3974 if (!instance->ctrl_context && reset_devices)
3975 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
3976 }
3977
3978 return 0;
3979 }
3980
3981 /**
3982 * megasas_free_cmds - Free all the cmds in the free cmd pool
3983 * @instance: Adapter soft state
3984 */
3985 void megasas_free_cmds(struct megasas_instance *instance)
3986 {
3987 int i;
3988
3989 /* First free the MFI frame pool */
3990 megasas_teardown_frame_pool(instance);
3991
3992 /* Free all the commands in the cmd_list */
3993 for (i = 0; i < instance->max_mfi_cmds; i++)
3994
3995 kfree(instance->cmd_list[i]);
3996
3997 /* Free the cmd_list buffer itself */
3998 kfree(instance->cmd_list);
3999 instance->cmd_list = NULL;
4000
4001 INIT_LIST_HEAD(&instance->cmd_pool);
4002 }
4003
4004 /**
4005 * megasas_alloc_cmds - Allocates the command packets
4006 * @instance: Adapter soft state
4007 *
4008 * Each command that is issued to the FW, whether an IO command from the OS
4009 * or an internal command like an IOCTL, is wrapped in a local data structure
4010 * called megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4011 * the FW.
4012 *
4013 * Each frame has a 32-bit field called context (tag). This context is used
4014 * to get back the megasas_cmd from the frame when a frame gets completed in
4015 * the ISR. Typically the address of the megasas_cmd itself would be used as
4016 * the context. But we wanted to keep the differences between 32 and 64 bit
4017 * systems to the minimum. We always use 32-bit integers for the context. In
4018 * this driver, the 32 bit values are the indices into an array cmd_list.
4019 * This array is used only to look up the megasas_cmd given the context. The
4020 * free commands themselves are maintained in a linked list called cmd_pool.
4021 */
4022 int megasas_alloc_cmds(struct megasas_instance *instance)
4023 {
4024 int i;
4025 int j;
4026 u16 max_cmd;
4027 struct megasas_cmd *cmd;
4028 struct fusion_context *fusion;
4029
4030 fusion = instance->ctrl_context;
4031 max_cmd = instance->max_mfi_cmds;
4032
4033 /*
4034 * instance->cmd_list is an array of struct megasas_cmd pointers.
4035 * Allocate the dynamic array first and then allocate individual
4036 * commands.
4037 */
4038 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4039
4040 if (!instance->cmd_list) {
4041 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4042 return -ENOMEM;
4043 }
4044
4045 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4046
4047 for (i = 0; i < max_cmd; i++) {
4048 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4049 GFP_KERNEL);
4050
4051 if (!instance->cmd_list[i]) {
4052
4053 for (j = 0; j < i; j++)
4054 kfree(instance->cmd_list[j]);
4055
4056 kfree(instance->cmd_list);
4057 instance->cmd_list = NULL;
4058
4059 return -ENOMEM;
4060 }
4061 }
4062
4063 for (i = 0; i < max_cmd; i++) {
4064 cmd = instance->cmd_list[i];
4065 memset(cmd, 0, sizeof(struct megasas_cmd));
4066 cmd->index = i;
4067 cmd->scmd = NULL;
4068 cmd->instance = instance;
4069
4070 list_add_tail(&cmd->list, &instance->cmd_pool);
4071 }
4072
4073 /*
4074 * Create a frame pool and assign one frame to each cmd
4075 */
4076 if (megasas_create_frame_pool(instance)) {
4077 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4078 megasas_free_cmds(instance);
4079 }
4080
4081 return 0;
4082 }
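/*
 * Illustrative sketch (compiled out, not part of the driver): the completion
 * path recovers the owning megasas_cmd from the 32-bit context stored in the
 * frame; the context is simply an index into instance->cmd_list.  The helper
 * name example_cmd_from_context() is hypothetical.
 */
#if 0
static struct megasas_cmd *example_cmd_from_context(
		struct megasas_instance *instance, u32 context)
{
	if (context >= instance->max_mfi_cmds)
		return NULL;

	return instance->cmd_list[context];
}
#endif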
4083
4084 /*
4085 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4086 * @instance: Adapter soft state
4087 *
4088 * Returns KILL_ADAPTER for non-Fusion (MFI) adapters, IGNORE_TIMEOUT when a
4089 * driver unload or an OCR is already in progress, and INITIATE_OCR otherwise.
4090 */
4091 inline int
4092 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4093
4094 if (!instance->ctrl_context)
4095 return KILL_ADAPTER;
4096 else if (instance->unload ||
4097 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4098 return IGNORE_TIMEOUT;
4099 else
4100 return INITIATE_OCR;
4101 }
4102
4103 static void
4104 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4105 {
4106 int ret;
4107 struct megasas_cmd *cmd;
4108 struct megasas_dcmd_frame *dcmd;
4109
4110 struct MR_PRIV_DEVICE *mr_device_priv_data;
4111 u16 device_id = 0;
4112
4113 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4114 cmd = megasas_get_cmd(instance);
4115
4116 if (!cmd) {
4117 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4118 return;
4119 }
4120
4121 dcmd = &cmd->frame->dcmd;
4122
4123 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4124 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4125
4126 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4127 dcmd->cmd = MFI_CMD_DCMD;
4128 dcmd->cmd_status = 0xFF;
4129 dcmd->sge_count = 1;
4130 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4131 dcmd->timeout = 0;
4132 dcmd->pad_0 = 0;
4133 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4134 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4135 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
4136 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
4137
4138 if (instance->ctrl_context && !instance->mask_interrupts)
4139 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4140 else
4141 ret = megasas_issue_polled(instance, cmd);
4142
4143 switch (ret) {
4144 case DCMD_SUCCESS:
4145 mr_device_priv_data = sdev->hostdata;
4146 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4147 mr_device_priv_data->interface_type =
4148 instance->pd_info->state.ddf.pdType.intf;
4149 break;
4150
4151 case DCMD_TIMEOUT:
4152
4153 switch (dcmd_timeout_ocr_possible(instance)) {
4154 case INITIATE_OCR:
4155 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4156 megasas_reset_fusion(instance->host,
4157 MFI_IO_TIMEOUT_OCR);
4158 break;
4159 case KILL_ADAPTER:
4160 megaraid_sas_kill_hba(instance);
4161 break;
4162 case IGNORE_TIMEOUT:
4163 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4164 __func__, __LINE__);
4165 break;
4166 }
4167
4168 break;
4169 }
4170
4171 if (ret != DCMD_TIMEOUT)
4172 megasas_return_cmd(instance, cmd);
4173
4174 return;
4175 }
4176 /*
4177 * megasas_get_pd_list - Returns FW's pd_list structure
4178 * @instance: Adapter soft state
4179 *
4180 * Issues an internal command (DCMD) to get the FW's controller PD
4181 * list structure. This information is mainly used to find out which
4182 * physical drives are exposed by the FW as system PDs.
4184 */
4185 static int
4186 megasas_get_pd_list(struct megasas_instance *instance)
4187 {
4188 int ret = 0, pd_index = 0;
4189 struct megasas_cmd *cmd;
4190 struct megasas_dcmd_frame *dcmd;
4191 struct MR_PD_LIST *ci;
4192 struct MR_PD_ADDRESS *pd_addr;
4193 dma_addr_t ci_h = 0;
4194
4195 if (instance->pd_list_not_supported) {
4196 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4197 "not supported by firmware\n");
4198 return ret;
4199 }
4200
4201 cmd = megasas_get_cmd(instance);
4202
4203 if (!cmd) {
4204 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4205 return -ENOMEM;
4206 }
4207
4208 dcmd = &cmd->frame->dcmd;
4209
4210 ci = pci_alloc_consistent(instance->pdev,
4211 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
4212
4213 if (!ci) {
4214 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
4215 megasas_return_cmd(instance, cmd);
4216 return -ENOMEM;
4217 }
4218
4219 memset(ci, 0, sizeof(*ci));
4220 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4221
4222 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4223 dcmd->mbox.b[1] = 0;
4224 dcmd->cmd = MFI_CMD_DCMD;
4225 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4226 dcmd->sge_count = 1;
4227 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4228 dcmd->timeout = 0;
4229 dcmd->pad_0 = 0;
4230 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4231 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4232 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4233 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4234
4235 if (instance->ctrl_context && !instance->mask_interrupts)
4236 ret = megasas_issue_blocked_cmd(instance, cmd,
4237 MFI_IO_TIMEOUT_SECS);
4238 else
4239 ret = megasas_issue_polled(instance, cmd);
4240
4241 switch (ret) {
4242 case DCMD_FAILED:
4243 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4244 "failed/not supported by firmware\n");
4245
4246 if (instance->ctrl_context)
4247 megaraid_sas_kill_hba(instance);
4248 else
4249 instance->pd_list_not_supported = 1;
4250 break;
4251 case DCMD_TIMEOUT:
4252
4253 switch (dcmd_timeout_ocr_possible(instance)) {
4254 case INITIATE_OCR:
4255 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4256 /*
4257 * DCMD timed out from the AEN path.
4258 * The AEN path already holds reset_mutex to avoid PCI access
4259 * while OCR is in progress.
4260 */
4261 mutex_unlock(&instance->reset_mutex);
4262 megasas_reset_fusion(instance->host,
4263 MFI_IO_TIMEOUT_OCR);
4264 mutex_lock(&instance->reset_mutex);
4265 break;
4266 case KILL_ADAPTER:
4267 megaraid_sas_kill_hba(instance);
4268 break;
4269 case IGNORE_TIMEOUT:
4270 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4271 __func__, __LINE__);
4272 break;
4273 }
4274
4275 break;
4276
4277 case DCMD_SUCCESS:
4278 pd_addr = ci->addr;
4279
4280 if ((le32_to_cpu(ci->count) >
4281 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4282 break;
4283
4284 memset(instance->local_pd_list, 0,
4285 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4286
4287 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4288 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4289 le16_to_cpu(pd_addr->deviceId);
4290 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4291 pd_addr->scsiDevType;
4292 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4293 MR_PD_STATE_SYSTEM;
4294 pd_addr++;
4295 }
4296
4297 memcpy(instance->pd_list, instance->local_pd_list,
4298 sizeof(instance->pd_list));
4299 break;
4300
4301 }
4302
4303 pci_free_consistent(instance->pdev,
4304 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
4305 ci, ci_h);
4306
4307 if (ret != DCMD_TIMEOUT)
4308 megasas_return_cmd(instance, cmd);
4309
4310 return ret;
4311 }
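/*
 * Illustrative sketch (compiled out, not part of the driver): once
 * megasas_get_pd_list() has succeeded, instance->pd_list[] can be scanned
 * for entries in MR_PD_STATE_SYSTEM, e.g. to count the drives the FW
 * exposed.  The helper name example_count_system_pds() is hypothetical.
 */
#if 0
static int example_count_system_pds(struct megasas_instance *instance)
{
	int i, count = 0;

	for (i = 0; i < MEGASAS_MAX_PD; i++)
		if (instance->pd_list[i].driveState == MR_PD_STATE_SYSTEM)
			count++;

	return count;
}
#endif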
4312
4313 /*
4314 * megasas_get_ld_list - Returns FW's ld_list structure
4315 * @instance: Adapter soft state
4316 *
4317 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
4318 * list structure. This information is mainly used to find out the
4319 * target IDs of the LDs exported by the FW.
4321 */
4322 static int
4323 megasas_get_ld_list(struct megasas_instance *instance)
4324 {
4325 int ret = 0, ld_index = 0, ids = 0;
4326 struct megasas_cmd *cmd;
4327 struct megasas_dcmd_frame *dcmd;
4328 struct MR_LD_LIST *ci;
4329 dma_addr_t ci_h = 0;
4330 u32 ld_count;
4331
4332 cmd = megasas_get_cmd(instance);
4333
4334 if (!cmd) {
4335 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4336 return -ENOMEM;
4337 }
4338
4339 dcmd = &cmd->frame->dcmd;
4340
4341 ci = pci_alloc_consistent(instance->pdev,
4342 sizeof(struct MR_LD_LIST),
4343 &ci_h);
4344
4345 if (!ci) {
4346 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
4347 megasas_return_cmd(instance, cmd);
4348 return -ENOMEM;
4349 }
4350
4351 memset(ci, 0, sizeof(*ci));
4352 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4353
4354 if (instance->supportmax256vd)
4355 dcmd->mbox.b[0] = 1;
4356 dcmd->cmd = MFI_CMD_DCMD;
4357 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4358 dcmd->sge_count = 1;
4359 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4360 dcmd->timeout = 0;
4361 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4362 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4363 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4364 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
4365 dcmd->pad_0 = 0;
4366
4367 if (instance->ctrl_context && !instance->mask_interrupts)
4368 ret = megasas_issue_blocked_cmd(instance, cmd,
4369 MFI_IO_TIMEOUT_SECS);
4370 else
4371 ret = megasas_issue_polled(instance, cmd);
4372
4373 ld_count = le32_to_cpu(ci->ldCount);
4374
4375 switch (ret) {
4376 case DCMD_FAILED:
4377 megaraid_sas_kill_hba(instance);
4378 break;
4379 case DCMD_TIMEOUT:
4380
4381 switch (dcmd_timeout_ocr_possible(instance)) {
4382 case INITIATE_OCR:
4383 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4384 /*
4385 * DCMD timed out from the AEN path.
4386 * The AEN path already holds reset_mutex to avoid PCI access
4387 * while OCR is in progress.
4388 */
4389 mutex_unlock(&instance->reset_mutex);
4390 megasas_reset_fusion(instance->host,
4391 MFI_IO_TIMEOUT_OCR);
4392 mutex_lock(&instance->reset_mutex);
4393 break;
4394 case KILL_ADAPTER:
4395 megaraid_sas_kill_hba(instance);
4396 break;
4397 case IGNORE_TIMEOUT:
4398 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4399 __func__, __LINE__);
4400 break;
4401 }
4402
4403 break;
4404
4405 case DCMD_SUCCESS:
4406 if (ld_count > instance->fw_supported_vd_count)
4407 break;
4408
4409 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4410
4411 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4412 if (ci->ldList[ld_index].state != 0) {
4413 ids = ci->ldList[ld_index].ref.targetId;
4414 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4415 }
4416 }
4417
4418 break;
4419 }
4420
4421 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_LIST), ci, ci_h);
4422
4423 if (ret != DCMD_TIMEOUT)
4424 megasas_return_cmd(instance, cmd);
4425
4426 return ret;
4427 }
4428
4429 /**
4430 * megasas_ld_list_query - Returns FW's ld_list structure
4431 * @instance: Adapter soft state
4432 * @query_type: LD query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4433 *
4434 * Issues an internal command (DCMD) to query the FW's LD target ID list.
4435 * Falls back to megasas_get_ld_list() if the query DCMD is not supported
4436 * by the firmware.
4437 */
4438 static int
4439 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4440 {
4441 int ret = 0, ld_index = 0, ids = 0;
4442 struct megasas_cmd *cmd;
4443 struct megasas_dcmd_frame *dcmd;
4444 struct MR_LD_TARGETID_LIST *ci;
4445 dma_addr_t ci_h = 0;
4446 u32 tgtid_count;
4447
4448 cmd = megasas_get_cmd(instance);
4449
4450 if (!cmd) {
4451 dev_warn(&instance->pdev->dev,
4452 "megasas_ld_list_query: Failed to get cmd\n");
4453 return -ENOMEM;
4454 }
4455
4456 dcmd = &cmd->frame->dcmd;
4457
4458 ci = pci_alloc_consistent(instance->pdev,
4459 sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
4460
4461 if (!ci) {
4462 dev_warn(&instance->pdev->dev,
4463 "Failed to alloc mem for ld_list_query\n");
4464 megasas_return_cmd(instance, cmd);
4465 return -ENOMEM;
4466 }
4467
4468 memset(ci, 0, sizeof(*ci));
4469 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4470
4471 dcmd->mbox.b[0] = query_type;
4472 if (instance->supportmax256vd)
4473 dcmd->mbox.b[2] = 1;
4474
4475 dcmd->cmd = MFI_CMD_DCMD;
4476 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4477 dcmd->sge_count = 1;
4478 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4479 dcmd->timeout = 0;
4480 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4481 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4482 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4483 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4484 dcmd->pad_0 = 0;
4485
4486 if (instance->ctrl_context && !instance->mask_interrupts)
4487 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4488 else
4489 ret = megasas_issue_polled(instance, cmd);
4490
4491 switch (ret) {
4492 case DCMD_FAILED:
4493 dev_info(&instance->pdev->dev,
4494 "DCMD not supported by firmware - %s %d\n",
4495 __func__, __LINE__);
4496 ret = megasas_get_ld_list(instance);
4497 break;
4498 case DCMD_TIMEOUT:
4499 switch (dcmd_timeout_ocr_possible(instance)) {
4500 case INITIATE_OCR:
4501 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4502 /*
4503 * DCMD timed out from the AEN path.
4504 * The AEN path already holds reset_mutex to avoid PCI access
4505 * while OCR is in progress.
4506 */
4507 mutex_unlock(&instance->reset_mutex);
4508 megasas_reset_fusion(instance->host,
4509 MFI_IO_TIMEOUT_OCR);
4510 mutex_lock(&instance->reset_mutex);
4511 break;
4512 case KILL_ADAPTER:
4513 megaraid_sas_kill_hba(instance);
4514 break;
4515 case IGNORE_TIMEOUT:
4516 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4517 __func__, __LINE__);
4518 break;
4519 }
4520
4521 break;
4522 case DCMD_SUCCESS:
4523 tgtid_count = le32_to_cpu(ci->count);
4524
4525 if ((tgtid_count > (instance->fw_supported_vd_count)))
4526 break;
4527
4528 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4529 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4530 ids = ci->targetId[ld_index];
4531 instance->ld_ids[ids] = ci->targetId[ld_index];
4532 }
4533
4534 break;
4535 }
4536
4537 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
4538 ci, ci_h);
4539
4540 if (ret != DCMD_TIMEOUT)
4541 megasas_return_cmd(instance, cmd);
4542
4543 return ret;
4544 }
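/*
 * Illustrative sketch (compiled out, not part of the driver): after a
 * successful LD list query the valid targets are the entries of
 * instance->ld_ids[] that are not 0xff (the array is pre-filled with 0xff).
 * The helper name example_count_lds() is hypothetical.
 */
#if 0
static int example_count_lds(struct megasas_instance *instance)
{
	int tgt, count = 0;

	for (tgt = 0; tgt < MEGASAS_MAX_LD_IDS; tgt++)
		if (instance->ld_ids[tgt] != 0xff)
			count++;

	return count;
}
#endif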
4545
4546 /*
4547 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4548 * instance : Controller's instance
4549 */
4550 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4551 {
4552 struct fusion_context *fusion;
4553 u32 ventura_map_sz = 0;
4554
4555 fusion = instance->ctrl_context;
4556 /* For MFI based controllers return dummy success */
4557 if (!fusion)
4558 return;
4559
4560 instance->supportmax256vd =
4561 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
4562 /* Additional check to accommodate future FW enhancements */
4563 if (instance->ctrl_info->max_lds > 64)
4564 instance->supportmax256vd = 1;
4565
4566 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4567 * MEGASAS_MAX_DEV_PER_CHANNEL;
4568 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4569 * MEGASAS_MAX_DEV_PER_CHANNEL;
4570 if (instance->supportmax256vd) {
4571 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4572 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4573 } else {
4574 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4575 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4576 }
4577
4578 dev_info(&instance->pdev->dev,
4579 "firmware type\t: %s\n",
4580 instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
4581 "Legacy(64 VD) firmware");
4582
4583 if (instance->max_raid_mapsize) {
4584 ventura_map_sz = instance->max_raid_mapsize *
4585 MR_MIN_MAP_SIZE; /* 64k */
4586 fusion->current_map_sz = ventura_map_sz;
4587 fusion->max_map_sz = ventura_map_sz;
4588 } else {
4589 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4590 (sizeof(struct MR_LD_SPAN_MAP) *
4591 (instance->fw_supported_vd_count - 1));
4592 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4593
4594 fusion->max_map_sz =
4595 max(fusion->old_map_sz, fusion->new_map_sz);
4596
4597 if (instance->supportmax256vd)
4598 fusion->current_map_sz = fusion->new_map_sz;
4599 else
4600 fusion->current_map_sz = fusion->old_map_sz;
4601 }
4602 /* irrespective of FW raid maps, driver raid map is constant */
4603 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4604 }
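/*
 * Worked example (hypothetical value): a Ventura controller reporting
 * max_raid_mapsize = 4 gets a RAID map of 4 * MR_MIN_MAP_SIZE (64K) = 256K,
 * used for both current_map_sz and max_map_sz.
 */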
4605
4606 /**
4607 * megasas_get_ctrl_info - Returns FW's controller structure
4608 * @instance: Adapter soft state
4609 *
4610 * Issues an internal command (DCMD) to get the FW's controller structure.
4611 * This information is mainly used to find out the maximum IO transfer per
4612 * command supported by the FW.
4613 */
4614 int
4615 megasas_get_ctrl_info(struct megasas_instance *instance)
4616 {
4617 int ret = 0;
4618 struct megasas_cmd *cmd;
4619 struct megasas_dcmd_frame *dcmd;
4620 struct megasas_ctrl_info *ci;
4621 struct megasas_ctrl_info *ctrl_info;
4622 dma_addr_t ci_h = 0;
4623
4624 ctrl_info = instance->ctrl_info;
4625
4626 cmd = megasas_get_cmd(instance);
4627
4628 if (!cmd) {
4629 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4630 return -ENOMEM;
4631 }
4632
4633 dcmd = &cmd->frame->dcmd;
4634
4635 ci = pci_alloc_consistent(instance->pdev,
4636 sizeof(struct megasas_ctrl_info), &ci_h);
4637
4638 if (!ci) {
4639 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
4640 megasas_return_cmd(instance, cmd);
4641 return -ENOMEM;
4642 }
4643
4644 memset(ci, 0, sizeof(*ci));
4645 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4646
4647 dcmd->cmd = MFI_CMD_DCMD;
4648 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4649 dcmd->sge_count = 1;
4650 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
4651 dcmd->timeout = 0;
4652 dcmd->pad_0 = 0;
4653 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4654 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4655 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
4656 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4657 dcmd->mbox.b[0] = 1;
4658
4659 if (instance->ctrl_context && !instance->mask_interrupts)
4660 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4661 else
4662 ret = megasas_issue_polled(instance, cmd);
4663
4664 switch (ret) {
4665 case DCMD_SUCCESS:
4666 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
4667 /* Save required controller information in
4668 * CPU endianness format.
4669 */
4670 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4671 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4672 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4673 le16_to_cpus((u16 *)&ctrl_info->adapter_operations4);
4674
4675 /* Update the latest Ext VD info.
4676 * From Init path, store current firmware details.
4677 * From OCR path, detect any firmware property changes
4678 * in case of a firmware upgrade without a system reboot.
4679 */
4680 megasas_update_ext_vd_details(instance);
4681 instance->use_seqnum_jbod_fp =
4682 ctrl_info->adapterOperations3.useSeqNumJbodFP;
4683 instance->support_morethan256jbod =
4684 ctrl_info->adapter_operations4.support_pd_map_target_id;
4685
4686 /*Check whether controller is iMR or MR */
4687 instance->is_imr = (ctrl_info->memory_size ? 0 : 1);
4688 dev_info(&instance->pdev->dev,
4689 "controller type\t: %s(%dMB)\n",
4690 instance->is_imr ? "iMR" : "MR",
4691 le16_to_cpu(ctrl_info->memory_size));
4692
4693 instance->disableOnlineCtrlReset =
4694 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4695 instance->secure_jbod_support =
4696 ctrl_info->adapterOperations3.supportSecurityonJBOD;
4697 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4698 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4699 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4700 instance->secure_jbod_support ? "Yes" : "No");
4701 break;
4702
4703 case DCMD_TIMEOUT:
4704 switch (dcmd_timeout_ocr_possible(instance)) {
4705 case INITIATE_OCR:
4706 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4707 megasas_reset_fusion(instance->host,
4708 MFI_IO_TIMEOUT_OCR);
4709 break;
4710 case KILL_ADAPTER:
4711 megaraid_sas_kill_hba(instance);
4712 break;
4713 case IGNORE_TIMEOUT:
4714 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4715 __func__, __LINE__);
4716 break;
4717 }
break;
4718 case DCMD_FAILED:
4719 megaraid_sas_kill_hba(instance);
4720 break;
4721
4722 }
4723
4724 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
4725 ci, ci_h);
4726
4727 megasas_return_cmd(instance, cmd);
4728
4729
4730 return ret;
4731 }
4732
4733 /*
4734 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4735 * to firmware
4736 *
4737 * @instance: Adapter soft state
4738 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature
4739 *	MR_CRASH_BUF_TURN_OFF = 0
4740 *	MR_CRASH_BUF_TURN_ON = 1
4741 * @return 0 on success, non-zero on failure.
4742 * Issues an internal command (DCMD) to set parameters for crash dump feature.
4743 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
4744 * that driver supports crash dump feature. This DCMD will be sent only if
4745 * crash dump feature is supported by the FW.
4746 *
4747 */
4748 int megasas_set_crash_dump_params(struct megasas_instance *instance,
4749 u8 crash_buf_state)
4750 {
4751 int ret = 0;
4752 struct megasas_cmd *cmd;
4753 struct megasas_dcmd_frame *dcmd;
4754
4755 cmd = megasas_get_cmd(instance);
4756
4757 if (!cmd) {
4758 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4759 return -ENOMEM;
4760 }
4761
4762
4763 dcmd = &cmd->frame->dcmd;
4764
4765 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4766 dcmd->mbox.b[0] = crash_buf_state;
4767 dcmd->cmd = MFI_CMD_DCMD;
4768 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4769 dcmd->sge_count = 1;
4770 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4771 dcmd->timeout = 0;
4772 dcmd->pad_0 = 0;
4773 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4774 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4775 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4776 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4777
4778 if (instance->ctrl_context && !instance->mask_interrupts)
4779 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4780 else
4781 ret = megasas_issue_polled(instance, cmd);
4782
4783 if (ret == DCMD_TIMEOUT) {
4784 switch (dcmd_timeout_ocr_possible(instance)) {
4785 case INITIATE_OCR:
4786 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4787 megasas_reset_fusion(instance->host,
4788 MFI_IO_TIMEOUT_OCR);
4789 break;
4790 case KILL_ADAPTER:
4791 megaraid_sas_kill_hba(instance);
4792 break;
4793 case IGNORE_TIMEOUT:
4794 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4795 __func__, __LINE__);
4796 break;
4797 }
4798 } else
4799 megasas_return_cmd(instance, cmd);
4800
4801 return ret;
4802 }
4803
4804 /**
4805 * megasas_issue_init_mfi - Initializes the FW
4806 * @instance: Adapter soft state
4807 *
4808 * Issues the INIT MFI cmd
4809 */
4810 static int
4811 megasas_issue_init_mfi(struct megasas_instance *instance)
4812 {
4813 __le32 context;
4814 struct megasas_cmd *cmd;
4815 struct megasas_init_frame *init_frame;
4816 struct megasas_init_queue_info *initq_info;
4817 dma_addr_t init_frame_h;
4818 dma_addr_t initq_info_h;
4819
4820 /*
4821 * Prepare an init frame. Note that the init frame points to the queue info
4822 * structure. Each frame has SGL allocated after first 64 bytes. For
4823 * this frame - since we don't need any SGL - we use SGL's space as
4824 * queue info structure
4825 *
4826 * We will not get a NULL command below. We just created the pool.
4827 */
4828 cmd = megasas_get_cmd(instance);
4829
4830 init_frame = (struct megasas_init_frame *)cmd->frame;
4831 initq_info = (struct megasas_init_queue_info *)
4832 ((unsigned long)init_frame + 64);
4833
4834 init_frame_h = cmd->frame_phys_addr;
4835 initq_info_h = init_frame_h + 64;
4836
4837 context = init_frame->context;
4838 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
4839 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
4840 init_frame->context = context;
4841
4842 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
4843 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
4844
4845 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
4846 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
4847
4848 init_frame->cmd = MFI_CMD_INIT;
4849 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
4850 init_frame->queue_info_new_phys_addr_lo =
4851 cpu_to_le32(lower_32_bits(initq_info_h));
4852 init_frame->queue_info_new_phys_addr_hi =
4853 cpu_to_le32(upper_32_bits(initq_info_h));
4854
4855 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
4856
4857 /*
4858 * disable the intr before firing the init frame to FW
4859 */
4860 instance->instancet->disable_intr(instance);
4861
4862 /*
4863 * Issue the init frame in polled mode
4864 */
4865
4866 if (megasas_issue_polled(instance, cmd)) {
4867 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
4868 megasas_return_cmd(instance, cmd);
4869 goto fail_fw_init;
4870 }
4871
4872 megasas_return_cmd(instance, cmd);
4873
4874 return 0;
4875
4876 fail_fw_init:
4877 return -EINVAL;
4878 }
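/*
 * Layout used above (illustrative): the 64-byte MFI init frame sits at
 * cmd->frame_phys_addr and the megasas_init_queue_info structure follows it
 * at offset 64 in the same DMA buffer; queue_info_new_phys_addr_lo/hi simply
 * point back into that buffer.
 */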
4879
4880 static u32
4881 megasas_init_adapter_mfi(struct megasas_instance *instance)
4882 {
4883 struct megasas_register_set __iomem *reg_set;
4884 u32 context_sz;
4885 u32 reply_q_sz;
4886
4887 reg_set = instance->reg_set;
4888
4889 /*
4890 * Get various operational parameters from status register
4891 */
4892 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
4893 /*
4894 * Reduce the max supported cmds by 1. This is to ensure that the
4895 * reply_q_sz (1 more than the max cmd that driver may send)
4896 * does not exceed max cmds that the FW can support
4897 */
4898 instance->max_fw_cmds = instance->max_fw_cmds-1;
4899 instance->max_mfi_cmds = instance->max_fw_cmds;
4900 instance->max_num_sge = (instance->instancet->read_fw_status_reg(reg_set) & 0xFF0000) >>
4901 0x10;
4902 /*
4903 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
4904 * are reserved for IOCTL + driver's internal DCMDs.
4905 */
4906 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4907 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4908 instance->max_scsi_cmds = (instance->max_fw_cmds -
4909 MEGASAS_SKINNY_INT_CMDS);
4910 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4911 } else {
4912 instance->max_scsi_cmds = (instance->max_fw_cmds -
4913 MEGASAS_INT_CMDS);
4914 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
4915 }
4916
4917 instance->cur_can_queue = instance->max_scsi_cmds;
4918 /*
4919 * Create a pool of commands
4920 */
4921 if (megasas_alloc_cmds(instance))
4922 goto fail_alloc_cmds;
4923
4924 /*
4925 * Allocate memory for reply queue. Length of reply queue should
4926 * be _one_ more than the maximum commands handled by the firmware.
4927 *
4928 * Note: When FW completes commands, it places the corresponding context
4929 * values in this circular reply queue. This circular queue is a fairly
4930 * typical producer-consumer queue. FW is the producer (of completed
4931 * commands) and the driver is the consumer.
4932 */
4933 context_sz = sizeof(u32);
4934 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
4935
4936 instance->reply_queue = pci_alloc_consistent(instance->pdev,
4937 reply_q_sz,
4938 &instance->reply_queue_h);
4939
4940 if (!instance->reply_queue) {
4941 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
4942 goto fail_reply_queue;
4943 }
4944
4945 if (megasas_issue_init_mfi(instance))
4946 goto fail_fw_init;
4947
4948 if (megasas_get_ctrl_info(instance)) {
4949 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4950 "Fail from %s %d\n", instance->unique_id,
4951 __func__, __LINE__);
4952 goto fail_fw_init;
4953 }
4954
4955 instance->fw_support_ieee = 0;
4956 instance->fw_support_ieee =
4957 (instance->instancet->read_fw_status_reg(reg_set) &
4958 0x04000000);
4959
4960 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
4961 instance->fw_support_ieee);
4962
4963 if (instance->fw_support_ieee)
4964 instance->flag_ieee = 1;
4965
4966 return 0;
4967
4968 fail_fw_init:
4969
4970 pci_free_consistent(instance->pdev, reply_q_sz,
4971 instance->reply_queue, instance->reply_queue_h);
4972 fail_reply_queue:
4973 megasas_free_cmds(instance);
4974
4975 fail_alloc_cmds:
4976 return 1;
4977 }
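/*
 * Illustrative sketch (compiled out, not part of the driver): how the MFI
 * reply queue set up above is consumed.  The FW advances *producer as it
 * completes commands; the driver walks from *consumer to *producer, looks
 * each context up in cmd_list and then publishes the new consumer index.
 * This mirrors the driver's completion tasklet; the helper name
 * example_drain_mfi_reply_queue() is hypothetical.
 */
#if 0
static void example_drain_mfi_reply_queue(struct megasas_instance *instance)
{
	u32 producer = le32_to_cpu(*instance->producer);
	u32 consumer = le32_to_cpu(*instance->consumer);

	while (consumer != producer) {
		u32 context = le32_to_cpu(instance->reply_queue[consumer]);

		if (context < instance->max_fw_cmds) {
			struct megasas_cmd *cmd = instance->cmd_list[context];

			/* the real driver completes 'cmd' here */
			(void)cmd;
		}

		if (++consumer == (instance->max_fw_cmds + 1))
			consumer = 0;
	}

	/* publish the new consumer index back to the FW */
	*instance->consumer = cpu_to_le32(producer);
}
#endif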
4978
4979 /*
4980 * megasas_setup_irqs_ioapic - register legacy interrupts.
4981 * @instance: Adapter soft state
4982 *
4983 * Do not enable interrupt, only setup ISRs.
4984 *
4985 * Return 0 on success.
4986 */
4987 static int
4988 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
4989 {
4990 struct pci_dev *pdev;
4991
4992 pdev = instance->pdev;
4993 instance->irq_context[0].instance = instance;
4994 instance->irq_context[0].MSIxIndex = 0;
4995 if (request_irq(pci_irq_vector(pdev, 0),
4996 instance->instancet->service_isr, IRQF_SHARED,
4997 "megasas", &instance->irq_context[0])) {
4998 dev_err(&instance->pdev->dev,
4999 "Failed to register IRQ from %s %d\n",
5000 __func__, __LINE__);
5001 return -1;
5002 }
5003 return 0;
5004 }
5005
5006 /**
5007 * megasas_setup_irqs_msix - register MSI-x interrupts.
5008 * @instance: Adapter soft state
5009 * @is_probe: Driver probe check
5010 *
5011 * Do not enable interrupt, only setup ISRs.
5012 *
5013 * Return 0 on success.
5014 */
5015 static int
5016 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5017 {
5018 int i, j;
5019 struct pci_dev *pdev;
5020
5021 pdev = instance->pdev;
5022
5023 /* Try MSI-x */
5024 for (i = 0; i < instance->msix_vectors; i++) {
5025 instance->irq_context[i].instance = instance;
5026 instance->irq_context[i].MSIxIndex = i;
5027 if (request_irq(pci_irq_vector(pdev, i),
5028 instance->instancet->service_isr, 0, "megasas",
5029 &instance->irq_context[i])) {
5030 dev_err(&instance->pdev->dev,
5031 "Failed to register IRQ for vector %d.\n", i);
5032 for (j = 0; j < i; j++)
5033 free_irq(pci_irq_vector(pdev, j),
5034 &instance->irq_context[j]);
5035 /* Retry irq register for IO_APIC*/
5036 instance->msix_vectors = 0;
5037 if (is_probe)
5038 return megasas_setup_irqs_ioapic(instance);
5039 else
5040 return -1;
5041 }
5042 }
5043 return 0;
5044 }
5045
5046 /*
5047 * megasas_destroy_irqs- unregister interrupts.
5048 * @instance: Adapter soft state
5049 * return: void
5050 */
5051 static void
5052 megasas_destroy_irqs(struct megasas_instance *instance) {
5053
5054 int i;
5055
5056 if (instance->msix_vectors)
5057 for (i = 0; i < instance->msix_vectors; i++) {
5058 free_irq(pci_irq_vector(instance->pdev, i),
5059 &instance->irq_context[i]);
5060 }
5061 else
5062 free_irq(pci_irq_vector(instance->pdev, 0),
5063 &instance->irq_context[0]);
5064 }
5065
5066 /**
5067 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5068 * @instance: Adapter soft state
5069 *
5070 * Allocates the PD sequence-number sync buffers (if needed) and decides
5071 * whether the JBOD sequence-number based fast path can be used.
5072 */
5073 void
5074 megasas_setup_jbod_map(struct megasas_instance *instance)
5075 {
5076 int i;
5077 struct fusion_context *fusion = instance->ctrl_context;
5078 u32 pd_seq_map_sz;
5079
5080 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5081 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
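	/*
	 * Sizing note (illustrative): MR_PD_CFG_SEQ_NUM_SYNC is assumed to
	 * already embed one MR_PD_CFG_SEQ element, so only
	 * (MAX_PHYSICAL_DEVICES - 1) additional elements are added to cover
	 * every possible PD.
	 */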
5082
5083 if (reset_devices || !fusion ||
5084 !instance->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
5085 dev_info(&instance->pdev->dev,
5086 "Jbod map is not supported %s %d\n",
5087 __func__, __LINE__);
5088 instance->use_seqnum_jbod_fp = false;
5089 return;
5090 }
5091
5092 if (fusion->pd_seq_sync[0])
5093 goto skip_alloc;
5094
5095 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5096 fusion->pd_seq_sync[i] = dma_alloc_coherent
5097 (&instance->pdev->dev, pd_seq_map_sz,
5098 &fusion->pd_seq_phys[i], GFP_KERNEL);
5099 if (!fusion->pd_seq_sync[i]) {
5100 dev_err(&instance->pdev->dev,
5101 "Failed to allocate memory from %s %d\n",
5102 __func__, __LINE__);
5103 if (i == 1) {
5104 dma_free_coherent(&instance->pdev->dev,
5105 pd_seq_map_sz, fusion->pd_seq_sync[0],
5106 fusion->pd_seq_phys[0]);
5107 fusion->pd_seq_sync[0] = NULL;
5108 }
5109 instance->use_seqnum_jbod_fp = false;
5110 return;
5111 }
5112 }
5113
5114 skip_alloc:
5115 if (!megasas_sync_pd_seq_num(instance, false) &&
5116 !megasas_sync_pd_seq_num(instance, true))
5117 instance->use_seqnum_jbod_fp = true;
5118 else
5119 instance->use_seqnum_jbod_fp = false;
5120 }
5121
5122 /**
5123 * megasas_init_fw - Initializes the FW
5124 * @instance: Adapter soft state
5125 *
5126 * This is the main function for initializing firmware
5127 */
5128
5129 static int megasas_init_fw(struct megasas_instance *instance)
5130 {
5131 u32 max_sectors_1;
5132 u32 max_sectors_2, tmp_sectors, msix_enable;
5133 u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5134 resource_size_t base_addr;
5135 struct megasas_register_set __iomem *reg_set;
5136 struct megasas_ctrl_info *ctrl_info = NULL;
5137 unsigned long bar_list;
5138 int i, j, loop, fw_msix_count = 0;
5139 struct IOV_111 *iovPtr;
5140 struct fusion_context *fusion;
5141
5142 fusion = instance->ctrl_context;
5143
5144 /* Find first memory bar */
5145 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5146 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5147 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5148 "megasas: LSI")) {
5149 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5150 return -EBUSY;
5151 }
5152
5153 base_addr = pci_resource_start(instance->pdev, instance->bar);
5154 instance->reg_set = ioremap_nocache(base_addr, 8192);
5155
5156 if (!instance->reg_set) {
5157 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5158 goto fail_ioremap;
5159 }
5160
5161 reg_set = instance->reg_set;
5162
5163 if (fusion)
5164 instance->instancet = &megasas_instance_template_fusion;
5165 else {
5166 switch (instance->pdev->device) {
5167 case PCI_DEVICE_ID_LSI_SAS1078R:
5168 case PCI_DEVICE_ID_LSI_SAS1078DE:
5169 instance->instancet = &megasas_instance_template_ppc;
5170 break;
5171 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5172 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5173 instance->instancet = &megasas_instance_template_gen2;
5174 break;
5175 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5176 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5177 instance->instancet = &megasas_instance_template_skinny;
5178 break;
5179 case PCI_DEVICE_ID_LSI_SAS1064R:
5180 case PCI_DEVICE_ID_DELL_PERC5:
5181 default:
5182 instance->instancet = &megasas_instance_template_xscale;
5183 instance->pd_list_not_supported = 1;
5184 break;
5185 }
5186 }
5187
5188 if (megasas_transition_to_ready(instance, 0)) {
5189 atomic_set(&instance->fw_reset_no_pci_access, 1);
5190 instance->instancet->adp_reset
5191 (instance, instance->reg_set);
5192 atomic_set(&instance->fw_reset_no_pci_access, 0);
5193 dev_info(&instance->pdev->dev,
5194 "FW restarted successfully from %s!\n",
5195 __func__);
5196
5197 /* wait for about 30 seconds before retry */
5198 ssleep(30);
5199
5200 if (megasas_transition_to_ready(instance, 0))
5201 goto fail_ready_state;
5202 }
5203
5204 if (instance->is_ventura) {
5205 scratch_pad_3 =
5206 readl(&instance->reg_set->outbound_scratch_pad_3);
5207 instance->max_raid_mapsize = ((scratch_pad_3 >>
5208 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5209 MR_MAX_RAID_MAP_SIZE_MASK);
5210 }
5211
5212 /* Check if MSI-X is supported while in ready state */
5213 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
5214 0x4000000) >> 0x1a;
5215 if (msix_enable && !msix_disable) {
5216 int irq_flags = PCI_IRQ_MSIX;
5217
5218 scratch_pad_2 = readl
5219 (&instance->reg_set->outbound_scratch_pad_2);
5220 /* Check max MSI-X vectors */
5221 if (fusion) {
5222 if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
5223 instance->msix_vectors = (scratch_pad_2
5224 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5225 fw_msix_count = instance->msix_vectors;
5226 } else { /* Invader series supports more than 8 MSI-x vectors*/
5227 instance->msix_vectors = ((scratch_pad_2
5228 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5229 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5230 if (instance->msix_vectors > 16)
5231 instance->msix_combined = true;
5232
5233 if (rdpq_enable)
5234 instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5235 1 : 0;
5236 fw_msix_count = instance->msix_vectors;
5237 /* Save 1-15 reply post index address to local memory
5238 * Index 0 is already saved from reg offset
5239 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5240 */
5241 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5242 instance->reply_post_host_index_addr[loop] =
5243 (u32 __iomem *)
5244 ((u8 __iomem *)instance->reg_set +
5245 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5246 + (loop * 0x10));
5247 }
5248 }
5249 if (msix_vectors)
5250 instance->msix_vectors = min(msix_vectors,
5251 instance->msix_vectors);
5252 } else /* MFI adapters */
5253 instance->msix_vectors = 1;
5254 /* Don't bother allocating more MSI-X vectors than cpus */
5255 instance->msix_vectors = min(instance->msix_vectors,
5256 (unsigned int)num_online_cpus());
5257 if (smp_affinity_enable)
5258 irq_flags |= PCI_IRQ_AFFINITY;
5259 i = pci_alloc_irq_vectors(instance->pdev, 1,
5260 instance->msix_vectors, irq_flags);
5261 if (i > 0)
5262 instance->msix_vectors = i;
5263 else
5264 instance->msix_vectors = 0;
5265 }
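	/*
	 * Summary of the MSI-X sizing above (illustrative): start from the
	 * FW-advertised maximum, optionally cap it with the msix_vectors
	 * module parameter, never exceed the number of online CPUs, and
	 * finally keep whatever pci_alloc_irq_vectors() actually allocated.
	 */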
5266 /*
5267 * MSI-X host index 0 is common for all adapters.
5268 * It is used for all MPT based adapters.
5269 */
5270 if (instance->msix_combined) {
5271 instance->reply_post_host_index_addr[0] =
5272 (u32 *)((u8 *)instance->reg_set +
5273 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5274 } else {
5275 instance->reply_post_host_index_addr[0] =
5276 (u32 *)((u8 *)instance->reg_set +
5277 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5278 }
5279
5280 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5281 if (i < 0)
5282 goto fail_setup_irqs;
5283
5284 dev_info(&instance->pdev->dev,
5285 "firmware supports msix\t: (%d)", fw_msix_count);
5286 dev_info(&instance->pdev->dev,
5287 "current msix/online cpus\t: (%d/%d)\n",
5288 instance->msix_vectors, (unsigned int)num_online_cpus());
5289 dev_info(&instance->pdev->dev,
5290 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5291
5292 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5293 (unsigned long)instance);
5294
5295 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
5296 GFP_KERNEL);
5297 if (instance->ctrl_info == NULL)
5298 goto fail_init_adapter;
5299
5300 /*
5301 * Below are the default values for legacy firmware
5302 * (non-Fusion based controllers).
5303 */
5304 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5305 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5306 /* Get operational params, sge flags, send init cmd to controller */
5307 if (instance->instancet->init_adapter(instance))
5308 goto fail_init_adapter;
5309
5310 if (instance->is_ventura) {
5311 scratch_pad_4 =
5312 readl(&instance->reg_set->outbound_scratch_pad_4);
5313 if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
5314 MR_DEFAULT_NVME_PAGE_SHIFT)
5315 instance->nvme_page_size =
5316 (1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK));
5317
5318 dev_info(&instance->pdev->dev,
5319 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5320 }
5321
5322 if (instance->msix_vectors ?
5323 megasas_setup_irqs_msix(instance, 1) :
5324 megasas_setup_irqs_ioapic(instance))
5325 goto fail_init_adapter;
5326
5327 instance->instancet->enable_intr(instance);
5328
5329 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5330
5331 megasas_setup_jbod_map(instance);
5332
5333 /* For passthrough:
5334 * the following call retrieves the PD list from the FW.
5335 */
5336 memset(instance->pd_list, 0,
5337 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5338 if (megasas_get_pd_list(instance) < 0) {
5339 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5340 goto fail_get_ld_pd_list;
5341 }
5342
5343 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5344
5345 /* stream detection initialization */
5346 if (instance->is_ventura && fusion) {
5347 fusion->stream_detect_by_ld =
5348 kzalloc(sizeof(struct LD_STREAM_DETECT *)
5349 * MAX_LOGICAL_DRIVES_EXT,
5350 GFP_KERNEL);
5351 if (!fusion->stream_detect_by_ld) {
5352 dev_err(&instance->pdev->dev,
5353 "unable to allocate stream detection for pool of LDs\n");
5354 goto fail_get_ld_pd_list;
5355 }
5356 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5357 fusion->stream_detect_by_ld[i] =
5358 kmalloc(sizeof(struct LD_STREAM_DETECT),
5359 GFP_KERNEL);
5360 if (!fusion->stream_detect_by_ld[i]) {
5361 dev_err(&instance->pdev->dev,
5362 "unable to allocate stream detect by LD\n ");
5363 for (j = 0; j < i; ++j)
5364 kfree(fusion->stream_detect_by_ld[j]);
5365 kfree(fusion->stream_detect_by_ld);
5366 fusion->stream_detect_by_ld = NULL;
5367 goto fail_get_ld_pd_list;
5368 }
5369 fusion->stream_detect_by_ld[i]->mru_bit_map
5370 = MR_STREAM_BITMAP;
5371 }
5372 }
5373
5374 if (megasas_ld_list_query(instance,
5375 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
5376 goto fail_get_ld_pd_list;
5377
5378 /*
5379 * Compute the max allowed sectors per IO: The controller info has two
5380 * limits on max sectors. Driver should use the minimum of these two.
5381 *
5382 * 1 << stripe_sz_ops.min = max sectors per strip
5383 *
5384 * Note that older firmwares ( < FW ver 30) didn't report information
5385 * to calculate max_sectors_1. So the number ended up as zero always.
5386 */
5387 tmp_sectors = 0;
5388 ctrl_info = instance->ctrl_info;
5389
5390 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5391 le16_to_cpu(ctrl_info->max_strips_per_io);
5392 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5393
5394 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
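	/*
	 * Worked example (hypothetical values): with stripe_sz_ops.min = 7
	 * (128-sector strips) and max_strips_per_io = 42, max_sectors_1 =
	 * 128 * 42 = 5376; the smaller of that and max_request_size becomes
	 * tmp_sectors, the per-IO limit applied below.
	 */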
5395
5396 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5397 instance->passive = ctrl_info->cluster.passive;
5398 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5399 instance->UnevenSpanSupport =
5400 ctrl_info->adapterOperations2.supportUnevenSpans;
5401 if (instance->UnevenSpanSupport) {
5402 struct fusion_context *fusion = instance->ctrl_context;
5403 if (MR_ValidateMapInfo(instance))
5404 fusion->fast_path_io = 1;
5405 else
5406 fusion->fast_path_io = 0;
5407
5408 }
5409 if (ctrl_info->host_interface.SRIOV) {
5410 instance->requestorId = ctrl_info->iov.requestorId;
5411 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5412 if (!ctrl_info->adapterOperations2.activePassive)
5413 instance->PlasmaFW111 = 1;
5414
5415 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5416 instance->PlasmaFW111 ? "1.11" : "new");
5417
5418 if (instance->PlasmaFW111) {
5419 iovPtr = (struct IOV_111 *)
5420 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5421 instance->requestorId = iovPtr->requestorId;
5422 }
5423 }
5424 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5425 instance->requestorId);
5426 }
5427
5428 instance->crash_dump_fw_support =
5429 ctrl_info->adapterOperations3.supportCrashDump;
5430 instance->crash_dump_drv_support =
5431 (instance->crash_dump_fw_support &&
5432 instance->crash_dump_buf);
5433 if (instance->crash_dump_drv_support)
5434 megasas_set_crash_dump_params(instance,
5435 MR_CRASH_BUF_TURN_OFF);
5436
5437 else {
5438 if (instance->crash_dump_buf)
5439 pci_free_consistent(instance->pdev,
5440 CRASH_DMA_BUF_SIZE,
5441 instance->crash_dump_buf,
5442 instance->crash_dump_h);
5443 instance->crash_dump_buf = NULL;
5444 }
5445
5446
5447 dev_info(&instance->pdev->dev,
5448 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5449 le16_to_cpu(ctrl_info->pci.vendor_id),
5450 le16_to_cpu(ctrl_info->pci.device_id),
5451 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5452 le16_to_cpu(ctrl_info->pci.sub_device_id));
5453 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5454 instance->UnevenSpanSupport ? "yes" : "no");
5455 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5456 instance->crash_dump_drv_support ? "yes" : "no");
5457 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5458 instance->use_seqnum_jbod_fp ? "yes" : "no");
5459
5460
5461 instance->max_sectors_per_req = instance->max_num_sge *
5462 SGE_BUFFER_SIZE / 512;
5463 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5464 instance->max_sectors_per_req = tmp_sectors;
5465
5466 /* Check for valid throttlequeuedepth module parameter */
5467 if (throttlequeuedepth &&
5468 throttlequeuedepth <= instance->max_scsi_cmds)
5469 instance->throttlequeuedepth = throttlequeuedepth;
5470 else
5471 instance->throttlequeuedepth =
5472 MEGASAS_THROTTLE_QUEUE_DEPTH;
5473
5474 if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
5475 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5476
5477 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5478 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5479
5480 /* Launch SR-IOV heartbeat timer */
5481 if (instance->requestorId) {
5482 if (!megasas_sriov_start_heartbeat(instance, 1))
5483 megasas_start_timer(instance,
5484 &instance->sriov_heartbeat_timer,
5485 megasas_sriov_heartbeat_handler,
5486 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
5487 else
5488 instance->skip_heartbeat_timer_del = 1;
5489 }
5490
5491 return 0;
5492
5493 fail_get_ld_pd_list:
5494 instance->instancet->disable_intr(instance);
5495 fail_init_adapter:
5496 megasas_destroy_irqs(instance);
5497 fail_setup_irqs:
5498 if (instance->msix_vectors)
5499 pci_free_irq_vectors(instance->pdev);
5500 instance->msix_vectors = 0;
5501 fail_ready_state:
5502 kfree(instance->ctrl_info);
5503 instance->ctrl_info = NULL;
5504 iounmap(instance->reg_set);
5505
5506 fail_ioremap:
5507 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5508
5509 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5510 __func__, __LINE__);
5511 return -EINVAL;
5512 }
5513
5514 /**
5515 * megasas_release_mfi - Reverses the FW initialization
5516 * @instance: Adapter soft state
5517 */
5518 static void megasas_release_mfi(struct megasas_instance *instance)
5519 {
5520 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5521
5522 if (instance->reply_queue)
5523 pci_free_consistent(instance->pdev, reply_q_sz,
5524 instance->reply_queue, instance->reply_queue_h);
5525
5526 megasas_free_cmds(instance);
5527
5528 iounmap(instance->reg_set);
5529
5530 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5531 }
5532
5533 /**
5534 * megasas_get_seq_num - Gets latest event sequence numbers
5535 * @instance: Adapter soft state
5536 * @eli: FW event log sequence numbers information
5537 *
5538 * FW maintains a log of all events in a non-volatile area. Upper layers
5539 * typically query the latest event sequence number, the sequence number at
5540 * boot, etc. They "read" all events up to the latest sequence number by
5541 * issuing a direct FW command (DCMD). For future events (beyond the latest
5542 * sequence number), they subscribe to AEN (asynchronous event notification) and
5543 * wait for the events to happen.
5544 */
5545 static int
5546 megasas_get_seq_num(struct megasas_instance *instance,
5547 struct megasas_evt_log_info *eli)
5548 {
5549 struct megasas_cmd *cmd;
5550 struct megasas_dcmd_frame *dcmd;
5551 struct megasas_evt_log_info *el_info;
5552 dma_addr_t el_info_h = 0;
5553
5554 cmd = megasas_get_cmd(instance);
5555
5556 if (!cmd) {
5557 return -ENOMEM;
5558 }
5559
5560 dcmd = &cmd->frame->dcmd;
5561 el_info = pci_alloc_consistent(instance->pdev,
5562 sizeof(struct megasas_evt_log_info),
5563 &el_info_h);
5564
5565 if (!el_info) {
5566 megasas_return_cmd(instance, cmd);
5567 return -ENOMEM;
5568 }
5569
5570 memset(el_info, 0, sizeof(*el_info));
5571 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5572
5573 dcmd->cmd = MFI_CMD_DCMD;
5574 dcmd->cmd_status = 0x0;
5575 dcmd->sge_count = 1;
5576 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5577 dcmd->timeout = 0;
5578 dcmd->pad_0 = 0;
5579 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5580 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5581 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);
5582 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5583
5584 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS) ==
5585 DCMD_SUCCESS) {
5586 /*
5587 * Copy the data back into callers buffer
5588 */
5589 eli->newest_seq_num = el_info->newest_seq_num;
5590 eli->oldest_seq_num = el_info->oldest_seq_num;
5591 eli->clear_seq_num = el_info->clear_seq_num;
5592 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5593 eli->boot_seq_num = el_info->boot_seq_num;
5594 } else
5595 dev_err(&instance->pdev->dev, "DCMD failed "
5596 "from %s\n", __func__);
5597
5598 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
5599 el_info, el_info_h);
5600
5601 megasas_return_cmd(instance, cmd);
5602
5603 return 0;
5604 }
5605
5606 /**
5607 * megasas_register_aen - Registers for asynchronous event notification
5608 * @instance: Adapter soft state
5609 * @seq_num: The starting sequence number
5610 * @class_locale_word: Combined class and locale of the event
5611 *
5612 * This function subscribes to AEN for events beyond @seq_num. It requests
5613 * to be notified if and only if the event matches @class_locale_word.
5614 */
5615 static int
5616 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
5617 u32 class_locale_word)
5618 {
5619 int ret_val;
5620 struct megasas_cmd *cmd;
5621 struct megasas_dcmd_frame *dcmd;
5622 union megasas_evt_class_locale curr_aen;
5623 union megasas_evt_class_locale prev_aen;
5624
5625 /*
5626 * If there is an AEN pending already (aen_cmd), check if the
5627 * class_locale of that pending AEN is inclusive of the new
5628 * AEN request we currently have. If it is, we don't have to do
5629 * anything: whichever events the current AEN request is
5630 * subscribing to have already been subscribed to.
5631 *
5632 * If the old command is _not_ inclusive, we have to abort it,
5633 * form a class_locale that is a superset of both the old and
5634 * the current one, and re-issue the request to the FW.
5636 */
5637
5638 curr_aen.word = class_locale_word;
5639
5640 if (instance->aen_cmd) {
5641
5642 prev_aen.word =
5643 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
5644
5645 /*
5646 * A class whose enum value is smaller is inclusive of all
5647 * higher values. If a PROGRESS (= -1) was previously
5648 * registered, then new registration requests for higher
5649 * classes need not be sent to the FW. They are automatically
5650 * included.
5651 *
5652 * Locale numbers don't have such hierarchy. They are bitmap
5653 * values
5654 */
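/*
 * Worked example (hypothetical values, for illustration only): if the
 * pending AEN was registered with class PROGRESS (-1) and locale bitmap
 * 0x0001, and the new request asks for class CRITICAL with locale
 * 0x0002, the subset test below fails (locale 0x0002 is not covered by
 * 0x0001), so the pending command is aborted and re-issued with the
 * merged values: locale 0x0001 | 0x0002 = 0x0003 and the smaller, more
 * inclusive class PROGRESS.
 */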
5655 if ((prev_aen.members.class <= curr_aen.members.class) &&
5656 !((prev_aen.members.locale & curr_aen.members.locale) ^
5657 curr_aen.members.locale)) {
5658 /*
5659 * Previously issued event registration includes
5660 * current request. Nothing to do.
5661 */
5662 return 0;
5663 } else {
5664 curr_aen.members.locale |= prev_aen.members.locale;
5665
5666 if (prev_aen.members.class < curr_aen.members.class)
5667 curr_aen.members.class = prev_aen.members.class;
5668
5669 instance->aen_cmd->abort_aen = 1;
5670 ret_val = megasas_issue_blocked_abort_cmd(instance,
5671 instance->
5672 aen_cmd, 30);
5673
5674 if (ret_val) {
5675 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
5676 "previous AEN command\n");
5677 return ret_val;
5678 }
5679 }
5680 }
5681
5682 cmd = megasas_get_cmd(instance);
5683
5684 if (!cmd)
5685 return -ENOMEM;
5686
5687 dcmd = &cmd->frame->dcmd;
5688
5689 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
5690
5691 /*
5692 * Prepare DCMD for aen registration
5693 */
5694 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5695
5696 dcmd->cmd = MFI_CMD_DCMD;
5697 dcmd->cmd_status = 0x0;
5698 dcmd->sge_count = 1;
5699 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5700 dcmd->timeout = 0;
5701 dcmd->pad_0 = 0;
5702 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
5703 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
5704 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
5705 instance->last_seq_num = seq_num;
5706 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
5707 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h);
5708 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail));
5709
5710 if (instance->aen_cmd != NULL) {
5711 megasas_return_cmd(instance, cmd);
5712 return 0;
5713 }
5714
5715 /*
5716 * Store reference to the cmd used to register for AEN. When an
5717 * application wants us to register for AEN, we have to abort this
5718 * cmd and re-register with a new EVENT LOCALE supplied by that app
5719 */
5720 instance->aen_cmd = cmd;
5721
5722 /*
5723 * Issue the aen registration frame
5724 */
5725 instance->instancet->issue_dcmd(instance, cmd);
5726
5727 return 0;
5728 }
5729
5730 /* megasas_get_target_prop - Send DCMD with below details to firmware.
5731 *
5732 * This DCMD fetches a few properties of an LD/system PD defined
5733 * in MR_TARGET_PROPERTIES, e.g. queue depth and MDTS value.
5734 *
5735 * The DCMD is sent by the driver whenever a new target is added to the OS.
5736 *
5737 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
5738 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
5739 * 0 = system PD, 1 = LD.
5740 * dcmd.mbox.s[1] - TargetID for LD/system PD.
5741 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
5742 *
5743 * @instance: Adapter soft state
5744 * @sdev: OS provided scsi device
5745 *
5746 * Returns 0 on success, non-zero on failure.
5747 */
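/*
 * Example (a sketch; assumes MEGASAS_IS_LOGICAL() reports 1 for devices
 * on the virtual-disk channels): for an LD exposed at channel 2, id 5,
 * the function below would build the mailbox as
 *
 *	dcmd->mbox.b[0] = 1;              (LD, not a system PD)
 *	dcmd->mbox.s[1] = cpu_to_le16(5); (targetId = (2 % 2) + 5)
 *
 * and the firmware returns the matching MR_TARGET_PROPERTIES through
 * the single SGE.
 */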
5748 static int
5749 megasas_get_target_prop(struct megasas_instance *instance,
5750 struct scsi_device *sdev)
5751 {
5752 int ret;
5753 struct megasas_cmd *cmd;
5754 struct megasas_dcmd_frame *dcmd;
5755 u16 targetId = (sdev->channel % 2) + sdev->id;
5756
5757 cmd = megasas_get_cmd(instance);
5758
5759 if (!cmd) {
5760 dev_err(&instance->pdev->dev,
5761 "Failed to get cmd %s\n", __func__);
5762 return -ENOMEM;
5763 }
5764
5765 dcmd = &cmd->frame->dcmd;
5766
5767 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
5768 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5769 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
5770
5771 dcmd->mbox.s[1] = cpu_to_le16(targetId);
5772 dcmd->cmd = MFI_CMD_DCMD;
5773 dcmd->cmd_status = 0xFF;
5774 dcmd->sge_count = 1;
5775 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
5776 dcmd->timeout = 0;
5777 dcmd->pad_0 = 0;
5778 dcmd->data_xfer_len =
5779 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5780 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
5781 dcmd->sgl.sge32[0].phys_addr =
5782 cpu_to_le32(instance->tgt_prop_h);
5783 dcmd->sgl.sge32[0].length =
5784 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
5785
5786 if (instance->ctrl_context && !instance->mask_interrupts)
5787 ret = megasas_issue_blocked_cmd(instance,
5788 cmd, MFI_IO_TIMEOUT_SECS);
5789 else
5790 ret = megasas_issue_polled(instance, cmd);
5791
5792 switch (ret) {
5793 case DCMD_TIMEOUT:
5794 switch (dcmd_timeout_ocr_possible(instance)) {
5795 case INITIATE_OCR:
5796 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5797 megasas_reset_fusion(instance->host,
5798 MFI_IO_TIMEOUT_OCR);
5799 break;
5800 case KILL_ADAPTER:
5801 megaraid_sas_kill_hba(instance);
5802 break;
5803 case IGNORE_TIMEOUT:
5804 dev_info(&instance->pdev->dev,
5805 "Ignore DCMD timeout: %s %d\n",
5806 __func__, __LINE__);
5807 break;
5808 }
5809 break;
5810
5811 default:
5812 megasas_return_cmd(instance, cmd);
5813 }
5814 if (ret != DCMD_SUCCESS)
5815 dev_err(&instance->pdev->dev,
5816 "return from %s %d return value %d\n",
5817 __func__, __LINE__, ret);
5818
5819 return ret;
5820 }
5821
5822 /**
5823 * megasas_start_aen - Subscribes to AEN during driver load time
5824 * @instance: Adapter soft state
5825 */
5826 static int megasas_start_aen(struct megasas_instance *instance)
5827 {
5828 struct megasas_evt_log_info eli;
5829 union megasas_evt_class_locale class_locale;
5830
5831 /*
5832 * Get the latest sequence number from FW
5833 */
5834 memset(&eli, 0, sizeof(eli));
5835
5836 if (megasas_get_seq_num(instance, &eli))
5837 return -1;
5838
5839 /*
5840 * Register AEN with FW for latest sequence number plus 1
5841 */
5842 class_locale.members.reserved = 0;
5843 class_locale.members.locale = MR_EVT_LOCALE_ALL;
5844 class_locale.members.class = MR_EVT_CLASS_DEBUG;
5845
5846 return megasas_register_aen(instance,
5847 le32_to_cpu(eli.newest_seq_num) + 1,
5848 class_locale.word);
5849 }
5850
5851 /**
5852 * megasas_io_attach - Attaches this driver to SCSI mid-layer
5853 * @instance: Adapter soft state
5854 */
5855 static int megasas_io_attach(struct megasas_instance *instance)
5856 {
5857 struct Scsi_Host *host = instance->host;
5858
5859 /*
5860 * Export parameters required by SCSI mid-layer
5861 */
5862 host->unique_id = instance->unique_id;
5863 host->can_queue = instance->max_scsi_cmds;
5864 host->this_id = instance->init_id;
5865 host->sg_tablesize = instance->max_num_sge;
5866
5867 if (instance->fw_support_ieee)
5868 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
5869
5870 /*
5871 * Check if the module parameter value for max_sectors can be used
5872 */
5873 if (max_sectors && max_sectors < instance->max_sectors_per_req)
5874 instance->max_sectors_per_req = max_sectors;
5875 else {
5876 if (max_sectors) {
5877 if (((instance->pdev->device ==
5878 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
5879 (instance->pdev->device ==
5880 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
5881 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5882 instance->max_sectors_per_req = max_sectors;
5883 } else {
5884 dev_info(&instance->pdev->dev, "max_sectors should be > 0"
5885 "and <= %d (or < 1MB for GEN2 controller)\n",
5886 instance->max_sectors_per_req);
5887 }
5888 }
5889 }
5890
5891 host->max_sectors = instance->max_sectors_per_req;
5892 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
5893 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
5894 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
5895 host->max_lun = MEGASAS_MAX_LUN;
5896 host->max_cmd_len = 16;
5897
5898 /*
5899 * Notify the mid-layer about the new controller
5900 */
5901 if (scsi_add_host(host, &instance->pdev->dev)) {
5902 dev_err(&instance->pdev->dev,
5903 "Failed to add host from %s %d\n",
5904 __func__, __LINE__);
5905 return -ENODEV;
5906 }
5907
5908 return 0;
5909 }
5910
5911 static int
5912 megasas_set_dma_mask(struct pci_dev *pdev)
5913 {
5914 /*
5915 * All our controllers are capable of performing 64-bit DMA
5916 */
5917 if (IS_DMA64) {
5918 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
5919
5920 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5921 goto fail_set_dma_mask;
5922 }
5923 } else {
5924 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
5925 goto fail_set_dma_mask;
5926 }
5927 /*
5928 * Ensure that all data structures are allocated in 32-bit
5929 * memory.
5930 */
5931 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
5932 /* Try 32bit DMA mask and 32 bit Consistent dma mask */
5933 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
5934 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
5935 dev_info(&pdev->dev, "set 32bit DMA mask"
5936 "and 32 bit consistent mask\n");
5937 else
5938 goto fail_set_dma_mask;
5939 }
5940
5941 return 0;
5942
5943 fail_set_dma_mask:
5944 return 1;
5945 }
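/*
 * Equivalent logic expressed with the generic DMA API (a sketch only,
 * not this driver's code): ask for a 64-bit streaming mask with a
 * 32-bit fallback, then pin the coherent (consistent) mask to 32 bit so
 * that control structures stay below 4 GB:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return 1;
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return 1;
 */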
5946
5947 /**
5948 * megasas_probe_one - PCI hotplug entry point
5949 * @pdev: PCI device structure
5950 * @id: PCI ids of supported hotplugged adapter
5951 */
5952 static int megasas_probe_one(struct pci_dev *pdev,
5953 const struct pci_device_id *id)
5954 {
5955 int rval, pos;
5956 struct Scsi_Host *host;
5957 struct megasas_instance *instance;
5958 u16 control = 0;
5959 struct fusion_context *fusion = NULL;
5960
5961 /* Reset MSI-X in the kdump kernel */
5962 if (reset_devices) {
5963 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
5964 if (pos) {
5965 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
5966 &control);
5967 if (control & PCI_MSIX_FLAGS_ENABLE) {
5968 dev_info(&pdev->dev, "resetting MSI-X\n");
5969 pci_write_config_word(pdev,
5970 pos + PCI_MSIX_FLAGS,
5971 control &
5972 ~PCI_MSIX_FLAGS_ENABLE);
5973 }
5974 }
5975 }
5976
5977 /*
5978 * PCI prepping: enable device, set bus mastering and DMA mask
5979 */
5980 rval = pci_enable_device_mem(pdev);
5981
5982 if (rval) {
5983 return rval;
5984 }
5985
5986 pci_set_master(pdev);
5987
5988 if (megasas_set_dma_mask(pdev))
5989 goto fail_set_dma_mask;
5990
5991 host = scsi_host_alloc(&megasas_template,
5992 sizeof(struct megasas_instance));
5993
5994 if (!host) {
5995 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
5996 goto fail_alloc_instance;
5997 }
5998
5999 instance = (struct megasas_instance *)host->hostdata;
6000 memset(instance, 0, sizeof(*instance));
6001 atomic_set(&instance->fw_reset_no_pci_access, 0);
6002 instance->pdev = pdev;
6003
6004 switch (instance->pdev->device) {
6005 case PCI_DEVICE_ID_LSI_VENTURA:
6006 case PCI_DEVICE_ID_LSI_HARPOON:
6007 case PCI_DEVICE_ID_LSI_TOMCAT:
6008 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6009 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6010 instance->is_ventura = true;
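/* fall through - Ventura controllers use the same fusion setup below */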
6011 case PCI_DEVICE_ID_LSI_FUSION:
6012 case PCI_DEVICE_ID_LSI_PLASMA:
6013 case PCI_DEVICE_ID_LSI_INVADER:
6014 case PCI_DEVICE_ID_LSI_FURY:
6015 case PCI_DEVICE_ID_LSI_INTRUDER:
6016 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6017 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6018 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6019 {
6020 if (megasas_alloc_fusion_context(instance)) {
6021 megasas_free_fusion_context(instance);
6022 goto fail_alloc_dma_buf;
6023 }
6024 fusion = instance->ctrl_context;
6025
6026 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
6027 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
6028 fusion->adapter_type = THUNDERBOLT_SERIES;
6029 else if (instance->is_ventura)
6030 fusion->adapter_type = VENTURA_SERIES;
6031 else
6032 fusion->adapter_type = INVADER_SERIES;
6033 }
6034 break;
6035 default: /* For all other supported controllers */
6036
6037 instance->producer =
6038 pci_alloc_consistent(pdev, sizeof(u32),
6039 &instance->producer_h);
6040 instance->consumer =
6041 pci_alloc_consistent(pdev, sizeof(u32),
6042 &instance->consumer_h);
6043
6044 if (!instance->producer || !instance->consumer) {
6045 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
6046 "memory for producer, consumer\n");
6047 goto fail_alloc_dma_buf;
6048 }
6049
6050 *instance->producer = 0;
6051 *instance->consumer = 0;
6052 break;
6053 }
6054
6055 /* Crash dump feature related initialisation */
6056 instance->drv_buf_index = 0;
6057 instance->drv_buf_alloc = 0;
6058 instance->crash_dump_fw_support = 0;
6059 instance->crash_dump_app_support = 0;
6060 instance->fw_crash_state = UNAVAILABLE;
6061 spin_lock_init(&instance->crashdump_lock);
6062 instance->crash_dump_buf = NULL;
6063
6064 megasas_poll_wait_aen = 0;
6065 instance->flag_ieee = 0;
6066 instance->ev = NULL;
6067 instance->issuepend_done = 1;
6068 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6069 instance->is_imr = 0;
6070
6071 instance->evt_detail = pci_alloc_consistent(pdev,
6072 sizeof(struct
6073 megasas_evt_detail),
6074 &instance->evt_detail_h);
6075
6076 if (!instance->evt_detail) {
6077 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
6078 "event detail structure\n");
6079 goto fail_alloc_dma_buf;
6080 }
6081
6082 if (!reset_devices) {
6083 instance->system_info_buf = pci_zalloc_consistent(pdev,
6084 sizeof(struct MR_DRV_SYSTEM_INFO),
6085 &instance->system_info_h);
6086 if (!instance->system_info_buf)
6087 dev_info(&instance->pdev->dev, "Can't allocate system info buffer\n");
6088
6089 instance->pd_info = pci_alloc_consistent(pdev,
6090 sizeof(struct MR_PD_INFO), &instance->pd_info_h);
6091
6094 instance->tgt_prop = pci_alloc_consistent(pdev,
6095 sizeof(struct MR_TARGET_PROPERTIES), &instance->tgt_prop_h);
6096
6097 if (!instance->pd_info)
6098 dev_err(&instance->pdev->dev, "Failed to alloc mem for pd_info\n");
6099
6100 if (!instance->tgt_prop)
6101 dev_err(&instance->pdev->dev, "Failed to alloc mem for tgt_prop\n");
6102
6103 instance->crash_dump_buf = pci_alloc_consistent(pdev,
6104 CRASH_DMA_BUF_SIZE,
6105 &instance->crash_dump_h);
6106 if (!instance->crash_dump_buf)
6107 dev_err(&pdev->dev, "Can't allocate Firmware "
6108 "crash dump DMA buffer\n");
6109 }
6110
6111 /*
6112 * Initialize locks and queues
6113 */
6114 INIT_LIST_HEAD(&instance->cmd_pool);
6115 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6116
6117 atomic_set(&instance->fw_outstanding,0);
6118
6119 init_waitqueue_head(&instance->int_cmd_wait_q);
6120 init_waitqueue_head(&instance->abort_cmd_wait_q);
6121
6122 spin_lock_init(&instance->mfi_pool_lock);
6123 spin_lock_init(&instance->hba_lock);
6124 spin_lock_init(&instance->stream_lock);
6125 spin_lock_init(&instance->completion_lock);
6126
6127 mutex_init(&instance->reset_mutex);
6128 mutex_init(&instance->hba_mutex);
6129
6130 /*
6131 * Initialize PCI related and misc parameters
6132 */
6133 instance->host = host;
6134 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6135 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6136 instance->ctrl_info = NULL;
6137
6138
6139 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6140 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6141 instance->flag_ieee = 1;
6142
6143 megasas_dbg_lvl = 0;
6144 instance->flag = 0;
6145 instance->unload = 1;
6146 instance->last_time = 0;
6147 instance->disableOnlineCtrlReset = 1;
6148 instance->UnevenSpanSupport = 0;
6149
6150 if (instance->ctrl_context) {
6151 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6152 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
6153 } else
6154 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6155
6156 /*
6157 * Initialize MFI Firmware
6158 */
6159 if (megasas_init_fw(instance))
6160 goto fail_init_mfi;
6161
6162 if (instance->requestorId) {
6163 if (instance->PlasmaFW111) {
6164 instance->vf_affiliation_111 =
6165 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
6166 &instance->vf_affiliation_111_h);
6167 if (!instance->vf_affiliation_111)
6168 dev_warn(&pdev->dev, "Can't allocate "
6169 "memory for VF affiliation buffer\n");
6170 } else {
6171 instance->vf_affiliation =
6172 pci_alloc_consistent(pdev,
6173 (MAX_LOGICAL_DRIVES + 1) *
6174 sizeof(struct MR_LD_VF_AFFILIATION),
6175 &instance->vf_affiliation_h);
6176 if (!instance->vf_affiliation)
6177 dev_warn(&pdev->dev, "Can't allocate "
6178 "memory for VF affiliation buffer\n");
6179 }
6180 }
6181
6182 /*
6183 * Store instance in PCI softstate
6184 */
6185 pci_set_drvdata(pdev, instance);
6186
6187 /*
6188 * Add this controller to megasas_mgmt_info structure so that it
6189 * can be exported to management applications
6190 */
6191 megasas_mgmt_info.count++;
6192 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6193 megasas_mgmt_info.max_index++;
6194
6195 /*
6196 * Register with SCSI mid-layer
6197 */
6198 if (megasas_io_attach(instance))
6199 goto fail_io_attach;
6200
6201 instance->unload = 0;
6202 /*
6203 * Trigger SCSI to scan our drives
6204 */
6205 scsi_scan_host(host);
6206
6207 /*
6208 * Initiate AEN (Asynchronous Event Notification)
6209 */
6210 if (megasas_start_aen(instance)) {
6211 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6212 goto fail_start_aen;
6213 }
6214
6215 /* Get current SR-IOV LD/VF affiliation */
6216 if (instance->requestorId)
6217 megasas_get_ld_vf_affiliation(instance, 1);
6218
6219 return 0;
6220
6221 fail_start_aen:
6222 fail_io_attach:
6223 megasas_mgmt_info.count--;
6224 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6225 megasas_mgmt_info.max_index--;
6226
6227 instance->instancet->disable_intr(instance);
6228 megasas_destroy_irqs(instance);
6229
6230 if (instance->ctrl_context)
6231 megasas_release_fusion(instance);
6232 else
6233 megasas_release_mfi(instance);
6234 if (instance->msix_vectors)
6235 pci_free_irq_vectors(instance->pdev);
6236 fail_init_mfi:
6237 fail_alloc_dma_buf:
6238 if (instance->evt_detail)
6239 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6240 instance->evt_detail,
6241 instance->evt_detail_h);
6242
6243 if (instance->pd_info)
6244 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6245 instance->pd_info,
6246 instance->pd_info_h);
6247 if (instance->tgt_prop)
6248 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6249 instance->tgt_prop,
6250 instance->tgt_prop_h);
6251 if (instance->producer)
6252 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6253 instance->producer_h);
6254 if (instance->consumer)
6255 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6256 instance->consumer_h);
6257 scsi_host_put(host);
6258
6259 fail_alloc_instance:
6260 fail_set_dma_mask:
6261 pci_disable_device(pdev);
6262
6263 return -ENODEV;
6264 }
6265
6266 /**
6267 * megasas_flush_cache - Requests FW to flush all its caches
6268 * @instance: Adapter soft state
6269 */
6270 static void megasas_flush_cache(struct megasas_instance *instance)
6271 {
6272 struct megasas_cmd *cmd;
6273 struct megasas_dcmd_frame *dcmd;
6274
6275 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6276 return;
6277
6278 cmd = megasas_get_cmd(instance);
6279
6280 if (!cmd)
6281 return;
6282
6283 dcmd = &cmd->frame->dcmd;
6284
6285 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6286
6287 dcmd->cmd = MFI_CMD_DCMD;
6288 dcmd->cmd_status = 0x0;
6289 dcmd->sge_count = 0;
6290 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6291 dcmd->timeout = 0;
6292 dcmd->pad_0 = 0;
6293 dcmd->data_xfer_len = 0;
6294 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6295 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6296
6297 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6298 != DCMD_SUCCESS) {
6299 dev_err(&instance->pdev->dev,
6300 "return from %s %d\n", __func__, __LINE__);
6301 return;
6302 }
6303
6304 megasas_return_cmd(instance, cmd);
6305 }
6306
6307 /**
6308 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6309 * @instance: Adapter soft state
6310 * @opcode: Shutdown/Hibernate
6311 */
6312 static void megasas_shutdown_controller(struct megasas_instance *instance,
6313 u32 opcode)
6314 {
6315 struct megasas_cmd *cmd;
6316 struct megasas_dcmd_frame *dcmd;
6317
6318 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6319 return;
6320
6321 cmd = megasas_get_cmd(instance);
6322
6323 if (!cmd)
6324 return;
6325
6326 if (instance->aen_cmd)
6327 megasas_issue_blocked_abort_cmd(instance,
6328 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
6329 if (instance->map_update_cmd)
6330 megasas_issue_blocked_abort_cmd(instance,
6331 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
6332 if (instance->jbod_seq_cmd)
6333 megasas_issue_blocked_abort_cmd(instance,
6334 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
6335
6336 dcmd = &cmd->frame->dcmd;
6337
6338 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6339
6340 dcmd->cmd = MFI_CMD_DCMD;
6341 dcmd->cmd_status = 0x0;
6342 dcmd->sge_count = 0;
6343 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6344 dcmd->timeout = 0;
6345 dcmd->pad_0 = 0;
6346 dcmd->data_xfer_len = 0;
6347 dcmd->opcode = cpu_to_le32(opcode);
6348
6349 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6350 != DCMD_SUCCESS) {
6351 dev_err(&instance->pdev->dev,
6352 "return from %s %d\n", __func__, __LINE__);
6353 return;
6354 }
6355
6356 megasas_return_cmd(instance, cmd);
6357 }
6358
6359 #ifdef CONFIG_PM
6360 /**
6361 * megasas_suspend - driver suspend entry point
6362 * @pdev: PCI device structure
6363 * @state: PCI power state to suspend routine
6364 */
6365 static int
6366 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
6367 {
6368 struct Scsi_Host *host;
6369 struct megasas_instance *instance;
6370
6371 instance = pci_get_drvdata(pdev);
6372 host = instance->host;
6373 instance->unload = 1;
6374
6375 /* Shutdown SR-IOV heartbeat timer */
6376 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6377 del_timer_sync(&instance->sriov_heartbeat_timer);
6378
6379 megasas_flush_cache(instance);
6380 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
6381
6382 /* cancel the delayed work if it is still queued */
6383 if (instance->ev != NULL) {
6384 struct megasas_aen_event *ev = instance->ev;
6385 cancel_delayed_work_sync(&ev->hotplug_work);
6386 instance->ev = NULL;
6387 }
6388
6389 tasklet_kill(&instance->isr_tasklet);
6390
6391 pci_set_drvdata(instance->pdev, instance);
6392 instance->instancet->disable_intr(instance);
6393
6394 megasas_destroy_irqs(instance);
6395
6396 if (instance->msix_vectors)
6397 pci_free_irq_vectors(instance->pdev);
6398
6399 pci_save_state(pdev);
6400 pci_disable_device(pdev);
6401
6402 pci_set_power_state(pdev, pci_choose_state(pdev, state));
6403
6404 return 0;
6405 }
6406
6407 /**
6408 * megasas_resume- driver resume entry point
6409 * @pdev: PCI device structure
6410 */
6411 static int
6412 megasas_resume(struct pci_dev *pdev)
6413 {
6414 int rval;
6415 struct Scsi_Host *host;
6416 struct megasas_instance *instance;
6417 int irq_flags = PCI_IRQ_LEGACY;
6418
6419 instance = pci_get_drvdata(pdev);
6420 host = instance->host;
6421 pci_set_power_state(pdev, PCI_D0);
6422 pci_enable_wake(pdev, PCI_D0, 0);
6423 pci_restore_state(pdev);
6424
6425 /*
6426 * PCI prepping: enable device, set bus mastering and DMA mask
6427 */
6428 rval = pci_enable_device_mem(pdev);
6429
6430 if (rval) {
6431 dev_err(&pdev->dev, "Enable device failed\n");
6432 return rval;
6433 }
6434
6435 pci_set_master(pdev);
6436
6437 if (megasas_set_dma_mask(pdev))
6438 goto fail_set_dma_mask;
6439
6440 /*
6441 * Initialize MFI Firmware
6442 */
6443
6444 atomic_set(&instance->fw_outstanding, 0);
6445
6446 /*
6447 * We expect the FW state to be READY
6448 */
6449 if (megasas_transition_to_ready(instance, 0))
6450 goto fail_ready_state;
6451
6452 /* Now re-enable MSI-X */
6453 if (instance->msix_vectors) {
6454 irq_flags = PCI_IRQ_MSIX;
6455 if (smp_affinity_enable)
6456 irq_flags |= PCI_IRQ_AFFINITY;
6457 }
6458 rval = pci_alloc_irq_vectors(instance->pdev, 1,
6459 instance->msix_vectors ?
6460 instance->msix_vectors : 1, irq_flags);
6461 if (rval < 0)
6462 goto fail_reenable_msix;
6463
6464 if (instance->ctrl_context) {
6465 megasas_reset_reply_desc(instance);
6466 if (megasas_ioc_init_fusion(instance)) {
6467 megasas_free_cmds(instance);
6468 megasas_free_cmds_fusion(instance);
6469 goto fail_init_mfi;
6470 }
6471 if (!megasas_get_map_info(instance))
6472 megasas_sync_map_info(instance);
6473 } else {
6474 *instance->producer = 0;
6475 *instance->consumer = 0;
6476 if (megasas_issue_init_mfi(instance))
6477 goto fail_init_mfi;
6478 }
6479
6480 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6481 (unsigned long)instance);
6482
6483 if (instance->msix_vectors ?
6484 megasas_setup_irqs_msix(instance, 0) :
6485 megasas_setup_irqs_ioapic(instance))
6486 goto fail_init_mfi;
6487
6488 /* Re-launch SR-IOV heartbeat timer */
6489 if (instance->requestorId) {
6490 if (!megasas_sriov_start_heartbeat(instance, 0))
6491 megasas_start_timer(instance,
6492 &instance->sriov_heartbeat_timer,
6493 megasas_sriov_heartbeat_handler,
6494 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
6495 else {
6496 instance->skip_heartbeat_timer_del = 1;
6497 goto fail_init_mfi;
6498 }
6499 }
6500
6501 instance->instancet->enable_intr(instance);
6502 megasas_setup_jbod_map(instance);
6503 instance->unload = 0;
6504
6505 /*
6506 * Initiate AEN (Asynchronous Event Notification)
6507 */
6508 if (megasas_start_aen(instance))
6509 dev_err(&instance->pdev->dev, "Start AEN failed\n");
6510
6511 return 0;
6512
6513 fail_init_mfi:
6514 if (instance->evt_detail)
6515 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6516 instance->evt_detail,
6517 instance->evt_detail_h);
6518
6519 if (instance->pd_info)
6520 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6521 instance->pd_info,
6522 instance->pd_info_h);
6523 if (instance->tgt_prop)
6524 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6525 instance->tgt_prop,
6526 instance->tgt_prop_h);
6527 if (instance->producer)
6528 pci_free_consistent(pdev, sizeof(u32), instance->producer,
6529 instance->producer_h);
6530 if (instance->consumer)
6531 pci_free_consistent(pdev, sizeof(u32), instance->consumer,
6532 instance->consumer_h);
6533 scsi_host_put(host);
6534
6535 fail_set_dma_mask:
6536 fail_ready_state:
6537 fail_reenable_msix:
6538
6539 pci_disable_device(pdev);
6540
6541 return -ENODEV;
6542 }
6543 #else
6544 #define megasas_suspend NULL
6545 #define megasas_resume NULL
6546 #endif
6547
6548 static inline int
6549 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
6550 {
6551 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
6552 int i;
6553
6554 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6555 return 1;
6556
6557 for (i = 0; i < wait_time; i++) {
6558 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
6559 break;
6560
6561 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
6562 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
6563
6564 msleep(1000);
6565 }
6566
6567 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
6568 dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n",
6569 __func__);
6570 return 1;
6571 }
6572
6573 return 0;
6574 }
6575
6576 /**
6577 * megasas_detach_one - PCI hot"un"plug entry point
6578 * @pdev: PCI device structure
6579 */
6580 static void megasas_detach_one(struct pci_dev *pdev)
6581 {
6582 int i;
6583 struct Scsi_Host *host;
6584 struct megasas_instance *instance;
6585 struct fusion_context *fusion;
6586 u32 pd_seq_map_sz;
6587
6588 instance = pci_get_drvdata(pdev);
6589 instance->unload = 1;
6590 host = instance->host;
6591 fusion = instance->ctrl_context;
6592
6593 /* Shutdown SR-IOV heartbeat timer */
6594 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6595 del_timer_sync(&instance->sriov_heartbeat_timer);
6596
6597 if (instance->fw_crash_state != UNAVAILABLE)
6598 megasas_free_host_crash_buffer(instance);
6599 scsi_remove_host(instance->host);
6600
6601 if (megasas_wait_for_adapter_operational(instance))
6602 goto skip_firing_dcmds;
6603
6604 megasas_flush_cache(instance);
6605 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6606
6607 skip_firing_dcmds:
6608 /* cancel the delayed work if it is still queued */
6609 if (instance->ev != NULL) {
6610 struct megasas_aen_event *ev = instance->ev;
6611 cancel_delayed_work_sync(&ev->hotplug_work);
6612 instance->ev = NULL;
6613 }
6614
6615 /* cancel all wait events */
6616 wake_up_all(&instance->int_cmd_wait_q);
6617
6618 tasklet_kill(&instance->isr_tasklet);
6619
6620 /*
6621 * Take the instance off the instance array. Note that we will not
6622 * decrement the max_index. We let this array be a sparse array.
6623 */
6624 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6625 if (megasas_mgmt_info.instance[i] == instance) {
6626 megasas_mgmt_info.count--;
6627 megasas_mgmt_info.instance[i] = NULL;
6628
6629 break;
6630 }
6631 }
6632
6633 instance->instancet->disable_intr(instance);
6634
6635 megasas_destroy_irqs(instance);
6636
6637 if (instance->msix_vectors)
6638 pci_free_irq_vectors(instance->pdev);
6639
6640 if (instance->is_ventura) {
6641 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
6642 kfree(fusion->stream_detect_by_ld[i]);
6643 kfree(fusion->stream_detect_by_ld);
6644 fusion->stream_detect_by_ld = NULL;
6645 }
6646
6647
6648 if (instance->ctrl_context) {
6649 megasas_release_fusion(instance);
6650 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
6651 (sizeof(struct MR_PD_CFG_SEQ) *
6652 (MAX_PHYSICAL_DEVICES - 1));
6653 for (i = 0; i < 2 ; i++) {
6654 if (fusion->ld_map[i])
6655 dma_free_coherent(&instance->pdev->dev,
6656 fusion->max_map_sz,
6657 fusion->ld_map[i],
6658 fusion->ld_map_phys[i]);
6659 if (fusion->ld_drv_map[i])
6660 free_pages((ulong)fusion->ld_drv_map[i],
6661 fusion->drv_map_pages);
6662 if (fusion->pd_seq_sync[i])
6663 dma_free_coherent(&instance->pdev->dev,
6664 pd_seq_map_sz,
6665 fusion->pd_seq_sync[i],
6666 fusion->pd_seq_phys[i]);
6667 }
6668 megasas_free_fusion_context(instance);
6669 } else {
6670 megasas_release_mfi(instance);
6671 pci_free_consistent(pdev, sizeof(u32),
6672 instance->producer,
6673 instance->producer_h);
6674 pci_free_consistent(pdev, sizeof(u32),
6675 instance->consumer,
6676 instance->consumer_h);
6677 }
6678
6679 kfree(instance->ctrl_info);
6680
6681 if (instance->evt_detail)
6682 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
6683 instance->evt_detail, instance->evt_detail_h);
6684 if (instance->pd_info)
6685 pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
6686 instance->pd_info,
6687 instance->pd_info_h);
6688 if (instance->tgt_prop)
6689 pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
6690 instance->tgt_prop,
6691 instance->tgt_prop_h);
6692 if (instance->vf_affiliation)
6693 pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
6694 sizeof(struct MR_LD_VF_AFFILIATION),
6695 instance->vf_affiliation,
6696 instance->vf_affiliation_h);
6697
6698 if (instance->vf_affiliation_111)
6699 pci_free_consistent(pdev,
6700 sizeof(struct MR_LD_VF_AFFILIATION_111),
6701 instance->vf_affiliation_111,
6702 instance->vf_affiliation_111_h);
6703
6704 if (instance->hb_host_mem)
6705 pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
6706 instance->hb_host_mem,
6707 instance->hb_host_mem_h);
6708
6709 if (instance->crash_dump_buf)
6710 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
6711 instance->crash_dump_buf, instance->crash_dump_h);
6712
6713 if (instance->system_info_buf)
6714 pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
6715 instance->system_info_buf, instance->system_info_h);
6716
6717 scsi_host_put(host);
6718
6719 pci_disable_device(pdev);
6720 }
6721
6722 /**
6723 * megasas_shutdown - Shutdown entry point
6724 * @pdev: PCI device structure
6725 */
6726 static void megasas_shutdown(struct pci_dev *pdev)
6727 {
6728 struct megasas_instance *instance = pci_get_drvdata(pdev);
6729
6730 instance->unload = 1;
6731
6732 if (megasas_wait_for_adapter_operational(instance))
6733 goto skip_firing_dcmds;
6734
6735 megasas_flush_cache(instance);
6736 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
6737
6738 skip_firing_dcmds:
6739 instance->instancet->disable_intr(instance);
6740 megasas_destroy_irqs(instance);
6741
6742 if (instance->msix_vectors)
6743 pci_free_irq_vectors(instance->pdev);
6744 }
6745
6746 /**
6747 * megasas_mgmt_open - char node "open" entry point
6748 */
6749 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
6750 {
6751 /*
6752 * Allow only those users with admin rights
6753 */
6754 if (!capable(CAP_SYS_ADMIN))
6755 return -EACCES;
6756
6757 return 0;
6758 }
6759
6760 /**
6761 * megasas_mgmt_fasync - Async notifier registration from applications
6762 *
6763 * This function adds the calling process to a driver global queue. When an
6764 * event occurs, SIGIO will be sent to all processes in this queue.
6765 */
6766 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
6767 {
6768 int rc;
6769
6770 mutex_lock(&megasas_async_queue_mutex);
6771
6772 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
6773
6774 mutex_unlock(&megasas_async_queue_mutex);
6775
6776 if (rc >= 0) {
6777 /* For sanity check when we get ioctl */
6778 filep->private_data = filep;
6779 return 0;
6780 }
6781
6782 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
6783
6784 return rc;
6785 }
6786
6787 /**
6788 * megasas_mgmt_poll - char node "poll" entry point
6789 */
6790 static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
6791 {
6792 unsigned int mask;
6793 unsigned long flags;
6794
6795 poll_wait(file, &megasas_poll_wait, wait);
6796 spin_lock_irqsave(&poll_aen_lock, flags);
6797 if (megasas_poll_wait_aen)
6798 mask = (POLLIN | POLLRDNORM);
6799 else
6800 mask = 0;
6801 megasas_poll_wait_aen = 0;
6802 spin_unlock_irqrestore(&poll_aen_lock, flags);
6803 return mask;
6804 }
6805
6806 /*
6807 * megasas_set_crash_dump_params_ioctl:
6808 * Send CRASH_DUMP_MODE DCMD to all controllers
6809 * @cmd: MFI command frame
6810 */
6811
6812 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
6813 {
6814 struct megasas_instance *local_instance;
6815 int i, error = 0;
6816 int crash_support;
6817
6818 crash_support = cmd->frame->dcmd.mbox.w[0];
6819
6820 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
6821 local_instance = megasas_mgmt_info.instance[i];
6822 if (local_instance && local_instance->crash_dump_drv_support) {
6823 if ((atomic_read(&local_instance->adprecovery) ==
6824 MEGASAS_HBA_OPERATIONAL) &&
6825 !megasas_set_crash_dump_params(local_instance,
6826 crash_support)) {
6827 local_instance->crash_dump_app_support =
6828 crash_support;
6829 dev_info(&local_instance->pdev->dev,
6830 "Application firmware crash "
6831 "dump mode set success\n");
6832 error = 0;
6833 } else {
6834 dev_info(&local_instance->pdev->dev,
6835 "Application firmware crash "
6836 "dump mode set failed\n");
6837 error = -1;
6838 }
6839 }
6840 }
6841 return error;
6842 }
6843
6844 /**
6845 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
6846 * @instance: Adapter soft state
6847 * @argp: User's ioctl packet
6848 */
6849 static int
6850 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6851 struct megasas_iocpacket __user * user_ioc,
6852 struct megasas_iocpacket *ioc)
6853 {
6854 struct megasas_sge32 *kern_sge32;
6855 struct megasas_cmd *cmd;
6856 void *kbuff_arr[MAX_IOCTL_SGE];
6857 dma_addr_t buf_handle = 0;
6858 int error = 0, i;
6859 void *sense = NULL;
6860 dma_addr_t sense_handle;
6861 unsigned long *sense_ptr;
6862
6863 memset(kbuff_arr, 0, sizeof(kbuff_arr));
6864
6865 if (ioc->sge_count > MAX_IOCTL_SGE) {
6866 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
6867 ioc->sge_count, MAX_IOCTL_SGE);
6868 return -EINVAL;
6869 }
6870
6871 cmd = megasas_get_cmd(instance);
6872 if (!cmd) {
6873 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
6874 return -ENOMEM;
6875 }
6876
6877 /*
6878 * User's IOCTL packet has 2 frames (maximum). Copy those two
6879 * frames into our cmd's frames. cmd->frame's context will get
6880 * overwritten when we copy from the user's frames, so it is
6881 * restored separately below.
6882 */
6883 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
6884 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
6885 cmd->frame->hdr.pad_0 = 0;
6886 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE |
6887 MFI_FRAME_SGL64 |
6888 MFI_FRAME_SENSE64));
6889
6890 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_SHUTDOWN) {
6891 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
6892 megasas_return_cmd(instance, cmd);
6893 return -1;
6894 }
6895 }
6896
6897 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
6898 error = megasas_set_crash_dump_params_ioctl(cmd);
6899 megasas_return_cmd(instance, cmd);
6900 return error;
6901 }
6902
6903 /*
6904 * The management interface between applications and the fw uses
6905 * MFI frames. E.g., RAID configuration changes, LD property changes,
6906 * etc. are accomplished through different kinds of MFI frames. The
6907 * driver only needs to take care of substituting user buffers with
6908 * kernel buffers in the SGLs. The location of the SGL is embedded in
6909 * the struct iocpacket itself.
6910 */
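/*
 * Layout sketch (illustrative, from the fields used in this function):
 * the application builds an MFI frame in ioc->frame.raw and records
 * where its scatter-gather list lives:
 *
 *	ioc->sgl_off         - byte offset of the SGL inside frame.raw
 *	ioc->sgl[i].iov_base - user buffer backing SGE i
 *	ioc->sgl[i].iov_len  - length of that buffer
 *
 * The loop below allocates a coherent kernel buffer per SGE, copies the
 * user data in, and rewrites the SGE's phys_addr/length inside the
 * frame so the firmware only ever sees kernel DMA addresses.
 */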
6911 kern_sge32 = (struct megasas_sge32 *)
6912 ((unsigned long)cmd->frame + ioc->sgl_off);
6913
6914 /*
6915 * For each user buffer, create a mirror buffer and copy in
6916 */
6917 for (i = 0; i < ioc->sge_count; i++) {
6918 if (!ioc->sgl[i].iov_len)
6919 continue;
6920
6921 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
6922 ioc->sgl[i].iov_len,
6923 &buf_handle, GFP_KERNEL);
6924 if (!kbuff_arr[i]) {
6925 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6926 "kernel SGL buffer for IOCTL\n");
6927 error = -ENOMEM;
6928 goto out;
6929 }
6930
6931 /*
6932 * We don't change the dma_coherent_mask, so
6933 * dma_alloc_coherent only returns 32bit addresses
6934 */
6935 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
6936 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
6937
6938 /*
6939 * We created a kernel buffer corresponding to the
6940 * user buffer. Now copy in from the user buffer
6941 */
6942 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
6943 (u32) (ioc->sgl[i].iov_len))) {
6944 error = -EFAULT;
6945 goto out;
6946 }
6947 }
6948
6949 if (ioc->sense_len) {
6950 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
6951 &sense_handle, GFP_KERNEL);
6952 if (!sense) {
6953 error = -ENOMEM;
6954 goto out;
6955 }
6956
6957 sense_ptr =
6958 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
6959 *sense_ptr = cpu_to_le32(sense_handle);
6960 }
6961
6962 /*
6963 * Set the sync_cmd flag so that the ISR knows not to complete this
6964 * cmd to the SCSI mid-layer
6965 */
6966 cmd->sync_cmd = 1;
6967 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
6968 cmd->sync_cmd = 0;
6969 dev_err(&instance->pdev->dev,
6970 "return -EBUSY from %s %d opcode 0x%x cmd->cmd_status_drv 0x%x\n",
6971 __func__, __LINE__, cmd->frame->dcmd.opcode,
6972 cmd->cmd_status_drv);
6973 return -EBUSY;
6974 }
6975
6976 cmd->sync_cmd = 0;
6977
6978 if (instance->unload == 1) {
6979 dev_info(&instance->pdev->dev, "Driver unload is in progress "
6980 "don't submit data to application\n");
6981 goto out;
6982 }
6983 /*
6984 * copy out the kernel buffers to user buffers
6985 */
6986 for (i = 0; i < ioc->sge_count; i++) {
6987 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
6988 ioc->sgl[i].iov_len)) {
6989 error = -EFAULT;
6990 goto out;
6991 }
6992 }
6993
6994 /*
6995 * copy out the sense
6996 */
6997 if (ioc->sense_len) {
6998 /*
6999 * sense_ptr points to the location that has the user
7000 * sense buffer address
7001 */
7002 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7003 ioc->sense_off);
7004
7005 if (copy_to_user((void __user *)((unsigned long)
7006 get_unaligned((unsigned long *)sense_ptr)),
7007 sense, ioc->sense_len)) {
7008 dev_err(&instance->pdev->dev, "Failed to copy out to user "
7009 "sense data\n");
7010 error = -EFAULT;
7011 goto out;
7012 }
7013 }
7014
7015 /*
7016 * copy the status codes returned by the fw
7017 */
7018 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7019 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7020 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7021 error = -EFAULT;
7022 }
7023
7024 out:
7025 if (sense) {
7026 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7027 sense, sense_handle);
7028 }
7029
7030 for (i = 0; i < ioc->sge_count; i++) {
7031 if (kbuff_arr[i]) {
7032 dma_free_coherent(&instance->pdev->dev,
7033 le32_to_cpu(kern_sge32[i].length),
7034 kbuff_arr[i],
7035 le32_to_cpu(kern_sge32[i].phys_addr));
7036 kbuff_arr[i] = NULL;
7037 }
7038 }
7039
7040 megasas_return_cmd(instance, cmd);
7041 return error;
7042 }
7043
7044 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7045 {
7046 struct megasas_iocpacket __user *user_ioc =
7047 (struct megasas_iocpacket __user *)arg;
7048 struct megasas_iocpacket *ioc;
7049 struct megasas_instance *instance;
7050 int error;
7051 int i;
7052 unsigned long flags;
7053 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7054
7055 ioc = memdup_user(user_ioc, sizeof(*ioc));
7056 if (IS_ERR(ioc))
7057 return PTR_ERR(ioc);
7058
7059 instance = megasas_lookup_instance(ioc->host_no);
7060 if (!instance) {
7061 error = -ENODEV;
7062 goto out_kfree_ioc;
7063 }
7064
7065 /* Adjust ioctl wait time for VF mode */
7066 if (instance->requestorId)
7067 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7068
7069 /* Block ioctls in VF mode */
7070 if (instance->requestorId && !allow_vf_ioctls) {
7071 error = -ENODEV;
7072 goto out_kfree_ioc;
7073 }
7074
7075 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7076 dev_err(&instance->pdev->dev, "Controller in crit error\n");
7077 error = -ENODEV;
7078 goto out_kfree_ioc;
7079 }
7080
7081 if (instance->unload == 1) {
7082 error = -ENODEV;
7083 goto out_kfree_ioc;
7084 }
7085
7086 if (down_interruptible(&instance->ioctl_sem)) {
7087 error = -ERESTARTSYS;
7088 goto out_kfree_ioc;
7089 }
7090
7091 for (i = 0; i < wait_time; i++) {
7092
7093 spin_lock_irqsave(&instance->hba_lock, flags);
7094 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7095 spin_unlock_irqrestore(&instance->hba_lock, flags);
7096 break;
7097 }
7098 spin_unlock_irqrestore(&instance->hba_lock, flags);
7099
7100 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7101 dev_notice(&instance->pdev->dev, "waiting "
7102 "for controller reset to finish\n");
7103 }
7104
7105 msleep(1000);
7106 }
7107
7108 spin_lock_irqsave(&instance->hba_lock, flags);
7109 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7110 spin_unlock_irqrestore(&instance->hba_lock, flags);
7111
7112 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7113 error = -ENODEV;
7114 goto out_up;
7115 }
7116 spin_unlock_irqrestore(&instance->hba_lock, flags);
7117
7118 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7119 out_up:
7120 up(&instance->ioctl_sem);
7121
7122 out_kfree_ioc:
7123 kfree(ioc);
7124 return error;
7125 }
7126
7127 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7128 {
7129 struct megasas_instance *instance;
7130 struct megasas_aen aen;
7131 int error;
7132 int i;
7133 unsigned long flags;
7134 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
7135
7136 if (file->private_data != file) {
7137 printk(KERN_DEBUG "megasas: fasync_helper was not "
7138 "called first\n");
7139 return -EINVAL;
7140 }
7141
7142 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7143 return -EFAULT;
7144
7145 instance = megasas_lookup_instance(aen.host_no);
7146
7147 if (!instance)
7148 return -ENODEV;
7149
7150 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7151 return -ENODEV;
7152 }
7153
7154 if (instance->unload == 1) {
7155 return -ENODEV;
7156 }
7157
7158 for (i = 0; i < wait_time; i++) {
7159
7160 spin_lock_irqsave(&instance->hba_lock, flags);
7161 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
7162 spin_unlock_irqrestore(&instance->hba_lock,
7163 flags);
7164 break;
7165 }
7166
7167 spin_unlock_irqrestore(&instance->hba_lock, flags);
7168
7169 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
7170 dev_notice(&instance->pdev->dev, "waiting for "
7171 "controller reset to finish\n");
7172 }
7173
7174 msleep(1000);
7175 }
7176
7177 spin_lock_irqsave(&instance->hba_lock, flags);
7178 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
7179 spin_unlock_irqrestore(&instance->hba_lock, flags);
7180 dev_err(&instance->pdev->dev, "timed out while waiting for HBA to recover\n");
7181 return -ENODEV;
7182 }
7183 spin_unlock_irqrestore(&instance->hba_lock, flags);
7184
7185 mutex_lock(&instance->reset_mutex);
7186 error = megasas_register_aen(instance, aen.seq_num,
7187 aen.class_locale_word);
7188 mutex_unlock(&instance->reset_mutex);
7189 return error;
7190 }
7191
7192 /**
7193 * megasas_mgmt_ioctl - char node ioctl entry point
7194 */
7195 static long
7196 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7197 {
7198 switch (cmd) {
7199 case MEGASAS_IOC_FIRMWARE:
7200 return megasas_mgmt_ioctl_fw(file, arg);
7201
7202 case MEGASAS_IOC_GET_AEN:
7203 return megasas_mgmt_ioctl_aen(file, arg);
7204 }
7205
7206 return -ENOTTY;
7207 }
7208
7209 #ifdef CONFIG_COMPAT
7210 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7211 {
7212 struct compat_megasas_iocpacket __user *cioc =
7213 (struct compat_megasas_iocpacket __user *)arg;
7214 struct megasas_iocpacket __user *ioc =
7215 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7216 int i;
7217 int error = 0;
7218 compat_uptr_t ptr;
7219 u32 local_sense_off;
7220 u32 local_sense_len;
7221 u32 user_sense_off;
7222
7223 if (clear_user(ioc, sizeof(*ioc)))
7224 return -EFAULT;
7225
7226 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7227 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7228 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7229 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7230 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7231 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7232 return -EFAULT;
7233
7234 /*
7235 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7236 * sense_len is not null, so prepare the 64bit value under
7237 * the same condition.
7238 */
7239 if (get_user(local_sense_off, &ioc->sense_off) ||
7240 get_user(local_sense_len, &ioc->sense_len) ||
7241 get_user(user_sense_off, &cioc->sense_off))
7242 return -EFAULT;
7243
7244 if (local_sense_len) {
7245 void __user **sense_ioc_ptr =
7246 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7247 compat_uptr_t *sense_cioc_ptr =
7248 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7249 if (get_user(ptr, sense_cioc_ptr) ||
7250 put_user(compat_ptr(ptr), sense_ioc_ptr))
7251 return -EFAULT;
7252 }
7253
7254 for (i = 0; i < MAX_IOCTL_SGE; i++) {
7255 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7256 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7257 copy_in_user(&ioc->sgl[i].iov_len,
7258 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7259 return -EFAULT;
7260 }
7261
7262 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7263
7264 if (copy_in_user(&cioc->frame.hdr.cmd_status,
7265 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7266 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7267 return -EFAULT;
7268 }
7269 return error;
7270 }
7271
7272 static long
7273 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7274 unsigned long arg)
7275 {
7276 switch (cmd) {
7277 case MEGASAS_IOC_FIRMWARE32:
7278 return megasas_mgmt_compat_ioctl_fw(file, arg);
7279 case MEGASAS_IOC_GET_AEN:
7280 return megasas_mgmt_ioctl_aen(file, arg);
7281 }
7282
7283 return -ENOTTY;
7284 }
7285 #endif
7286
7287 /*
7288 * File operations structure for management interface
7289 */
7290 static const struct file_operations megasas_mgmt_fops = {
7291 .owner = THIS_MODULE,
7292 .open = megasas_mgmt_open,
7293 .fasync = megasas_mgmt_fasync,
7294 .unlocked_ioctl = megasas_mgmt_ioctl,
7295 .poll = megasas_mgmt_poll,
7296 #ifdef CONFIG_COMPAT
7297 .compat_ioctl = megasas_mgmt_compat_ioctl,
7298 #endif
7299 .llseek = noop_llseek,
7300 };
7301
7302 /*
7303 * PCI hotplug support registration structure
7304 */
7305 static struct pci_driver megasas_pci_driver = {
7306
7307 .name = "megaraid_sas",
7308 .id_table = megasas_pci_table,
7309 .probe = megasas_probe_one,
7310 .remove = megasas_detach_one,
7311 .suspend = megasas_suspend,
7312 .resume = megasas_resume,
7313 .shutdown = megasas_shutdown,
7314 };
7315
7316 /*
7317 * Sysfs driver attributes
7318 */
7319 static ssize_t megasas_sysfs_show_version(struct device_driver *dd, char *buf)
7320 {
7321 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7322 MEGASAS_VERSION);
7323 }
7324
7325 static DRIVER_ATTR(version, S_IRUGO, megasas_sysfs_show_version, NULL);
7326
7327 static ssize_t
7328 megasas_sysfs_show_release_date(struct device_driver *dd, char *buf)
7329 {
7330 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7331 MEGASAS_RELDATE);
7332 }
7333
7334 static DRIVER_ATTR(release_date, S_IRUGO, megasas_sysfs_show_release_date, NULL);
7335
7336 static ssize_t
7337 megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf)
7338 {
7339 return sprintf(buf, "%u\n", support_poll_for_event);
7340 }
7341
7342 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
7343 megasas_sysfs_show_support_poll_for_event, NULL);
7344
7345 static ssize_t
7346 megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
7347 {
7348 return sprintf(buf, "%u\n", support_device_change);
7349 }
7350
7351 static DRIVER_ATTR(support_device_change, S_IRUGO,
7352 megasas_sysfs_show_support_device_change, NULL);
7353
7354 static ssize_t
7355 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
7356 {
7357 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7358 }
7359
7360 static ssize_t
7361 megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
7362 {
7363 int retval = count;
7364
7365 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
7366 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
7367 retval = -EINVAL;
7368 }
7369 return retval;
7370 }
7371
7372 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
7373 megasas_sysfs_set_dbg_lvl);
7374
7375 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
7376 {
7377 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
7378 scsi_remove_device(sdev);
7379 scsi_device_put(sdev);
7380 }
7381
7382 static void
7383 megasas_aen_polling(struct work_struct *work)
7384 {
7385 struct megasas_aen_event *ev =
7386 container_of(work, struct megasas_aen_event, hotplug_work.work);
7387 struct megasas_instance *instance = ev->instance;
7388 union megasas_evt_class_locale class_locale;
7389 struct Scsi_Host *host;
7390 struct scsi_device *sdev1;
7391 u16 pd_index = 0;
7392 u16 ld_index = 0;
7393 int i, j, doscan = 0;
7394 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
7395 int error;
7396 u8 dcmd_ret = DCMD_SUCCESS;
7397
7398 if (!instance) {
7399 printk(KERN_ERR "megasas: invalid instance!\n");
7400 kfree(ev);
7401 return;
7402 }
7403
7404 /* Adjust event workqueue thread wait time for VF mode */
7405 if (instance->requestorId)
7406 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
7407
7408 /* Don't run the event workqueue thread if OCR is running */
7409 mutex_lock(&instance->reset_mutex);
7410
7411 instance->ev = NULL;
7412 host = instance->host;
7413 if (instance->evt_detail) {
7414 megasas_decode_evt(instance);
7415
7416 switch (le32_to_cpu(instance->evt_detail->code)) {
7417
7418 case MR_EVT_PD_INSERTED:
7419 case MR_EVT_PD_REMOVED:
7420 dcmd_ret = megasas_get_pd_list(instance);
7421 if (dcmd_ret == DCMD_SUCCESS)
7422 doscan = SCAN_PD_CHANNEL;
7423 break;
7424
7425 case MR_EVT_LD_OFFLINE:
7426 case MR_EVT_CFG_CLEARED:
7427 case MR_EVT_LD_DELETED:
7428 case MR_EVT_LD_CREATED:
7429 if (!instance->requestorId ||
7430 megasas_get_ld_vf_affiliation(instance, 0))
7431 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7432
7433 if (dcmd_ret == DCMD_SUCCESS)
7434 doscan = SCAN_VD_CHANNEL;
7435
7436 break;
7437
7438 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
7439 case MR_EVT_FOREIGN_CFG_IMPORTED:
7440 case MR_EVT_LD_STATE_CHANGE:
7441 dcmd_ret = megasas_get_pd_list(instance);
7442
7443 if (dcmd_ret != DCMD_SUCCESS)
7444 break;
7445
7446 if (!instance->requestorId ||
7447 megasas_get_ld_vf_affiliation(instance, 0))
7448 dcmd_ret = megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
7449
7450 if (dcmd_ret != DCMD_SUCCESS)
7451 break;
7452
7453 doscan = SCAN_VD_CHANNEL | SCAN_PD_CHANNEL;
7454 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
7455 instance->host->host_no);
7456 break;
7457
7458 case MR_EVT_CTRL_PROP_CHANGED:
7459 dcmd_ret = megasas_get_ctrl_info(instance);
7460 break;
7461 default:
7462 doscan = 0;
7463 break;
7464 }
7465 } else {
7466 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
7467 mutex_unlock(&instance->reset_mutex);
7468 kfree(ev);
7469 return;
7470 }
7471
7472 mutex_unlock(&instance->reset_mutex);
7473
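/*
 * Physical device channels: add devices that the updated pd_list reports
 * as system drives and remove the ones that are gone.
 */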
7474 if (doscan & SCAN_PD_CHANNEL) {
7475 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
7476 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7477 pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j;
7478 sdev1 = scsi_device_lookup(host, i, j, 0);
7479 if (instance->pd_list[pd_index].driveState ==
7480 MR_PD_STATE_SYSTEM) {
7481 if (!sdev1)
7482 scsi_add_device(host, i, j, 0);
7483 else
7484 scsi_device_put(sdev1);
7485 } else {
7486 if (sdev1)
7487 megasas_remove_scsi_device(sdev1);
7488 }
7489 }
7490 }
7491 }
7492
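/*
 * Virtual drive channels: ld_ids[] entries of 0xff mark unused slots, so
 * add devices for valid entries and remove the rest.
 */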
7493 if (doscan & SCAN_VD_CHANNEL) {
7494 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
7495 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
7496 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
7497 sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7498 if (instance->ld_ids[ld_index] != 0xff) {
7499 if (!sdev1)
7500 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
7501 else
7502 scsi_device_put(sdev1);
7503 } else {
7504 if (sdev1)
7505 megasas_remove_scsi_device(sdev1);
7506 }
7507 }
7508 }
7509 }
7510
7511 if (dcmd_ret == DCMD_SUCCESS)
7512 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
7513 else
7514 seq_num = instance->last_seq_num;
7515
7516 /* Register AEN with FW for latest sequence number plus 1 */
7517 class_locale.members.reserved = 0;
7518 class_locale.members.locale = MR_EVT_LOCALE_ALL;
7519 class_locale.members.class = MR_EVT_CLASS_DEBUG;
7520
7521 if (instance->aen_cmd != NULL) {
7522 kfree(ev);
7523 return;
7524 }
7525
7526 mutex_lock(&instance->reset_mutex);
7527 error = megasas_register_aen(instance, seq_num,
7528 class_locale.word);
7529 if (error)
7530 dev_err(&instance->pdev->dev,
7531 "register aen failed error %x\n", error);
7532
7533 mutex_unlock(&instance->reset_mutex);
7534 kfree(ev);
7535 }
7536
7537 /**
7538 * megasas_init - Driver load entry point
7539 */
7540 static int __init megasas_init(void)
7541 {
7542 int rval;
7543
7544 /*
7545 * When booted in a kdump kernel, minimize the memory footprint by
7546 * disabling a few features
7547 */
7548 if (reset_devices) {
7549 msix_vectors = 1;
7550 rdpq_enable = 0;
7551 dual_qdepth_disable = 1;
7552 }
7553
7554 /*
7555 * Announce driver version and other information
7556 */
7557 pr_info("megasas: %s\n", MEGASAS_VERSION);
7558
7559 spin_lock_init(&poll_aen_lock);
7560
7561 support_poll_for_event = 2;
7562 support_device_change = 1;
7563
7564 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
7565
7566 /*
7567 * Register character device node
7568 */
7569 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
7570
7571 if (rval < 0) {
7572 printk(KERN_DEBUG "megasas: failed to register management char device\n");
7573 return rval;
7574 }
7575
7576 megasas_mgmt_majorno = rval;
7577
7578 /*
7579 * Register ourselves as PCI hotplug module
7580 */
7581 rval = pci_register_driver(&megasas_pci_driver);
7582
7583 if (rval) {
7584 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
7585 goto err_pcidrv;
7586 }
7587
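/*
 * Create the driver-level sysfs attributes; each failure unwinds the
 * previously created files via the error labels below.
 */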
7588 rval = driver_create_file(&megasas_pci_driver.driver,
7589 &driver_attr_version);
7590 if (rval)
7591 goto err_dcf_attr_ver;
7592
7593 rval = driver_create_file(&megasas_pci_driver.driver,
7594 &driver_attr_release_date);
7595 if (rval)
7596 goto err_dcf_rel_date;
7597
7598 rval = driver_create_file(&megasas_pci_driver.driver,
7599 &driver_attr_support_poll_for_event);
7600 if (rval)
7601 goto err_dcf_support_poll_for_event;
7602
7603 rval = driver_create_file(&megasas_pci_driver.driver,
7604 &driver_attr_dbg_lvl);
7605 if (rval)
7606 goto err_dcf_dbg_lvl;
7607 rval = driver_create_file(&megasas_pci_driver.driver,
7608 &driver_attr_support_device_change);
7609 if (rval)
7610 goto err_dcf_support_device_change;
7611
7612 return rval;
7613
7614 err_dcf_support_device_change:
7615 driver_remove_file(&megasas_pci_driver.driver,
7616 &driver_attr_dbg_lvl);
7617 err_dcf_dbg_lvl:
7618 driver_remove_file(&megasas_pci_driver.driver,
7619 &driver_attr_support_poll_for_event);
7620 err_dcf_support_poll_for_event:
7621 driver_remove_file(&megasas_pci_driver.driver,
7622 &driver_attr_release_date);
7623 err_dcf_rel_date:
7624 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7625 err_dcf_attr_ver:
7626 pci_unregister_driver(&megasas_pci_driver);
7627 err_pcidrv:
7628 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7629 return rval;
7630 }
7631
7632 /**
7633 * megasas_exit - Driver unload entry point
7634 */
7635 static void __exit megasas_exit(void)
7636 {
7637 driver_remove_file(&megasas_pci_driver.driver,
7638 &driver_attr_dbg_lvl);
7639 driver_remove_file(&megasas_pci_driver.driver,
7640 &driver_attr_support_poll_for_event);
7641 driver_remove_file(&megasas_pci_driver.driver,
7642 &driver_attr_support_device_change);
7643 driver_remove_file(&megasas_pci_driver.driver,
7644 &driver_attr_release_date);
7645 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
7646
7647 pci_unregister_driver(&megasas_pci_driver);
7648 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
7649 }
7650
7651 module_init(megasas_init);
7652 module_exit(megasas_exit);