1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux MegaRAID driver for SAS based RAID controllers
4 *
5 * Copyright (c) 2003-2013 LSI Corporation
6 * Copyright (c) 2013-2016 Avago Technologies
7 * Copyright (c) 2016-2018 Broadcom Inc.
8 *
9 * Authors: Broadcom Inc.
10 * Sreenivas Bagalkote
11 * Sumant Patro
12 * Bo Yang
13 * Adam Radford
14 * Kashyap Desai <kashyap.desai@broadcom.com>
15 * Sumit Saxena <sumit.saxena@broadcom.com>
16 *
17 * Send feedback to: megaraidlinux.pdl@broadcom.com
18 */
19
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_cmnd.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_tcq.h>
45 #include "megaraid_sas_fusion.h"
46 #include "megaraid_sas.h"
47
48 /*
49 * Number of sectors per IO command
50 * Will be set in megasas_init_mfi if user does not provide
51 */
52 static unsigned int max_sectors;
53 module_param_named(max_sectors, max_sectors, int, 0);
54 MODULE_PARM_DESC(max_sectors,
55 "Maximum number of sectors per IO command");
56
57 static int msix_disable;
58 module_param(msix_disable, int, S_IRUGO);
59 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
60
61 static unsigned int msix_vectors;
62 module_param(msix_vectors, int, S_IRUGO);
63 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
64
65 static int allow_vf_ioctls;
66 module_param(allow_vf_ioctls, int, S_IRUGO);
67 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
68
69 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
70 module_param(throttlequeuedepth, int, S_IRUGO);
71 MODULE_PARM_DESC(throttlequeuedepth,
72 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
73
74 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
75 module_param(resetwaittime, int, S_IRUGO);
76 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds (1-180) after I/O timeout before resetting adapter. Default: 180s");
77
78 int smp_affinity_enable = 1;
79 module_param(smp_affinity_enable, int, S_IRUGO);
80 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
81
82 int rdpq_enable = 1;
83 module_param(rdpq_enable, int, S_IRUGO);
84 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth, enable/disable. Default: enable(1)");
85
86 unsigned int dual_qdepth_disable;
87 module_param(dual_qdepth_disable, int, S_IRUGO);
88 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
89
90 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
91 module_param(scmd_timeout, int, S_IRUGO);
92 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
93
94 MODULE_LICENSE("GPL");
95 MODULE_VERSION(MEGASAS_VERSION);
96 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
97 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
98
99 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
100 static int megasas_get_pd_list(struct megasas_instance *instance);
101 static int megasas_ld_list_query(struct megasas_instance *instance,
102 u8 query_type);
103 static int megasas_issue_init_mfi(struct megasas_instance *instance);
104 static int megasas_register_aen(struct megasas_instance *instance,
105 u32 seq_num, u32 class_locale_word);
106 static void megasas_get_pd_info(struct megasas_instance *instance,
107 struct scsi_device *sdev);
108
109 /*
110 * PCI ID table for all supported controllers
111 */
112 static struct pci_device_id megasas_pci_table[] = {
113
114 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
115 /* xscale IOP */
116 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
117 /* ppc IOP */
118 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
119 /* ppc IOP */
120 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
121 /* gen2*/
122 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
123 /* gen2*/
124 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
125 /* skinny*/
126 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
127 /* skinny*/
128 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
129 /* xscale IOP, vega */
130 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
131 /* xscale IOP */
132 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
133 /* Fusion */
134 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
135 /* Plasma */
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
137 /* Invader */
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
139 /* Fury */
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
141 /* Intruder */
142 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
143 /* Intruder 24 port*/
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
145 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
146 /* VENTURA */
147 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
149 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
151 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
152 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
153 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
155 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
157 {}
158 };
159
160 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
161
162 static int megasas_mgmt_majorno;
163 struct megasas_mgmt_info megasas_mgmt_info;
164 static struct fasync_struct *megasas_async_queue;
165 static DEFINE_MUTEX(megasas_async_queue_mutex);
166
167 static int megasas_poll_wait_aen;
168 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
169 static u32 support_poll_for_event;
170 u32 megasas_dbg_lvl;
171 static u32 support_device_change;
172 static bool support_nvme_encapsulation;
173
174 /* define lock for aen poll */
175 spinlock_t poll_aen_lock;
176
177 void
178 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
179 u8 alt_status);
180 static u32
181 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
182 static int
183 megasas_adp_reset_gen2(struct megasas_instance *instance,
184 struct megasas_register_set __iomem *reg_set);
185 static irqreturn_t megasas_isr(int irq, void *devp);
186 static u32
187 megasas_init_adapter_mfi(struct megasas_instance *instance);
188 u32
189 megasas_build_and_issue_cmd(struct megasas_instance *instance,
190 struct scsi_cmnd *scmd);
191 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
192 int
193 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
194 int seconds);
195 void megasas_fusion_ocr_wq(struct work_struct *work);
196 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
197 int initial);
198 static int
199 megasas_set_dma_mask(struct megasas_instance *instance);
200 static int
201 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
202 static inline void
203 megasas_free_ctrl_mem(struct megasas_instance *instance);
204 static inline int
205 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
206 static inline void
207 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
208 static inline void
209 megasas_init_ctrl_params(struct megasas_instance *instance);
210
211 u32 megasas_readl(struct megasas_instance *instance,
212 const volatile void __iomem *addr)
213 {
214 u32 i = 0, ret_val;
215 /*
216 * Due to a HW errata in Aero controllers, reads to certain
217 * Fusion registers could intermittently return all zeroes.
218 * This behavior is transient in nature and subsequent reads will
219 * return a valid value. As a workaround, the driver retries readl
220 * up to three times until a non-zero value is read.
221 */
222 if (instance->adapter_type == AERO_SERIES) {
223 do {
224 ret_val = readl(addr);
225 i++;
226 } while (ret_val == 0 && i < 3);
227 return ret_val;
228 } else {
229 return readl(addr);
230 }
231 }
232
233 /**
234 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
235 * @instance: Adapter soft state
236 * @dcmd: DCMD frame inside MFI command
237 * @dma_addr: DMA address of buffer to be passed to FW
238 * @dma_len: Length of DMA buffer to be passed to FW
239 * @return: void
240 */
241 void megasas_set_dma_settings(struct megasas_instance *instance,
242 struct megasas_dcmd_frame *dcmd,
243 dma_addr_t dma_addr, u32 dma_len)
244 {
245 if (instance->consistent_mask_64bit) {
246 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
247 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
248 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
249
250 } else {
251 dcmd->sgl.sge32[0].phys_addr =
252 cpu_to_le32(lower_32_bits(dma_addr));
253 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
254 dcmd->flags = cpu_to_le16(dcmd->flags);
255 }
256 }
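/*
 * Illustrative sketch only (not part of the original file): a typical DCMD
 * caller, such as megasas_get_pd_list() later in this file, builds the frame
 * and then relies on megasas_set_dma_settings() to pick the 32- vs 64-bit SGE
 * layout. The "example_" names are hypothetical and error handling is trimmed
 * for brevity.
 */
static int example_issue_read_dcmd(struct megasas_instance *instance,
				   dma_addr_t buf_h, u32 buf_len, u32 opcode)
{
	struct megasas_cmd *cmd;
	struct megasas_dcmd_frame *dcmd;
	int ret;

	cmd = megasas_get_cmd(instance);
	if (!cmd)
		return -ENOMEM;

	dcmd = &cmd->frame->dcmd;
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->data_xfer_len = cpu_to_le32(buf_len);
	dcmd->opcode = cpu_to_le32(opcode);

	/* Fills sge32[0] or sge64[0] and ORs in MFI_FRAME_SGL64 if needed */
	megasas_set_dma_settings(instance, dcmd, buf_h, buf_len);

	ret = megasas_issue_polled(instance, cmd);

	megasas_return_cmd(instance, cmd);
	return ret;
}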
257
258 void
259 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
260 {
261 instance->instancet->fire_cmd(instance,
262 cmd->frame_phys_addr, 0, instance->reg_set);
263 return;
264 }
265
266 /**
267 * megasas_get_cmd - Get a command from the free pool
268 * @instance: Adapter soft state
269 *
270 * Returns a free command from the pool
271 */
272 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
273 *instance)
274 {
275 unsigned long flags;
276 struct megasas_cmd *cmd = NULL;
277
278 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
279
280 if (!list_empty(&instance->cmd_pool)) {
281 cmd = list_entry((&instance->cmd_pool)->next,
282 struct megasas_cmd, list);
283 list_del_init(&cmd->list);
284 } else {
285 dev_err(&instance->pdev->dev, "Command pool empty!\n");
286 }
287
288 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
289 return cmd;
290 }
291
292 /**
293 * megasas_return_cmd - Return a cmd to free command pool
294 * @instance: Adapter soft state
295 * @cmd: Command packet to be returned to free command pool
296 */
297 void
298 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
299 {
300 unsigned long flags;
301 u32 blk_tags;
302 struct megasas_cmd_fusion *cmd_fusion;
303 struct fusion_context *fusion = instance->ctrl_context;
304
305 /* This flag is used only for fusion adapter.
306 * Wait for Interrupt for Polled mode DCMD
307 */
308 if (cmd->flags & DRV_DCMD_POLLED_MODE)
309 return;
310
311 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
312
313 if (fusion) {
314 blk_tags = instance->max_scsi_cmds + cmd->index;
315 cmd_fusion = fusion->cmd_list[blk_tags];
316 megasas_return_cmd_fusion(instance, cmd_fusion);
317 }
318 cmd->scmd = NULL;
319 cmd->frame_count = 0;
320 cmd->flags = 0;
321 memset(cmd->frame, 0, instance->mfi_frame_size);
322 cmd->frame->io.context = cpu_to_le32(cmd->index);
323 if (!fusion && reset_devices)
324 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
325 list_add(&cmd->list, (&instance->cmd_pool)->next);
326
327 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
328
329 }
330
331 static const char *
332 format_timestamp(uint32_t timestamp)
333 {
334 static char buffer[32];
335
336 if ((timestamp & 0xff000000) == 0xff000000)
337 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
338 0x00ffffff);
339 else
340 snprintf(buffer, sizeof(buffer), "%us", timestamp);
341 return buffer;
342 }
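/*
 * Example (derived from the encoding above): a timestamp of 0xff00003c is
 * boot-relative and formats as "boot + 60s", while an absolute value such
 * as 120 formats as "120s".
 */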
343
344 static const char *
345 format_class(int8_t class)
346 {
347 static char buffer[6];
348
349 switch (class) {
350 case MFI_EVT_CLASS_DEBUG:
351 return "debug";
352 case MFI_EVT_CLASS_PROGRESS:
353 return "progress";
354 case MFI_EVT_CLASS_INFO:
355 return "info";
356 case MFI_EVT_CLASS_WARNING:
357 return "WARN";
358 case MFI_EVT_CLASS_CRITICAL:
359 return "CRIT";
360 case MFI_EVT_CLASS_FATAL:
361 return "FATAL";
362 case MFI_EVT_CLASS_DEAD:
363 return "DEAD";
364 default:
365 snprintf(buffer, sizeof(buffer), "%d", class);
366 return buffer;
367 }
368 }
369
370 /**
371 * megasas_decode_evt - Decode FW AEN event and print critical event
372 * for information.
373 * @instance: Adapter soft state
374 */
375 static void
376 megasas_decode_evt(struct megasas_instance *instance)
377 {
378 struct megasas_evt_detail *evt_detail = instance->evt_detail;
379 union megasas_evt_class_locale class_locale;
380 class_locale.word = le32_to_cpu(evt_detail->cl.word);
381
382 if (class_locale.members.class >= MFI_EVT_CLASS_CRITICAL)
383 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
384 le32_to_cpu(evt_detail->seq_num),
385 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
386 (class_locale.members.locale),
387 format_class(class_locale.members.class),
388 evt_detail->description);
389 }
390
391 /**
392 * The following functions are defined for xscale
393 * (deviceid : 1064R, PERC5) controllers
394 */
395
396 /**
397 * megasas_enable_intr_xscale - Enables interrupts
398 * @instance: Adapter soft state
399 */
400 static inline void
401 megasas_enable_intr_xscale(struct megasas_instance *instance)
402 {
403 struct megasas_register_set __iomem *regs;
404
405 regs = instance->reg_set;
406 writel(0, &(regs)->outbound_intr_mask);
407
408 /* Dummy readl to force pci flush */
409 readl(&regs->outbound_intr_mask);
410 }
411
412 /**
413 * megasas_disable_intr_xscale - Disables interrupt
414 * @instance: Adapter soft state
415 */
416 static inline void
417 megasas_disable_intr_xscale(struct megasas_instance *instance)
418 {
419 struct megasas_register_set __iomem *regs;
420 u32 mask = 0x1f;
421
422 regs = instance->reg_set;
423 writel(mask, &regs->outbound_intr_mask);
424 /* Dummy readl to force pci flush */
425 readl(&regs->outbound_intr_mask);
426 }
427
428 /**
429 * megasas_read_fw_status_reg_xscale - returns the current FW status value
430 * @instance: Adapter soft state
431 */
432 static u32
433 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
434 {
435 return readl(&instance->reg_set->outbound_msg_0);
436 }
437 /**
438 * megasas_clear_intr_xscale - Check & clear interrupt
439 * @instance: Adapter soft state
440 */
441 static int
442 megasas_clear_intr_xscale(struct megasas_instance *instance)
443 {
444 u32 status;
445 u32 mfiStatus = 0;
446 struct megasas_register_set __iomem *regs;
447 regs = instance->reg_set;
448
449 /*
450 * Check if it is our interrupt
451 */
452 status = readl(&regs->outbound_intr_status);
453
454 if (status & MFI_OB_INTR_STATUS_MASK)
455 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
456 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
457 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
458
459 /*
460 * Clear the interrupt by writing back the same value
461 */
462 if (mfiStatus)
463 writel(status, &regs->outbound_intr_status);
464
465 /* Dummy readl to force pci flush */
466 readl(&regs->outbound_intr_status);
467
468 return mfiStatus;
469 }
470
471 /**
472 * megasas_fire_cmd_xscale - Sends command to the FW
473 * @frame_phys_addr : Physical address of cmd
474 * @frame_count : Number of frames for the command
475 * @regs : MFI register set
476 */
477 static inline void
478 megasas_fire_cmd_xscale(struct megasas_instance *instance,
479 dma_addr_t frame_phys_addr,
480 u32 frame_count,
481 struct megasas_register_set __iomem *regs)
482 {
483 unsigned long flags;
484
485 spin_lock_irqsave(&instance->hba_lock, flags);
486 writel((frame_phys_addr >> 3)|(frame_count),
487 &(regs)->inbound_queue_port);
488 spin_unlock_irqrestore(&instance->hba_lock, flags);
489 }
490
491 /**
492 * megasas_adp_reset_xscale - For controller reset
493 * @regs: MFI register set
494 */
495 static int
496 megasas_adp_reset_xscale(struct megasas_instance *instance,
497 struct megasas_register_set __iomem *regs)
498 {
499 u32 i;
500 u32 pcidata;
501
502 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
503
504 for (i = 0; i < 3; i++)
505 msleep(1000); /* sleep for 3 secs */
506 pcidata = 0;
507 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
508 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
509 if (pcidata & 0x2) {
510 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
511 pcidata &= ~0x2;
512 pci_write_config_dword(instance->pdev,
513 MFI_1068_PCSR_OFFSET, pcidata);
514
515 for (i = 0; i < 2; i++)
516 msleep(1000); /* need to wait 2 secs again */
517
518 pcidata = 0;
519 pci_read_config_dword(instance->pdev,
520 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
521 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
522 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
523 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
524 pcidata = 0;
525 pci_write_config_dword(instance->pdev,
526 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
527 }
528 }
529 return 0;
530 }
531
532 /**
533 * megasas_check_reset_xscale - For controller reset check
534 * @regs: MFI register set
535 */
536 static int
537 megasas_check_reset_xscale(struct megasas_instance *instance,
538 struct megasas_register_set __iomem *regs)
539 {
540 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
541 (le32_to_cpu(*instance->consumer) ==
542 MEGASAS_ADPRESET_INPROG_SIGN))
543 return 1;
544 return 0;
545 }
546
547 static struct megasas_instance_template megasas_instance_template_xscale = {
548
549 .fire_cmd = megasas_fire_cmd_xscale,
550 .enable_intr = megasas_enable_intr_xscale,
551 .disable_intr = megasas_disable_intr_xscale,
552 .clear_intr = megasas_clear_intr_xscale,
553 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
554 .adp_reset = megasas_adp_reset_xscale,
555 .check_reset = megasas_check_reset_xscale,
556 .service_isr = megasas_isr,
557 .tasklet = megasas_complete_cmd_dpc,
558 .init_adapter = megasas_init_adapter_mfi,
559 .build_and_issue_cmd = megasas_build_and_issue_cmd,
560 .issue_dcmd = megasas_issue_dcmd,
561 };
562
563 /**
564 * This is the end of set of functions & definitions specific
565 * to xscale (deviceid : 1064R, PERC5) controllers
566 */
567
568 /**
569 * The following functions are defined for ppc (deviceid : 0x60)
570 * controllers
571 */
572
573 /**
574 * megasas_enable_intr_ppc - Enables interrupts
575 * @instance: Adapter soft state
576 */
577 static inline void
578 megasas_enable_intr_ppc(struct megasas_instance *instance)
579 {
580 struct megasas_register_set __iomem *regs;
581
582 regs = instance->reg_set;
583 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
584
585 writel(~0x80000000, &(regs)->outbound_intr_mask);
586
587 /* Dummy readl to force pci flush */
588 readl(&regs->outbound_intr_mask);
589 }
590
591 /**
592 * megasas_disable_intr_ppc - Disable interrupt
593 * @instance: Adapter soft state
594 */
595 static inline void
596 megasas_disable_intr_ppc(struct megasas_instance *instance)
597 {
598 struct megasas_register_set __iomem *regs;
599 u32 mask = 0xFFFFFFFF;
600
601 regs = instance->reg_set;
602 writel(mask, &regs->outbound_intr_mask);
603 /* Dummy readl to force pci flush */
604 readl(&regs->outbound_intr_mask);
605 }
606
607 /**
608 * megasas_read_fw_status_reg_ppc - returns the current FW status value
609 * @instance: Adapter soft state
610 */
611 static u32
612 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
613 {
614 return readl(&instance->reg_set->outbound_scratch_pad_0);
615 }
616
617 /**
618 * megasas_clear_intr_ppc - Check & clear interrupt
619 * @instance: Adapter soft state
620 */
621 static int
622 megasas_clear_intr_ppc(struct megasas_instance *instance)
623 {
624 u32 status, mfiStatus = 0;
625 struct megasas_register_set __iomem *regs;
626 regs = instance->reg_set;
627
628 /*
629 * Check if it is our interrupt
630 */
631 status = readl(&regs->outbound_intr_status);
632
633 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
634 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
635
636 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
637 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
638
639 /*
640 * Clear the interrupt by writing back the same value
641 */
642 writel(status, &regs->outbound_doorbell_clear);
643
644 /* Dummy readl to force pci flush */
645 readl(&regs->outbound_doorbell_clear);
646
647 return mfiStatus;
648 }
649
650 /**
651 * megasas_fire_cmd_ppc - Sends command to the FW
652 * @frame_phys_addr : Physical address of cmd
653 * @frame_count : Number of frames for the command
654 * @regs : MFI register set
655 */
656 static inline void
657 megasas_fire_cmd_ppc(struct megasas_instance *instance,
658 dma_addr_t frame_phys_addr,
659 u32 frame_count,
660 struct megasas_register_set __iomem *regs)
661 {
662 unsigned long flags;
663
664 spin_lock_irqsave(&instance->hba_lock, flags);
665 writel((frame_phys_addr | (frame_count<<1))|1,
666 &(regs)->inbound_queue_port);
667 spin_unlock_irqrestore(&instance->hba_lock, flags);
668 }
669
670 /**
671 * megasas_check_reset_ppc - For controller reset check
672 * @regs: MFI register set
673 */
674 static int
675 megasas_check_reset_ppc(struct megasas_instance *instance,
676 struct megasas_register_set __iomem *regs)
677 {
678 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
679 return 1;
680
681 return 0;
682 }
683
684 static struct megasas_instance_template megasas_instance_template_ppc = {
685
686 .fire_cmd = megasas_fire_cmd_ppc,
687 .enable_intr = megasas_enable_intr_ppc,
688 .disable_intr = megasas_disable_intr_ppc,
689 .clear_intr = megasas_clear_intr_ppc,
690 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
691 .adp_reset = megasas_adp_reset_xscale,
692 .check_reset = megasas_check_reset_ppc,
693 .service_isr = megasas_isr,
694 .tasklet = megasas_complete_cmd_dpc,
695 .init_adapter = megasas_init_adapter_mfi,
696 .build_and_issue_cmd = megasas_build_and_issue_cmd,
697 .issue_dcmd = megasas_issue_dcmd,
698 };
699
700 /**
701 * megasas_enable_intr_skinny - Enables interrupts
702 * @instance: Adapter soft state
703 */
704 static inline void
705 megasas_enable_intr_skinny(struct megasas_instance *instance)
706 {
707 struct megasas_register_set __iomem *regs;
708
709 regs = instance->reg_set;
710 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
711
712 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
713
714 /* Dummy readl to force pci flush */
715 readl(&regs->outbound_intr_mask);
716 }
717
718 /**
719 * megasas_disable_intr_skinny - Disables interrupt
720 * @instance: Adapter soft state
721 */
722 static inline void
723 megasas_disable_intr_skinny(struct megasas_instance *instance)
724 {
725 struct megasas_register_set __iomem *regs;
726 u32 mask = 0xFFFFFFFF;
727
728 regs = instance->reg_set;
729 writel(mask, &regs->outbound_intr_mask);
730 /* Dummy readl to force pci flush */
731 readl(&regs->outbound_intr_mask);
732 }
733
734 /**
735 * megasas_read_fw_status_reg_skinny - returns the current FW status value
736 * @instance: Adapter soft state
737 */
738 static u32
739 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
740 {
741 return readl(&instance->reg_set->outbound_scratch_pad_0);
742 }
743
744 /**
745 * megasas_clear_intr_skinny - Check & clear interrupt
746 * @instance: Adapter soft state
747 */
748 static int
749 megasas_clear_intr_skinny(struct megasas_instance *instance)
750 {
751 u32 status;
752 u32 mfiStatus = 0;
753 struct megasas_register_set __iomem *regs;
754 regs = instance->reg_set;
755
756 /*
757 * Check if it is our interrupt
758 */
759 status = readl(&regs->outbound_intr_status);
760
761 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
762 return 0;
763 }
764
765 /*
766 * Check if it is our interrupt
767 */
768 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
769 MFI_STATE_FAULT) {
770 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
771 } else
772 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
773
774 /*
775 * Clear the interrupt by writing back the same value
776 */
777 writel(status, &regs->outbound_intr_status);
778
779 /*
780 * dummy read to flush PCI
781 */
782 readl(&regs->outbound_intr_status);
783
784 return mfiStatus;
785 }
786
787 /**
788 * megasas_fire_cmd_skinny - Sends command to the FW
789 * @frame_phys_addr : Physical address of cmd
790 * @frame_count : Number of frames for the command
791 * @regs : MFI register set
792 */
793 static inline void
794 megasas_fire_cmd_skinny(struct megasas_instance *instance,
795 dma_addr_t frame_phys_addr,
796 u32 frame_count,
797 struct megasas_register_set __iomem *regs)
798 {
799 unsigned long flags;
800
801 spin_lock_irqsave(&instance->hba_lock, flags);
802 writel(upper_32_bits(frame_phys_addr),
803 &(regs)->inbound_high_queue_port);
804 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
805 &(regs)->inbound_low_queue_port);
806 spin_unlock_irqrestore(&instance->hba_lock, flags);
807 }
808
809 /**
810 * megasas_check_reset_skinny - For controller reset check
811 * @regs: MFI register set
812 */
813 static int
814 megasas_check_reset_skinny(struct megasas_instance *instance,
815 struct megasas_register_set __iomem *regs)
816 {
817 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
818 return 1;
819
820 return 0;
821 }
822
823 static struct megasas_instance_template megasas_instance_template_skinny = {
824
825 .fire_cmd = megasas_fire_cmd_skinny,
826 .enable_intr = megasas_enable_intr_skinny,
827 .disable_intr = megasas_disable_intr_skinny,
828 .clear_intr = megasas_clear_intr_skinny,
829 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
830 .adp_reset = megasas_adp_reset_gen2,
831 .check_reset = megasas_check_reset_skinny,
832 .service_isr = megasas_isr,
833 .tasklet = megasas_complete_cmd_dpc,
834 .init_adapter = megasas_init_adapter_mfi,
835 .build_and_issue_cmd = megasas_build_and_issue_cmd,
836 .issue_dcmd = megasas_issue_dcmd,
837 };
838
839
840 /**
841 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
842 * controllers
843 */
844
845 /**
846 * megasas_enable_intr_gen2 - Enables interrupts
847 * @instance: Adapter soft state
848 */
849 static inline void
850 megasas_enable_intr_gen2(struct megasas_instance *instance)
851 {
852 struct megasas_register_set __iomem *regs;
853
854 regs = instance->reg_set;
855 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
856
857 /* write ~0x00000005 (4 & 1) to the intr mask*/
858 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
859
860 /* Dummy readl to force pci flush */
861 readl(&regs->outbound_intr_mask);
862 }
863
864 /**
865 * megasas_disable_intr_gen2 - Disables interrupt
866 * @instance: Adapter soft state
867 */
868 static inline void
869 megasas_disable_intr_gen2(struct megasas_instance *instance)
870 {
871 struct megasas_register_set __iomem *regs;
872 u32 mask = 0xFFFFFFFF;
873
874 regs = instance->reg_set;
875 writel(mask, &regs->outbound_intr_mask);
876 /* Dummy readl to force pci flush */
877 readl(&regs->outbound_intr_mask);
878 }
879
880 /**
881 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
882 * @instance: Adapter soft state
883 */
884 static u32
885 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
886 {
887 return readl(&instance->reg_set->outbound_scratch_pad_0);
888 }
889
890 /**
891 * megasas_clear_intr_gen2 - Check & clear interrupt
892 * @instance: Adapter soft state
893 */
894 static int
895 megasas_clear_intr_gen2(struct megasas_instance *instance)
896 {
897 u32 status;
898 u32 mfiStatus = 0;
899 struct megasas_register_set __iomem *regs;
900 regs = instance->reg_set;
901
902 /*
903 * Check if it is our interrupt
904 */
905 status = readl(&regs->outbound_intr_status);
906
907 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
908 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
909 }
910 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
911 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
912 }
913
914 /*
915 * Clear the interrupt by writing back the same value
916 */
917 if (mfiStatus)
918 writel(status, &regs->outbound_doorbell_clear);
919
920 /* Dummy readl to force pci flush */
921 readl(&regs->outbound_intr_status);
922
923 return mfiStatus;
924 }
925 /**
926 * megasas_fire_cmd_gen2 - Sends command to the FW
927 * @frame_phys_addr : Physical address of cmd
928 * @frame_count : Number of frames for the command
929 * @regs : MFI register set
930 */
931 static inline void
932 megasas_fire_cmd_gen2(struct megasas_instance *instance,
933 dma_addr_t frame_phys_addr,
934 u32 frame_count,
935 struct megasas_register_set __iomem *regs)
936 {
937 unsigned long flags;
938
939 spin_lock_irqsave(&instance->hba_lock, flags);
940 writel((frame_phys_addr | (frame_count<<1))|1,
941 &(regs)->inbound_queue_port);
942 spin_unlock_irqrestore(&instance->hba_lock, flags);
943 }
944
945 /**
946 * megasas_adp_reset_gen2 - For controller reset
947 * @regs: MFI register set
948 */
949 static int
950 megasas_adp_reset_gen2(struct megasas_instance *instance,
951 struct megasas_register_set __iomem *reg_set)
952 {
953 u32 retry = 0 ;
954 u32 HostDiag;
955 u32 __iomem *seq_offset = &reg_set->seq_offset;
956 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
957
958 if (instance->instancet == &megasas_instance_template_skinny) {
959 seq_offset = &reg_set->fusion_seq_offset;
960 hostdiag_offset = &reg_set->fusion_host_diag;
961 }
962
963 writel(0, seq_offset);
964 writel(4, seq_offset);
965 writel(0xb, seq_offset);
966 writel(2, seq_offset);
967 writel(7, seq_offset);
968 writel(0xd, seq_offset);
969
970 msleep(1000);
971
972 HostDiag = (u32)readl(hostdiag_offset);
973
974 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
975 msleep(100);
976 HostDiag = (u32)readl(hostdiag_offset);
977 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
978 retry, HostDiag);
979
980 if (retry++ >= 100)
981 return 1;
982
983 }
984
985 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
986
987 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
988
989 ssleep(10);
990
991 HostDiag = (u32)readl(hostdiag_offset);
992 while (HostDiag & DIAG_RESET_ADAPTER) {
993 msleep(100);
994 HostDiag = (u32)readl(hostdiag_offset);
995 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
996 retry, HostDiag);
997
998 if (retry++ >= 1000)
999 return 1;
1000
1001 }
1002 return 0;
1003 }
1004
1005 /**
1006 * megasas_check_reset_gen2 - For controller reset check
1007 * @regs: MFI register set
1008 */
1009 static int
1010 megasas_check_reset_gen2(struct megasas_instance *instance,
1011 struct megasas_register_set __iomem *regs)
1012 {
1013 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1014 return 1;
1015
1016 return 0;
1017 }
1018
1019 static struct megasas_instance_template megasas_instance_template_gen2 = {
1020
1021 .fire_cmd = megasas_fire_cmd_gen2,
1022 .enable_intr = megasas_enable_intr_gen2,
1023 .disable_intr = megasas_disable_intr_gen2,
1024 .clear_intr = megasas_clear_intr_gen2,
1025 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1026 .adp_reset = megasas_adp_reset_gen2,
1027 .check_reset = megasas_check_reset_gen2,
1028 .service_isr = megasas_isr,
1029 .tasklet = megasas_complete_cmd_dpc,
1030 .init_adapter = megasas_init_adapter_mfi,
1031 .build_and_issue_cmd = megasas_build_and_issue_cmd,
1032 .issue_dcmd = megasas_issue_dcmd,
1033 };
1034
1035 /**
1036 * This is the end of set of functions & definitions
1037 * specific to gen2 (deviceid : 0x78, 0x79) controllers
1038 */
1039
1040 /*
1041 * Template added for TB (Fusion)
1042 */
1043 extern struct megasas_instance_template megasas_instance_template_fusion;
1044
1045 /**
1046 * megasas_issue_polled - Issues a polling command
1047 * @instance: Adapter soft state
1048 * @cmd: Command packet to be issued
1049 *
1050 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1051 */
1052 int
1053 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1054 {
1055 struct megasas_header *frame_hdr = &cmd->frame->hdr;
1056
1057 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1058 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1059
1060 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1061 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1062 __func__, __LINE__);
1063 return DCMD_NOT_FIRED;
1064 }
1065
1066 instance->instancet->issue_dcmd(instance, cmd);
1067
1068 return wait_and_poll(instance, cmd, instance->requestorId ?
1069 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1070 }
1071
1072 /**
1073 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1074 * @instance: Adapter soft state
1075 * @cmd: Command to be issued
1076 * @timeout: Timeout in seconds
1077 *
1078 * This function waits on an event for the command to be returned from ISR.
1079 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1080 * Used to issue ioctl commands.
1081 */
1082 int
1083 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1084 struct megasas_cmd *cmd, int timeout)
1085 {
1086 int ret = 0;
1087 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1088
1089 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1090 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1091 __func__, __LINE__);
1092 return DCMD_NOT_FIRED;
1093 }
1094
1095 instance->instancet->issue_dcmd(instance, cmd);
1096
1097 if (timeout) {
1098 ret = wait_event_timeout(instance->int_cmd_wait_q,
1099 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1100 if (!ret) {
1101 dev_err(&instance->pdev->dev, "Failed from %s %d DCMD Timed out\n",
1102 __func__, __LINE__);
1103 return DCMD_TIMEOUT;
1104 }
1105 } else
1106 wait_event(instance->int_cmd_wait_q,
1107 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1108
1109 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1110 DCMD_SUCCESS : DCMD_FAILED;
1111 }
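/*
 * Example of the return contract above: a caller passing a non-zero timeout
 * (e.g. MFI_IO_TIMEOUT_SECS) gets DCMD_TIMEOUT if the ISR has not updated
 * cmd_status_drv within that many seconds; with timeout == 0 the call blocks
 * until completion and then returns DCMD_SUCCESS or DCMD_FAILED based on
 * cmd_status_drv.
 */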
1112
1113 /**
1114 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1115 * @instance: Adapter soft state
1116 * @cmd_to_abort: Previously issued cmd to be aborted
1117 * @timeout: Timeout in seconds
1118 *
1119 * MFI firmware can abort a previously issued AEN command (automatic event
1120 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1121 * cmd and waits for return status.
1122 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1123 */
1124 static int
1125 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1126 struct megasas_cmd *cmd_to_abort, int timeout)
1127 {
1128 struct megasas_cmd *cmd;
1129 struct megasas_abort_frame *abort_fr;
1130 int ret = 0;
1131
1132 cmd = megasas_get_cmd(instance);
1133
1134 if (!cmd)
1135 return -1;
1136
1137 abort_fr = &cmd->frame->abort;
1138
1139 /*
1140 * Prepare and issue the abort frame
1141 */
1142 abort_fr->cmd = MFI_CMD_ABORT;
1143 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1144 abort_fr->flags = cpu_to_le16(0);
1145 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1146 abort_fr->abort_mfi_phys_addr_lo =
1147 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1148 abort_fr->abort_mfi_phys_addr_hi =
1149 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1150
1151 cmd->sync_cmd = 1;
1152 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
1153
1154 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1155 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1156 __func__, __LINE__);
1157 return DCMD_NOT_FIRED;
1158 }
1159
1160 instance->instancet->issue_dcmd(instance, cmd);
1161
1162 if (timeout) {
1163 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1164 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ);
1165 if (!ret) {
1166 dev_err(&instance->pdev->dev, "Failed from %s %d Abort Timed out\n",
1167 __func__, __LINE__);
1168 return DCMD_TIMEOUT;
1169 }
1170 } else
1171 wait_event(instance->abort_cmd_wait_q,
1172 cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS);
1173
1174 cmd->sync_cmd = 0;
1175
1176 megasas_return_cmd(instance, cmd);
1177 return (cmd->cmd_status_drv == MFI_STAT_OK) ?
1178 DCMD_SUCCESS : DCMD_FAILED;
1179 }
1180
1181 /**
1182 * megasas_make_sgl32 - Prepares 32-bit SGL
1183 * @instance: Adapter soft state
1184 * @scp: SCSI command from the mid-layer
1185 * @mfi_sgl: SGL to be filled in
1186 *
1187 * If successful, this function returns the number of SG elements. Otherwise,
1188 * it returns -1.
1189 */
1190 static int
1191 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1192 union megasas_sgl *mfi_sgl)
1193 {
1194 int i;
1195 int sge_count;
1196 struct scatterlist *os_sgl;
1197
1198 sge_count = scsi_dma_map(scp);
1199 BUG_ON(sge_count < 0);
1200
1201 if (sge_count) {
1202 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1203 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1204 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1205 }
1206 }
1207 return sge_count;
1208 }
1209
1210 /**
1211 * megasas_make_sgl64 - Prepares 64-bit SGL
1212 * @instance: Adapter soft state
1213 * @scp: SCSI command from the mid-layer
1214 * @mfi_sgl: SGL to be filled in
1215 *
1216 * If successful, this function returns the number of SG elements. Otherwise,
1217 * it returns -1.
1218 */
1219 static int
1220 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1221 union megasas_sgl *mfi_sgl)
1222 {
1223 int i;
1224 int sge_count;
1225 struct scatterlist *os_sgl;
1226
1227 sge_count = scsi_dma_map(scp);
1228 BUG_ON(sge_count < 0);
1229
1230 if (sge_count) {
1231 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1232 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1233 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1234 }
1235 }
1236 return sge_count;
1237 }
1238
1239 /**
1240 * megasas_make_sgl_skinny - Prepares IEEE SGL
1241 * @instance: Adapter soft state
1242 * @scp: SCSI command from the mid-layer
1243 * @mfi_sgl: SGL to be filled in
1244 *
1245 * If successful, this function returns the number of SG elements. Otherwise,
1246 * it returns -1.
1247 */
1248 static int
1249 megasas_make_sgl_skinny(struct megasas_instance *instance,
1250 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1251 {
1252 int i;
1253 int sge_count;
1254 struct scatterlist *os_sgl;
1255
1256 sge_count = scsi_dma_map(scp);
1257
1258 if (sge_count) {
1259 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1260 mfi_sgl->sge_skinny[i].length =
1261 cpu_to_le32(sg_dma_len(os_sgl));
1262 mfi_sgl->sge_skinny[i].phys_addr =
1263 cpu_to_le64(sg_dma_address(os_sgl));
1264 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1265 }
1266 }
1267 return sge_count;
1268 }
1269
1270 /**
1271 * megasas_get_frame_count - Computes the number of frames
1272 * @frame_type : type of frame- io or pthru frame
1273 * @sge_count : number of sg elements
1274 *
1275 * Returns the number of frames required for the given number of SGEs (sge_count)
1276 */
1277
1278 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1279 u8 sge_count, u8 frame_type)
1280 {
1281 int num_cnt;
1282 int sge_bytes;
1283 u32 sge_sz;
1284 u32 frame_count = 0;
1285
1286 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1287 sizeof(struct megasas_sge32);
1288
1289 if (instance->flag_ieee) {
1290 sge_sz = sizeof(struct megasas_sge_skinny);
1291 }
1292
1293 /*
1294 * Main frame can contain 2 SGEs for 64-bit SGLs and
1295 * 3 SGEs for 32-bit SGLs for ldio &
1296 * 1 SGEs for 64-bit SGLs and
1297 * 2 SGEs for 32-bit SGLs for pthru frame
1298 */
1299 if (unlikely(frame_type == PTHRU_FRAME)) {
1300 if (instance->flag_ieee == 1) {
1301 num_cnt = sge_count - 1;
1302 } else if (IS_DMA64)
1303 num_cnt = sge_count - 1;
1304 else
1305 num_cnt = sge_count - 2;
1306 } else {
1307 if (instance->flag_ieee == 1) {
1308 num_cnt = sge_count - 1;
1309 } else if (IS_DMA64)
1310 num_cnt = sge_count - 2;
1311 else
1312 num_cnt = sge_count - 3;
1313 }
1314
1315 if (num_cnt > 0) {
1316 sge_bytes = sge_sz * num_cnt;
1317
1318 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1319 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1320 }
1321 /* Main frame */
1322 frame_count += 1;
1323
1324 if (frame_count > 7)
1325 frame_count = 8;
1326 return frame_count;
1327 }
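/*
 * Worked example (assuming MEGAMFI_FRAME_SIZE is 64 bytes and a skinny/IEEE
 * SGE is 16 bytes): an IEEE LDIO with sge_count = 9 leaves num_cnt = 8 SGEs
 * for the extra frames, i.e. 128 bytes, which rounds up to 2 additional
 * frames, so the function returns 3 once the main frame is added.
 */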
1328
1329 /**
1330 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1331 * @instance: Adapter soft state
1332 * @scp: SCSI command
1333 * @cmd: Command to be prepared in
1334 *
1335 * This function prepares CDB commands. These are typically pass-through
1336 * commands to the devices.
1337 */
1338 static int
1339 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1340 struct megasas_cmd *cmd)
1341 {
1342 u32 is_logical;
1343 u32 device_id;
1344 u16 flags = 0;
1345 struct megasas_pthru_frame *pthru;
1346
1347 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1348 device_id = MEGASAS_DEV_INDEX(scp);
1349 pthru = (struct megasas_pthru_frame *)cmd->frame;
1350
1351 if (scp->sc_data_direction == DMA_TO_DEVICE)
1352 flags = MFI_FRAME_DIR_WRITE;
1353 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1354 flags = MFI_FRAME_DIR_READ;
1355 else if (scp->sc_data_direction == DMA_NONE)
1356 flags = MFI_FRAME_DIR_NONE;
1357
1358 if (instance->flag_ieee == 1) {
1359 flags |= MFI_FRAME_IEEE;
1360 }
1361
1362 /*
1363 * Prepare the DCDB frame
1364 */
1365 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1366 pthru->cmd_status = 0x0;
1367 pthru->scsi_status = 0x0;
1368 pthru->target_id = device_id;
1369 pthru->lun = scp->device->lun;
1370 pthru->cdb_len = scp->cmd_len;
1371 pthru->timeout = 0;
1372 pthru->pad_0 = 0;
1373 pthru->flags = cpu_to_le16(flags);
1374 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1375
1376 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1377
1378 /*
1379 * If the command is for the tape device, set the
1380 * pthru timeout to the os layer timeout value.
1381 */
1382 if (scp->device->type == TYPE_TAPE) {
1383 if ((scp->request->timeout / HZ) > 0xFFFF)
1384 pthru->timeout = cpu_to_le16(0xFFFF);
1385 else
1386 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1387 }
1388
1389 /*
1390 * Construct SGL
1391 */
1392 if (instance->flag_ieee == 1) {
1393 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1394 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1395 &pthru->sgl);
1396 } else if (IS_DMA64) {
1397 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1398 pthru->sge_count = megasas_make_sgl64(instance, scp,
1399 &pthru->sgl);
1400 } else
1401 pthru->sge_count = megasas_make_sgl32(instance, scp,
1402 &pthru->sgl);
1403
1404 if (pthru->sge_count > instance->max_num_sge) {
1405 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1406 pthru->sge_count);
1407 return 0;
1408 }
1409
1410 /*
1411 * Sense info specific
1412 */
1413 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1414 pthru->sense_buf_phys_addr_hi =
1415 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1416 pthru->sense_buf_phys_addr_lo =
1417 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1418
1419 /*
1420 * Compute the total number of frames this command consumes. FW uses
1421 * this number to pull sufficient number of frames from host memory.
1422 */
1423 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1424 PTHRU_FRAME);
1425
1426 return cmd->frame_count;
1427 }
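/*
 * Example of the tape-timeout handling above: a TYPE_TAPE command with a
 * 600 second block-layer timeout yields pthru->timeout = 600, while any
 * value that does not fit in 16 bits is clamped to 0xFFFF.
 */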
1428
1429 /**
1430 * megasas_build_ldio - Prepares IOs to logical devices
1431 * @instance: Adapter soft state
1432 * @scp: SCSI command
1433 * @cmd: Command to be prepared
1434 *
1435 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1436 */
1437 static int
1438 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1439 struct megasas_cmd *cmd)
1440 {
1441 u32 device_id;
1442 u8 sc = scp->cmnd[0];
1443 u16 flags = 0;
1444 struct megasas_io_frame *ldio;
1445
1446 device_id = MEGASAS_DEV_INDEX(scp);
1447 ldio = (struct megasas_io_frame *)cmd->frame;
1448
1449 if (scp->sc_data_direction == DMA_TO_DEVICE)
1450 flags = MFI_FRAME_DIR_WRITE;
1451 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1452 flags = MFI_FRAME_DIR_READ;
1453
1454 if (instance->flag_ieee == 1) {
1455 flags |= MFI_FRAME_IEEE;
1456 }
1457
1458 /*
1459 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1460 */
1461 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1462 ldio->cmd_status = 0x0;
1463 ldio->scsi_status = 0x0;
1464 ldio->target_id = device_id;
1465 ldio->timeout = 0;
1466 ldio->reserved_0 = 0;
1467 ldio->pad_0 = 0;
1468 ldio->flags = cpu_to_le16(flags);
1469 ldio->start_lba_hi = 0;
1470 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1471
1472 /*
1473 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1474 */
1475 if (scp->cmd_len == 6) {
1476 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1477 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1478 ((u32) scp->cmnd[2] << 8) |
1479 (u32) scp->cmnd[3]);
1480
1481 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1482 }
1483
1484 /*
1485 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1486 */
1487 else if (scp->cmd_len == 10) {
1488 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1489 ((u32) scp->cmnd[7] << 8));
1490 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1491 ((u32) scp->cmnd[3] << 16) |
1492 ((u32) scp->cmnd[4] << 8) |
1493 (u32) scp->cmnd[5]);
1494 }
1495
1496 /*
1497 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1498 */
1499 else if (scp->cmd_len == 12) {
1500 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1501 ((u32) scp->cmnd[7] << 16) |
1502 ((u32) scp->cmnd[8] << 8) |
1503 (u32) scp->cmnd[9]);
1504
1505 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1506 ((u32) scp->cmnd[3] << 16) |
1507 ((u32) scp->cmnd[4] << 8) |
1508 (u32) scp->cmnd[5]);
1509 }
1510
1511 /*
1512 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1513 */
1514 else if (scp->cmd_len == 16) {
1515 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1516 ((u32) scp->cmnd[11] << 16) |
1517 ((u32) scp->cmnd[12] << 8) |
1518 (u32) scp->cmnd[13]);
1519
1520 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1521 ((u32) scp->cmnd[7] << 16) |
1522 ((u32) scp->cmnd[8] << 8) |
1523 (u32) scp->cmnd[9]);
1524
1525 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1526 ((u32) scp->cmnd[3] << 16) |
1527 ((u32) scp->cmnd[4] << 8) |
1528 (u32) scp->cmnd[5]);
1529
1530 }
1531
1532 /*
1533 * Construct SGL
1534 */
1535 if (instance->flag_ieee) {
1536 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1537 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1538 &ldio->sgl);
1539 } else if (IS_DMA64) {
1540 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1541 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1542 } else
1543 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1544
1545 if (ldio->sge_count > instance->max_num_sge) {
1546 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1547 ldio->sge_count);
1548 return 0;
1549 }
1550
1551 /*
1552 * Sense info specific
1553 */
1554 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1555 ldio->sense_buf_phys_addr_hi = 0;
1556 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1557
1558 /*
1559 * Compute the total number of frames this command consumes. FW uses
1560 * this number to pull sufficient number of frames from host memory.
1561 */
1562 cmd->frame_count = megasas_get_frame_count(instance,
1563 ldio->sge_count, IO_FRAME);
1564
1565 return cmd->frame_count;
1566 }
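/*
 * Example of the CDB decode above: a 10-byte READ(10) with CDB bytes
 * { 0x28, 0x00, 0x00, 0x12, 0x34, 0x56, 0x00, 0x00, 0x08, 0x00 } yields
 * start_lba_lo = 0x00123456 and lba_count = 8, with start_lba_hi left 0.
 */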
1567
1568 /**
1569 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1570 * and whether it's RW or non RW
1571 * @cmd: SCSI command
1572 *
1573 */
1574 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1575 {
1576 int ret;
1577
1578 switch (cmd->cmnd[0]) {
1579 case READ_10:
1580 case WRITE_10:
1581 case READ_12:
1582 case WRITE_12:
1583 case READ_6:
1584 case WRITE_6:
1585 case READ_16:
1586 case WRITE_16:
1587 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1588 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1589 break;
1590 default:
1591 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1592 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1593 }
1594 return ret;
1595 }
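/*
 * Example: a WRITE(16) addressed to a logical drive classifies as
 * READ_WRITE_LDIO, while an INQUIRY to a system PD falls through to the
 * default case and classifies as NON_READ_WRITE_SYSPDIO.
 */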
1596
1597 /**
1598 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1599 * in FW
1600 * @instance: Adapter soft state
1601 */
1602 static inline void
1603 megasas_dump_pending_frames(struct megasas_instance *instance)
1604 {
1605 struct megasas_cmd *cmd;
1606 int i,n;
1607 union megasas_sgl *mfi_sgl;
1608 struct megasas_io_frame *ldio;
1609 struct megasas_pthru_frame *pthru;
1610 u32 sgcount;
1611 u16 max_cmd = instance->max_fw_cmds;
1612
1613 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1614 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1615 if (IS_DMA64)
1616 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1617 else
1618 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1619
1620 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1621 for (i = 0; i < max_cmd; i++) {
1622 cmd = instance->cmd_list[i];
1623 if (!cmd->scmd)
1624 continue;
1625 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1626 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1627 ldio = (struct megasas_io_frame *)cmd->frame;
1628 mfi_sgl = &ldio->sgl;
1629 sgcount = ldio->sge_count;
1630 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1631 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1632 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1633 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1634 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1635 } else {
1636 pthru = (struct megasas_pthru_frame *) cmd->frame;
1637 mfi_sgl = &pthru->sgl;
1638 sgcount = pthru->sge_count;
1639 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1640 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1641 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1642 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1643 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1644 }
1645 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1646 for (n = 0; n < sgcount; n++) {
1647 if (IS_DMA64)
1648 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1649 le32_to_cpu(mfi_sgl->sge64[n].length),
1650 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1651 else
1652 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1653 le32_to_cpu(mfi_sgl->sge32[n].length),
1654 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1655 }
1656 }
1657 } /*for max_cmd*/
1658 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1659 for (i = 0; i < max_cmd; i++) {
1660
1661 cmd = instance->cmd_list[i];
1662
1663 if (cmd->sync_cmd == 1)
1664 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1665 }
1666 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1667 }
1668
1669 u32
1670 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1671 struct scsi_cmnd *scmd)
1672 {
1673 struct megasas_cmd *cmd;
1674 u32 frame_count;
1675
1676 cmd = megasas_get_cmd(instance);
1677 if (!cmd)
1678 return SCSI_MLQUEUE_HOST_BUSY;
1679
1680 /*
1681 * Logical drive command
1682 */
1683 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1684 frame_count = megasas_build_ldio(instance, scmd, cmd);
1685 else
1686 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1687
1688 if (!frame_count)
1689 goto out_return_cmd;
1690
1691 cmd->scmd = scmd;
1692 scmd->SCp.ptr = (char *)cmd;
1693
1694 /*
1695 * Issue the command to the FW
1696 */
1697 atomic_inc(&instance->fw_outstanding);
1698
1699 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1700 cmd->frame_count-1, instance->reg_set);
1701
1702 return 0;
1703 out_return_cmd:
1704 megasas_return_cmd(instance, cmd);
1705 return SCSI_MLQUEUE_HOST_BUSY;
1706 }
1707
1708
1709 /**
1710 * megasas_queue_command - Queue entry point
1711 * @shost: SCSI host associated with the adapter
1712 * @scmd: SCSI command to be queued
1713 */
1714 static int
1715 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1716 {
1717 struct megasas_instance *instance;
1718 struct MR_PRIV_DEVICE *mr_device_priv_data;
1719
1720 instance = (struct megasas_instance *)
1721 scmd->device->host->hostdata;
1722
1723 if (instance->unload == 1) {
1724 scmd->result = DID_NO_CONNECT << 16;
1725 scmd->scsi_done(scmd);
1726 return 0;
1727 }
1728
1729 if (instance->issuepend_done == 0)
1730 return SCSI_MLQUEUE_HOST_BUSY;
1731
1732
1733 /* Check for an mpio path and adjust behavior */
1734 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1735 if (megasas_check_mpio_paths(instance, scmd) ==
1736 (DID_REQUEUE << 16)) {
1737 return SCSI_MLQUEUE_HOST_BUSY;
1738 } else {
1739 scmd->result = DID_NO_CONNECT << 16;
1740 scmd->scsi_done(scmd);
1741 return 0;
1742 }
1743 }
1744
1745 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1746 scmd->result = DID_NO_CONNECT << 16;
1747 scmd->scsi_done(scmd);
1748 return 0;
1749 }
1750
1751 mr_device_priv_data = scmd->device->hostdata;
1752 if (!mr_device_priv_data) {
1753 scmd->result = DID_NO_CONNECT << 16;
1754 scmd->scsi_done(scmd);
1755 return 0;
1756 }
1757
1758 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1759 return SCSI_MLQUEUE_HOST_BUSY;
1760
1761 if (mr_device_priv_data->tm_busy)
1762 return SCSI_MLQUEUE_DEVICE_BUSY;
1763
1764
1765 scmd->result = 0;
1766
1767 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1768 (scmd->device->id >= instance->fw_supported_vd_count ||
1769 scmd->device->lun)) {
1770 scmd->result = DID_BAD_TARGET << 16;
1771 goto out_done;
1772 }
1773
1774 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1775 MEGASAS_IS_LOGICAL(scmd->device) &&
1776 (!instance->fw_sync_cache_support)) {
1777 scmd->result = DID_OK << 16;
1778 goto out_done;
1779 }
1780
1781 return instance->instancet->build_and_issue_cmd(instance, scmd);
1782
1783 out_done:
1784 scmd->scsi_done(scmd);
1785 return 0;
1786 }
1787
1788 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1789 {
1790 int i;
1791
1792 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1793
1794 if ((megasas_mgmt_info.instance[i]) &&
1795 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1796 return megasas_mgmt_info.instance[i];
1797 }
1798
1799 return NULL;
1800 }
1801
1802 /*
1803 * megasas_set_dynamic_target_properties -
1804 * Device properties set by the driver may not be static and are required to be
1805 * updated after OCR.
1806 *
1807 * set tm_capable.
1808 * set dma alignment (only for eedp protection enable vd).
1809 *
1810 * @sdev: OS provided scsi device
1811 *
1812 * Returns void
1813 */
1814 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1815 bool is_target_prop)
1816 {
1817 u16 pd_index = 0, ld;
1818 u32 device_id;
1819 struct megasas_instance *instance;
1820 struct fusion_context *fusion;
1821 struct MR_PRIV_DEVICE *mr_device_priv_data;
1822 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1823 struct MR_LD_RAID *raid;
1824 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1825
1826 instance = megasas_lookup_instance(sdev->host->host_no);
1827 fusion = instance->ctrl_context;
1828 mr_device_priv_data = sdev->hostdata;
1829
1830 if (!fusion || !mr_device_priv_data)
1831 return;
1832
1833 if (MEGASAS_IS_LOGICAL(sdev)) {
1834 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1835 + sdev->id;
1836 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1837 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1838 if (ld >= instance->fw_supported_vd_count)
1839 return;
1840 raid = MR_LdRaidGet(ld, local_map_ptr);
1841
1842 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1843 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1844
1845 mr_device_priv_data->is_tm_capable =
1846 raid->capability.tmCapable;
1847 } else if (instance->use_seqnum_jbod_fp) {
1848 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1849 sdev->id;
1850 pd_sync = (void *)fusion->pd_seq_sync
1851 [(instance->pd_seq_map_id - 1) & 1];
1852 mr_device_priv_data->is_tm_capable =
1853 pd_sync->seq[pd_index].capability.tmCapable;
1854 }
1855
1856 if (is_target_prop && instance->tgt_prop->reset_tmo) {
1857 /*
1858 * If FW provides a target reset timeout value, driver will use
1859 * it. If not set, fall back to the default values.
1860 */
1861 mr_device_priv_data->target_reset_tmo =
1862 min_t(u8, instance->max_reset_tmo,
1863 instance->tgt_prop->reset_tmo);
1864 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1865 } else {
1866 mr_device_priv_data->target_reset_tmo =
1867 MEGASAS_DEFAULT_TM_TIMEOUT;
1868 mr_device_priv_data->task_abort_tmo =
1869 MEGASAS_DEFAULT_TM_TIMEOUT;
1870 }
1871 }
1872
1873 /*
1874 * megasas_set_nvme_device_properties -
1875 * set nomerges=2
1876 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1877 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1878 *
1879 * MR firmware provides the value in KB. The caller of this function
1880 * converts KB into bytes.
1881 *
1882 * e.g. MDTS=5 means 2^5 * NVMe page size. (In the case of a 4K page
1883 * size, MR firmware provides the value 128, since 32 * 4K = 128K.)
1884 *
1885 * @sdev: scsi device
1886 * @max_io_size: maximum io transfer size
1887 *
1888 */
1889 static inline void
1890 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1891 {
1892 struct megasas_instance *instance;
1893 u32 mr_nvme_pg_size;
1894
1895 instance = (struct megasas_instance *)sdev->host->hostdata;
1896 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1897 MR_DEFAULT_NVME_PAGE_SIZE);
1898
1899 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1900
1901 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1902 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1903 }
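/*
 * Worked example of the MDTS conversion described above (illustrative
 * annotation, not part of the original source): with MDTS=5 and a 4K
 * NVMe page size the maximum transfer is 2^5 * 4K = 128K, so the
 * firmware reports max_io_size_kb = 128.  The caller passes
 * 128 << 10 = 131072 bytes, and blk_queue_max_hw_sectors() is
 * programmed with 131072 / 512 = 256 sectors.
 */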
1904
1905
1906 /*
1907 * megasas_set_static_target_properties -
1908 * Device properties set by the driver are static and do not need to
1909 * be updated after OCR.
1910 *
1911 * set io timeout
1912 * set device queue depth
1913 * set nvme device properties. see - megasas_set_nvme_device_properties
1914 *
1915 * @sdev: scsi device
1916 * @is_target_prop: true if FW provided target properties.
1917 */
1918 static void megasas_set_static_target_properties(struct scsi_device *sdev,
1919 bool is_target_prop)
1920 {
1921 u16 target_index = 0;
1922 u8 interface_type;
1923 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1924 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
1925 u32 tgt_device_qd;
1926 struct megasas_instance *instance;
1927 struct MR_PRIV_DEVICE *mr_device_priv_data;
1928
1929 instance = megasas_lookup_instance(sdev->host->host_no);
1930 mr_device_priv_data = sdev->hostdata;
1931 interface_type = mr_device_priv_data->interface_type;
1932
1933 /*
1934 * The RAID firmware may require extended timeouts.
1935 */
1936 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
1937
1938 target_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
1939
1940 switch (interface_type) {
1941 case SAS_PD:
1942 device_qd = MEGASAS_SAS_QD;
1943 break;
1944 case SATA_PD:
1945 device_qd = MEGASAS_SATA_QD;
1946 break;
1947 case NVME_PD:
1948 device_qd = MEGASAS_NVME_QD;
1949 break;
1950 }
1951
1952 if (is_target_prop) {
1953 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1954 if (tgt_device_qd &&
1955 (tgt_device_qd <= instance->host->can_queue))
1956 device_qd = tgt_device_qd;
1957
1958 /* max_io_size_kb will be set to non-zero for
1959 * NVMe-based VDs and system PDs.
1960 */
1961 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
1962 }
1963
1964 if (instance->nvme_page_size && max_io_size_kb)
1965 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
1966
1967 scsi_change_queue_depth(sdev, device_qd);
1968
1969 }
1970
1971
1972 static int megasas_slave_configure(struct scsi_device *sdev)
1973 {
1974 u16 pd_index = 0;
1975 struct megasas_instance *instance;
1976 int ret_target_prop = DCMD_FAILED;
1977 bool is_target_prop = false;
1978
1979 instance = megasas_lookup_instance(sdev->host->host_no);
1980 if (instance->pd_list_not_supported) {
1981 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
1982 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1983 sdev->id;
1984 if (instance->pd_list[pd_index].driveState !=
1985 MR_PD_STATE_SYSTEM)
1986 return -ENXIO;
1987 }
1988 }
1989
1990 mutex_lock(&instance->reset_mutex);
1991 /* Send DCMD to Firmware and cache the information */
1992 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
1993 megasas_get_pd_info(instance, sdev);
1994
1995 /* Some Ventura firmware may not have instance->nvme_page_size set.
1996 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
1997 */
1998 if ((instance->tgt_prop) && (instance->nvme_page_size))
1999 ret_target_prop = megasas_get_target_prop(instance, sdev);
2000
2001 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2002 megasas_set_static_target_properties(sdev, is_target_prop);
2003
2004 /* This sdev property may change post OCR */
2005 megasas_set_dynamic_target_properties(sdev, is_target_prop);
2006
2007 mutex_unlock(&instance->reset_mutex);
2008
2009 return 0;
2010 }
2011
2012 static int megasas_slave_alloc(struct scsi_device *sdev)
2013 {
2014 u16 pd_index = 0;
2015 struct megasas_instance *instance ;
2016 struct MR_PRIV_DEVICE *mr_device_priv_data;
2017
2018 instance = megasas_lookup_instance(sdev->host->host_no);
2019 if (!MEGASAS_IS_LOGICAL(sdev)) {
2020 /*
2021 * Open the OS scan to the SYSTEM PD
2022 */
2023 pd_index =
2024 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2025 sdev->id;
2026 if ((instance->pd_list_not_supported ||
2027 instance->pd_list[pd_index].driveState ==
2028 MR_PD_STATE_SYSTEM)) {
2029 goto scan_target;
2030 }
2031 return -ENXIO;
2032 }
2033
2034 scan_target:
2035 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2036 GFP_KERNEL);
2037 if (!mr_device_priv_data)
2038 return -ENOMEM;
2039 sdev->hostdata = mr_device_priv_data;
2040
2041 atomic_set(&mr_device_priv_data->r1_ldio_hint,
2042 instance->r1_ldio_hint_default);
2043 return 0;
2044 }
2045
2046 static void megasas_slave_destroy(struct scsi_device *sdev)
2047 {
2048 kfree(sdev->hostdata);
2049 sdev->hostdata = NULL;
2050 }
2051
2052 /*
2053 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2054 * the adapter is killed
2055 * @instance: Adapter soft state
2056 *
2057 */
2058 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2059 {
2060 int i;
2061 struct megasas_cmd *cmd_mfi;
2062 struct megasas_cmd_fusion *cmd_fusion;
2063 struct fusion_context *fusion = instance->ctrl_context;
2064
2065 /* Find all outstanding ioctls */
2066 if (fusion) {
2067 for (i = 0; i < instance->max_fw_cmds; i++) {
2068 cmd_fusion = fusion->cmd_list[i];
2069 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2070 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2071 if (cmd_mfi->sync_cmd &&
2072 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2073 cmd_mfi->frame->hdr.cmd_status =
2074 MFI_STAT_WRONG_STATE;
2075 megasas_complete_cmd(instance,
2076 cmd_mfi, DID_OK);
2077 }
2078 }
2079 }
2080 } else {
2081 for (i = 0; i < instance->max_fw_cmds; i++) {
2082 cmd_mfi = instance->cmd_list[i];
2083 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2084 MFI_CMD_ABORT)
2085 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2086 }
2087 }
2088 }
2089
2090
2091 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2092 {
2093 /* Set critical error to block I/O & ioctls in case caller didn't */
2094 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2095 /* Wait 1 second to ensure any I/O or ioctls being built have posted */
2096 msleep(1000);
2097 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2098 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2099 (instance->adapter_type != MFI_SERIES)) {
2100 if (!instance->requestorId) {
2101 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2102 /* Flush */
2103 readl(&instance->reg_set->doorbell);
2104 }
2105 if (instance->requestorId && instance->peerIsPresent)
2106 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2107 } else {
2108 writel(MFI_STOP_ADP,
2109 &instance->reg_set->inbound_doorbell);
2110 }
2111 /* Complete outstanding ioctls when adapter is killed */
2112 megasas_complete_outstanding_ioctls(instance);
2113 }
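/*
 * Kill sequence in brief (annotation, not part of the original source):
 * new I/O and ioctls are blocked by setting MEGASAS_HW_CRITICAL_ERROR,
 * in-flight submissions are given one second to post, MFI_STOP_ADP is
 * written to the controller doorbell (skipped on SR-IOV VFs, which
 * instead clear ld_ids when a peer is present), and any ioctls still
 * outstanding in the driver are completed via
 * megasas_complete_outstanding_ioctls().
 */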
2114
2115 /**
2116 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2117 * restored to max value
2118 * @instance: Adapter soft state
2119 *
2120 */
2121 void
2122 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2123 {
2124 unsigned long flags;
2125
2126 if (instance->flag & MEGASAS_FW_BUSY
2127 && time_after(jiffies, instance->last_time + 5 * HZ)
2128 && atomic_read(&instance->fw_outstanding) <
2129 instance->throttlequeuedepth + 1) {
2130
2131 spin_lock_irqsave(instance->host->host_lock, flags);
2132 instance->flag &= ~MEGASAS_FW_BUSY;
2133
2134 instance->host->can_queue = instance->cur_can_queue;
2135 spin_unlock_irqrestore(instance->host->host_lock, flags);
2136 }
2137 }
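/*
 * Annotation (not part of the original source): MEGASAS_FW_BUSY is set
 * and can_queue lowered to throttlequeuedepth by megasas_reset_timer()
 * when a command times out.  This helper undoes the throttling once at
 * least 5 seconds have passed since last_time was stamped and the
 * number of outstanding firmware commands has dropped below
 * throttlequeuedepth + 1.
 */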
2138
2139 /**
2140 * megasas_complete_cmd_dpc - Completes commands from the reply queue
2141 * @instance_addr: Address of adapter soft state
2142 *
2143 * Tasklet to complete cmds
2144 */
2145 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2146 {
2147 u32 producer;
2148 u32 consumer;
2149 u32 context;
2150 struct megasas_cmd *cmd;
2151 struct megasas_instance *instance =
2152 (struct megasas_instance *)instance_addr;
2153 unsigned long flags;
2154
2155 /* If we have already declared the adapter dead, do not complete cmds */
2156 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2157 return;
2158
2159 spin_lock_irqsave(&instance->completion_lock, flags);
2160
2161 producer = le32_to_cpu(*instance->producer);
2162 consumer = le32_to_cpu(*instance->consumer);
2163
2164 while (consumer != producer) {
2165 context = le32_to_cpu(instance->reply_queue[consumer]);
2166 if (context >= instance->max_fw_cmds) {
2167 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2168 context);
2169 BUG();
2170 }
2171
2172 cmd = instance->cmd_list[context];
2173
2174 megasas_complete_cmd(instance, cmd, DID_OK);
2175
2176 consumer++;
2177 if (consumer == (instance->max_fw_cmds + 1)) {
2178 consumer = 0;
2179 }
2180 }
2181
2182 *instance->consumer = cpu_to_le32(producer);
2183
2184 spin_unlock_irqrestore(&instance->completion_lock, flags);
2185
2186 /*
2187 * Check if we can restore can_queue
2188 */
2189 megasas_check_and_restore_queue_depth(instance);
2190 }
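/*
 * Reply-queue layout assumed by the tasklet above (annotation): the
 * queue is a ring of max_fw_cmds + 1 32-bit context slots shared with
 * the firmware.  The firmware advances *producer as it posts
 * completions; the driver drains entries, wrapping consumer at
 * max_fw_cmds + 1, and finally writes the producer value back to
 * *consumer.  Each context is an index into cmd_list[].
 */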
2191
2192 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2193
2194 /**
2195 * megasas_start_timer - Initializes sriov heartbeat timer object
2196 * @instance: Adapter soft state
2197 *
2198 */
2199 void megasas_start_timer(struct megasas_instance *instance)
2200 {
2201 struct timer_list *timer = &instance->sriov_heartbeat_timer;
2202
2203 timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2204 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2205 add_timer(timer);
2206 }
2207
2208 static void
2209 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2210
2211 static void
2212 process_fw_state_change_wq(struct work_struct *work);
2213
2214 void megasas_do_ocr(struct megasas_instance *instance)
2215 {
2216 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2217 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2218 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2219 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2220 }
2221 instance->instancet->disable_intr(instance);
2222 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2223 instance->issuepend_done = 0;
2224
2225 atomic_set(&instance->fw_outstanding, 0);
2226 megasas_internal_reset_defer_cmds(instance);
2227 process_fw_state_change_wq(&instance->work_init);
2228 }
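/*
 * OCR entry point above, in brief (annotation): for the first
 * generation adapters listed, MEGASAS_ADPRESET_INPROG_SIGN is stamped
 * into the consumer slot; interrupts are then disabled, the recovery
 * state machine is moved to MEGASAS_ADPRESET_SM_INFAULT, pending
 * commands are parked by megasas_internal_reset_defer_cmds(), and
 * process_fw_state_change_wq() restarts the firmware and re-issues the
 * deferred commands.
 */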
2229
2230 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2231 int initial)
2232 {
2233 struct megasas_cmd *cmd;
2234 struct megasas_dcmd_frame *dcmd;
2235 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2236 dma_addr_t new_affiliation_111_h;
2237 int ld, retval = 0;
2238 u8 thisVf;
2239
2240 cmd = megasas_get_cmd(instance);
2241
2242 if (!cmd) {
2243 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2244 "Failed to get cmd for scsi%d\n",
2245 instance->host->host_no);
2246 return -ENOMEM;
2247 }
2248
2249 dcmd = &cmd->frame->dcmd;
2250
2251 if (!instance->vf_affiliation_111) {
2252 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2253 "affiliation for scsi%d\n", instance->host->host_no);
2254 megasas_return_cmd(instance, cmd);
2255 return -ENOMEM;
2256 }
2257
2258 if (initial)
2259 memset(instance->vf_affiliation_111, 0,
2260 sizeof(struct MR_LD_VF_AFFILIATION_111));
2261 else {
2262 new_affiliation_111 =
2263 dma_alloc_coherent(&instance->pdev->dev,
2264 sizeof(struct MR_LD_VF_AFFILIATION_111),
2265 &new_affiliation_111_h, GFP_KERNEL);
2266 if (!new_affiliation_111) {
2267 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2268 "memory for new affiliation for scsi%d\n",
2269 instance->host->host_no);
2270 megasas_return_cmd(instance, cmd);
2271 return -ENOMEM;
2272 }
2273 }
2274
2275 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2276
2277 dcmd->cmd = MFI_CMD_DCMD;
2278 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2279 dcmd->sge_count = 1;
2280 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2281 dcmd->timeout = 0;
2282 dcmd->pad_0 = 0;
2283 dcmd->data_xfer_len =
2284 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2285 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2286
2287 if (initial)
2288 dcmd->sgl.sge32[0].phys_addr =
2289 cpu_to_le32(instance->vf_affiliation_111_h);
2290 else
2291 dcmd->sgl.sge32[0].phys_addr =
2292 cpu_to_le32(new_affiliation_111_h);
2293
2294 dcmd->sgl.sge32[0].length = cpu_to_le32(
2295 sizeof(struct MR_LD_VF_AFFILIATION_111));
2296
2297 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2298 "scsi%d\n", instance->host->host_no);
2299
2300 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2301 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2302 " failed with status 0x%x for scsi%d\n",
2303 dcmd->cmd_status, instance->host->host_no);
2304 retval = 1; /* Do a scan if we couldn't get affiliation */
2305 goto out;
2306 }
2307
2308 if (!initial) {
2309 thisVf = new_affiliation_111->thisVf;
2310 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2311 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2312 new_affiliation_111->map[ld].policy[thisVf]) {
2313 dev_warn(&instance->pdev->dev, "SR-IOV: "
2314 "Got new LD/VF affiliation for scsi%d\n",
2315 instance->host->host_no);
2316 memcpy(instance->vf_affiliation_111,
2317 new_affiliation_111,
2318 sizeof(struct MR_LD_VF_AFFILIATION_111));
2319 retval = 1;
2320 goto out;
2321 }
2322 }
2323 out:
2324 if (new_affiliation_111) {
2325 dma_free_coherent(&instance->pdev->dev,
2326 sizeof(struct MR_LD_VF_AFFILIATION_111),
2327 new_affiliation_111,
2328 new_affiliation_111_h);
2329 }
2330
2331 megasas_return_cmd(instance, cmd);
2332
2333 return retval;
2334 }
2335
2336 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2337 int initial)
2338 {
2339 struct megasas_cmd *cmd;
2340 struct megasas_dcmd_frame *dcmd;
2341 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2342 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2343 dma_addr_t new_affiliation_h;
2344 int i, j, retval = 0, found = 0, doscan = 0;
2345 u8 thisVf;
2346
2347 cmd = megasas_get_cmd(instance);
2348
2349 if (!cmd) {
2350 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2351 "Failed to get cmd for scsi%d\n",
2352 instance->host->host_no);
2353 return -ENOMEM;
2354 }
2355
2356 dcmd = &cmd->frame->dcmd;
2357
2358 if (!instance->vf_affiliation) {
2359 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2360 "affiliation for scsi%d\n", instance->host->host_no);
2361 megasas_return_cmd(instance, cmd);
2362 return -ENOMEM;
2363 }
2364
2365 if (initial)
2366 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2367 sizeof(struct MR_LD_VF_AFFILIATION));
2368 else {
2369 new_affiliation =
2370 dma_alloc_coherent(&instance->pdev->dev,
2371 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2372 &new_affiliation_h, GFP_KERNEL);
2373 if (!new_affiliation) {
2374 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2375 "memory for new affiliation for scsi%d\n",
2376 instance->host->host_no);
2377 megasas_return_cmd(instance, cmd);
2378 return -ENOMEM;
2379 }
2380 }
2381
2382 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2383
2384 dcmd->cmd = MFI_CMD_DCMD;
2385 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2386 dcmd->sge_count = 1;
2387 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2388 dcmd->timeout = 0;
2389 dcmd->pad_0 = 0;
2390 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2391 sizeof(struct MR_LD_VF_AFFILIATION));
2392 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2393
2394 if (initial)
2395 dcmd->sgl.sge32[0].phys_addr =
2396 cpu_to_le32(instance->vf_affiliation_h);
2397 else
2398 dcmd->sgl.sge32[0].phys_addr =
2399 cpu_to_le32(new_affiliation_h);
2400
2401 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2402 sizeof(struct MR_LD_VF_AFFILIATION));
2403
2404 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2405 "scsi%d\n", instance->host->host_no);
2406
2407
2408 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2409 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2410 " failed with status 0x%x for scsi%d\n",
2411 dcmd->cmd_status, instance->host->host_no);
2412 retval = 1; /* Do a scan if we couldn't get affiliation */
2413 goto out;
2414 }
2415
2416 if (!initial) {
2417 if (!new_affiliation->ldCount) {
2418 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2419 "affiliation for passive path for scsi%d\n",
2420 instance->host->host_no);
2421 retval = 1;
2422 goto out;
2423 }
2424 newmap = new_affiliation->map;
2425 savedmap = instance->vf_affiliation->map;
2426 thisVf = new_affiliation->thisVf;
2427 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2428 found = 0;
2429 for (j = 0; j < instance->vf_affiliation->ldCount;
2430 j++) {
2431 if (newmap->ref.targetId ==
2432 savedmap->ref.targetId) {
2433 found = 1;
2434 if (newmap->policy[thisVf] !=
2435 savedmap->policy[thisVf]) {
2436 doscan = 1;
2437 goto out;
2438 }
2439 }
2440 savedmap = (struct MR_LD_VF_MAP *)
2441 ((unsigned char *)savedmap +
2442 savedmap->size);
2443 }
2444 if (!found && newmap->policy[thisVf] !=
2445 MR_LD_ACCESS_HIDDEN) {
2446 doscan = 1;
2447 goto out;
2448 }
2449 newmap = (struct MR_LD_VF_MAP *)
2450 ((unsigned char *)newmap + newmap->size);
2451 }
2452
2453 newmap = new_affiliation->map;
2454 savedmap = instance->vf_affiliation->map;
2455
2456 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2457 found = 0;
2458 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2459 if (savedmap->ref.targetId ==
2460 newmap->ref.targetId) {
2461 found = 1;
2462 if (savedmap->policy[thisVf] !=
2463 newmap->policy[thisVf]) {
2464 doscan = 1;
2465 goto out;
2466 }
2467 }
2468 newmap = (struct MR_LD_VF_MAP *)
2469 ((unsigned char *)newmap +
2470 newmap->size);
2471 }
2472 if (!found && savedmap->policy[thisVf] !=
2473 MR_LD_ACCESS_HIDDEN) {
2474 doscan = 1;
2475 goto out;
2476 }
2477 savedmap = (struct MR_LD_VF_MAP *)
2478 ((unsigned char *)savedmap +
2479 savedmap->size);
2480 }
2481 }
2482 out:
2483 if (doscan) {
2484 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2485 "affiliation for scsi%d\n", instance->host->host_no);
2486 memcpy(instance->vf_affiliation, new_affiliation,
2487 new_affiliation->size);
2488 retval = 1;
2489 }
2490
2491 if (new_affiliation)
2492 dma_free_coherent(&instance->pdev->dev,
2493 (MAX_LOGICAL_DRIVES + 1) *
2494 sizeof(struct MR_LD_VF_AFFILIATION),
2495 new_affiliation, new_affiliation_h);
2496 megasas_return_cmd(instance, cmd);
2497
2498 return retval;
2499 }
2500
2501 /* This function will get the current SR-IOV LD/VF affiliation */
2502 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2503 int initial)
2504 {
2505 int retval;
2506
2507 if (instance->PlasmaFW111)
2508 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2509 else
2510 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2511 return retval;
2512 }
2513
2514 /* This function will tell FW to start the SR-IOV heartbeat */
2515 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2516 int initial)
2517 {
2518 struct megasas_cmd *cmd;
2519 struct megasas_dcmd_frame *dcmd;
2520 int retval = 0;
2521
2522 cmd = megasas_get_cmd(instance);
2523
2524 if (!cmd) {
2525 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2526 "Failed to get cmd for scsi%d\n",
2527 instance->host->host_no);
2528 return -ENOMEM;
2529 }
2530
2531 dcmd = &cmd->frame->dcmd;
2532
2533 if (initial) {
2534 instance->hb_host_mem =
2535 dma_alloc_coherent(&instance->pdev->dev,
2536 sizeof(struct MR_CTRL_HB_HOST_MEM),
2537 &instance->hb_host_mem_h,
2538 GFP_KERNEL);
2539 if (!instance->hb_host_mem) {
2540 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2541 " memory for heartbeat host memory for scsi%d\n",
2542 instance->host->host_no);
2543 retval = -ENOMEM;
2544 goto out;
2545 }
2546 }
2547
2548 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2549
2550 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2551 dcmd->cmd = MFI_CMD_DCMD;
2552 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2553 dcmd->sge_count = 1;
2554 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2555 dcmd->timeout = 0;
2556 dcmd->pad_0 = 0;
2557 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2558 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2559
2560 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2561 sizeof(struct MR_CTRL_HB_HOST_MEM));
2562
2563 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2564 instance->host->host_no);
2565
2566 if ((instance->adapter_type != MFI_SERIES) &&
2567 !instance->mask_interrupts)
2568 retval = megasas_issue_blocked_cmd(instance, cmd,
2569 MEGASAS_ROUTINE_WAIT_TIME_VF);
2570 else
2571 retval = megasas_issue_polled(instance, cmd);
2572
2573 if (retval) {
2574 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2575 "_MEM_ALLOC DCMD %s for scsi%d\n",
2576 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2577 "timed out" : "failed", instance->host->host_no);
2578 retval = 1;
2579 }
2580
2581 out:
2582 megasas_return_cmd(instance, cmd);
2583
2584 return retval;
2585 }
2586
2587 /* Handler for SR-IOV heartbeat */
2588 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2589 {
2590 struct megasas_instance *instance =
2591 from_timer(instance, t, sriov_heartbeat_timer);
2592
2593 if (instance->hb_host_mem->HB.fwCounter !=
2594 instance->hb_host_mem->HB.driverCounter) {
2595 instance->hb_host_mem->HB.driverCounter =
2596 instance->hb_host_mem->HB.fwCounter;
2597 mod_timer(&instance->sriov_heartbeat_timer,
2598 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2599 } else {
2600 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2601 "completed for scsi%d\n", instance->host->host_no);
2602 schedule_work(&instance->work_init);
2603 }
2604 }
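/*
 * Heartbeat protocol used above (annotation): the firmware increments
 * fwCounter in the shared hb_host_mem region; on each timer expiry the
 * driver copies it into driverCounter and re-arms the timer.  If
 * fwCounter has not moved since the previous expiry, the heartbeat is
 * considered missed and work_init is scheduled to start recovery.
 */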
2605
2606 /**
2607 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2608 * @instance: Adapter soft state
2609 *
2610 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2611 * complete all its outstanding commands. Returns error if one or more IOs
2612 * are pending after this time period, in which case it also marks the controller dead.
2613 */
2614 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2615 {
2616 int i, sl, outstanding;
2617 u32 reset_index;
2618 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2619 unsigned long flags;
2620 struct list_head clist_local;
2621 struct megasas_cmd *reset_cmd;
2622 u32 fw_state;
2623
2624 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2625 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2626 __func__, __LINE__);
2627 return FAILED;
2628 }
2629
2630 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2631
2632 INIT_LIST_HEAD(&clist_local);
2633 spin_lock_irqsave(&instance->hba_lock, flags);
2634 list_splice_init(&instance->internal_reset_pending_q,
2635 &clist_local);
2636 spin_unlock_irqrestore(&instance->hba_lock, flags);
2637
2638 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2639 for (i = 0; i < wait_time; i++) {
2640 msleep(1000);
2641 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2642 break;
2643 }
2644
2645 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2646 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2647 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2648 return FAILED;
2649 }
2650
2651 reset_index = 0;
2652 while (!list_empty(&clist_local)) {
2653 reset_cmd = list_entry((&clist_local)->next,
2654 struct megasas_cmd, list);
2655 list_del_init(&reset_cmd->list);
2656 if (reset_cmd->scmd) {
2657 reset_cmd->scmd->result = DID_REQUEUE << 16;
2658 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2659 reset_index, reset_cmd,
2660 reset_cmd->scmd->cmnd[0]);
2661
2662 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2663 megasas_return_cmd(instance, reset_cmd);
2664 } else if (reset_cmd->sync_cmd) {
2665 dev_notice(&instance->pdev->dev, "%p synch cmds"
2666 " reset queue\n",
2667 reset_cmd);
2668
2669 reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
2670 instance->instancet->fire_cmd(instance,
2671 reset_cmd->frame_phys_addr,
2672 0, instance->reg_set);
2673 } else {
2674 dev_notice(&instance->pdev->dev, "%p unexpected"
2675 " cmds lst\n",
2676 reset_cmd);
2677 }
2678 reset_index++;
2679 }
2680
2681 return SUCCESS;
2682 }
2683
2684 for (i = 0; i < resetwaittime; i++) {
2685 outstanding = atomic_read(&instance->fw_outstanding);
2686
2687 if (!outstanding)
2688 break;
2689
2690 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2691 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2692 "commands to complete\n", i, outstanding);
2693 /*
2694 * Call cmd completion routine. Cmds are to be
2695 * completed directly without depending on the isr.
2696 */
2697 megasas_complete_cmd_dpc((unsigned long)instance);
2698 }
2699
2700 msleep(1000);
2701 }
2702
2703 i = 0;
2704 outstanding = atomic_read(&instance->fw_outstanding);
2705 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2706
2707 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2708 goto no_outstanding;
2709
2710 if (instance->disableOnlineCtrlReset)
2711 goto kill_hba_and_failed;
2712 do {
2713 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2714 dev_info(&instance->pdev->dev,
2715 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2716 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2717 if (i == 3)
2718 goto kill_hba_and_failed;
2719 megasas_do_ocr(instance);
2720
2721 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2722 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2723 __func__, __LINE__);
2724 return FAILED;
2725 }
2726 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2727 __func__, __LINE__);
2728
2729 for (sl = 0; sl < 10; sl++)
2730 msleep(500);
2731
2732 outstanding = atomic_read(&instance->fw_outstanding);
2733
2734 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2735 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2736 goto no_outstanding;
2737 }
2738 i++;
2739 } while (i <= 3);
2740
2741 no_outstanding:
2742
2743 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2744 __func__, __LINE__);
2745 return SUCCESS;
2746
2747 kill_hba_and_failed:
2748
2749 /* Reset not supported, kill adapter */
2750 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2751 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2752 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2753 atomic_read(&instance->fw_outstanding));
2754 megasas_dump_pending_frames(instance);
2755 megaraid_sas_kill_hba(instance);
2756
2757 return FAILED;
2758 }
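/*
 * Recovery flow of the function above, in brief (annotation): wait up
 * to resetwaittime seconds for outstanding I/O to drain, periodically
 * running the completion tasklet by hand.  If the firmware is faulted
 * or I/O is still pending, attempt an online controller reset (OCR) up
 * to three times; if that fails, or OCR is disabled by the firmware,
 * the adapter is killed via megaraid_sas_kill_hba().
 */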
2759
2760 /**
2761 * megasas_generic_reset - Generic reset routine
2762 * @scmd: Mid-layer SCSI command
2763 *
2764 * This routine implements a generic reset handler for device, bus and host
2765 * reset requests. Device, bus and host specific reset handlers can use this
2766 * function after they do their specific tasks.
2767 */
2768 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2769 {
2770 int ret_val;
2771 struct megasas_instance *instance;
2772
2773 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2774
2775 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2776 scmd->cmnd[0], scmd->retries);
2777
2778 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2779 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2780 return FAILED;
2781 }
2782
2783 ret_val = megasas_wait_for_outstanding(instance);
2784 if (ret_val == SUCCESS)
2785 dev_notice(&instance->pdev->dev, "reset successful\n");
2786 else
2787 dev_err(&instance->pdev->dev, "failed to do reset\n");
2788
2789 return ret_val;
2790 }
2791
2792 /**
2793 * megasas_reset_timer - quiesce the adapter if required
2794 * @scmd: scsi cmnd
2795 *
2796 * Sets the FW busy flag and reduces the host->can_queue if the
2797 * cmd has not been completed within the timeout period.
2798 */
2799 static enum
2800 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2801 {
2802 struct megasas_instance *instance;
2803 unsigned long flags;
2804
2805 if (time_after(jiffies, scmd->jiffies_at_alloc +
2806 (scmd_timeout * 2) * HZ)) {
2807 return BLK_EH_DONE;
2808 }
2809
2810 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2811 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2812 /* Command timed out: mark FW busy and throttle IO */
2813 spin_lock_irqsave(instance->host->host_lock, flags);
2814
2815 instance->host->can_queue = instance->throttlequeuedepth;
2816 instance->last_time = jiffies;
2817 instance->flag |= MEGASAS_FW_BUSY;
2818
2819 spin_unlock_irqrestore(instance->host->host_lock, flags);
2820 }
2821 return BLK_EH_RESET_TIMER;
2822 }
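/*
 * Timing sketch for the handler above (annotation): with scmd_timeout
 * at its default of 90s, a timed-out command keeps being retried via
 * BLK_EH_RESET_TIMER until roughly 2 * scmd_timeout = 180s have elapsed
 * since the command was allocated; after that BLK_EH_DONE is returned
 * and normal SCSI error handling (eventually megasas_reset_bus_host())
 * takes over.  While throttled, can_queue stays at throttlequeuedepth
 * until megasas_check_and_restore_queue_depth() restores it.
 */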
2823
2824 /**
2825 * megasas_dump_frame - This function will dump MPT/MFI frame
2826 */
2827 static inline void
2828 megasas_dump_frame(void *mpi_request, int sz)
2829 {
2830 int i;
2831 __le32 *mfp = (__le32 *)mpi_request;
2832
2833 printk(KERN_INFO "IO request frame:\n\t");
2834 for (i = 0; i < sz / sizeof(__le32); i++) {
2835 if (i && ((i % 8) == 0))
2836 printk("\n\t");
2837 printk("%08x ", le32_to_cpu(mfp[i]));
2838 }
2839 printk("\n");
2840 }
2841
2842 /**
2843 * megasas_reset_bus_host - Bus & host reset handler entry point
2844 */
2845 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
2846 {
2847 int ret;
2848 struct megasas_instance *instance;
2849
2850 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2851
2852 scmd_printk(KERN_INFO, scmd,
2853 "Controller reset is requested due to IO timeout\n"
2854 "SCSI command pointer: (%p)\t SCSI host state: %d\t"
2855 " SCSI host busy: %d\t FW outstanding: %d\n",
2856 scmd, scmd->device->host->shost_state,
2857 scsi_host_busy(scmd->device->host),
2858 atomic_read(&instance->fw_outstanding));
2859
2860 /*
2861 * First wait for all commands to complete
2862 */
2863 if (instance->adapter_type == MFI_SERIES) {
2864 ret = megasas_generic_reset(scmd);
2865 } else {
2866 struct megasas_cmd_fusion *cmd;
2867 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2868 if (cmd)
2869 megasas_dump_frame(cmd->io_request,
2870 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
2871 ret = megasas_reset_fusion(scmd->device->host,
2872 SCSIIO_TIMEOUT_OCR);
2873 }
2874
2875 return ret;
2876 }
2877
2878 /**
2879 * megasas_task_abort - Issues task abort request to firmware
2880 * (supported only for fusion adapters)
2881 * @scmd: SCSI command pointer
2882 */
2883 static int megasas_task_abort(struct scsi_cmnd *scmd)
2884 {
2885 int ret;
2886 struct megasas_instance *instance;
2887
2888 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2889
2890 if (instance->adapter_type != MFI_SERIES)
2891 ret = megasas_task_abort_fusion(scmd);
2892 else {
2893 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
2894 ret = FAILED;
2895 }
2896
2897 return ret;
2898 }
2899
2900 /**
2901 * megasas_reset_target - Issues target reset request to firmware
2902 * (supported only for fusion adapters)
2903 * @scmd: SCSI command pointer
2904 */
2905 static int megasas_reset_target(struct scsi_cmnd *scmd)
2906 {
2907 int ret;
2908 struct megasas_instance *instance;
2909
2910 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2911
2912 if (instance->adapter_type != MFI_SERIES)
2913 ret = megasas_reset_target_fusion(scmd);
2914 else {
2915 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
2916 ret = FAILED;
2917 }
2918
2919 return ret;
2920 }
2921
2922 /**
2923 * megasas_bios_param - Returns disk geometry for a disk
2924 * @sdev: device handle
2925 * @bdev: block device
2926 * @capacity: drive capacity
2927 * @geom: geometry parameters
2928 */
2929 static int
2930 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2931 sector_t capacity, int geom[])
2932 {
2933 int heads;
2934 int sectors;
2935 sector_t cylinders;
2936 unsigned long tmp;
2937
2938 /* Default heads (64) & sectors (32) */
2939 heads = 64;
2940 sectors = 32;
2941
2942 tmp = heads * sectors;
2943 cylinders = capacity;
2944
2945 sector_div(cylinders, tmp);
2946
2947 /*
2948 * Handle extended translation size for logical drives > 1Gb
2949 */
2950
2951 if (capacity >= 0x200000) {
2952 heads = 255;
2953 sectors = 63;
2954 tmp = heads*sectors;
2955 cylinders = capacity;
2956 sector_div(cylinders, tmp);
2957 }
2958
2959 geom[0] = heads;
2960 geom[1] = sectors;
2961 geom[2] = cylinders;
2962
2963 return 0;
2964 }
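/*
 * Worked example of the translation above (annotation): for a drive of
 * 2147483648 512-byte sectors (1 TiB), capacity >= 0x200000, so the
 * extended geometry applies: heads = 255, sectors = 63, and
 * cylinders = 2147483648 / (255 * 63) = 133674 (integer division
 * performed by sector_div()).
 */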
2965
2966 static void megasas_aen_polling(struct work_struct *work);
2967
2968 /**
2969 * megasas_service_aen - Processes an event notification
2970 * @instance: Adapter soft state
2971 * @cmd: AEN command completed by the ISR
2972 *
2973 * For AEN, driver sends a command down to FW that is held by the FW till an
2974 * event occurs. When an event of interest occurs, FW completes the command
2975 * that it was previously holding.
2976 *
2977 * This routine sends a SIGIO signal to processes that have registered with
2978 * driver for AEN.
2979 */
2980 static void
2981 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2982 {
2983 unsigned long flags;
2984
2985 /*
2986 * Don't signal the app if this is just an aborted, previously registered AEN
2987 */
2988 if ((!cmd->abort_aen) && (instance->unload == 0)) {
2989 spin_lock_irqsave(&poll_aen_lock, flags);
2990 megasas_poll_wait_aen = 1;
2991 spin_unlock_irqrestore(&poll_aen_lock, flags);
2992 wake_up(&megasas_poll_wait);
2993 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
2994 }
2995 else
2996 cmd->abort_aen = 0;
2997
2998 instance->aen_cmd = NULL;
2999
3000 megasas_return_cmd(instance, cmd);
3001
3002 if ((instance->unload == 0) &&
3003 ((instance->issuepend_done == 1))) {
3004 struct megasas_aen_event *ev;
3005
3006 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3007 if (!ev) {
3008 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3009 } else {
3010 ev->instance = instance;
3011 instance->ev = ev;
3012 INIT_DELAYED_WORK(&ev->hotplug_work,
3013 megasas_aen_polling);
3014 schedule_delayed_work(&ev->hotplug_work, 0);
3015 }
3016 }
3017 }
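/*
 * Userspace notification path sketched above (annotation): setting
 * megasas_poll_wait_aen and waking megasas_poll_wait lets a management
 * application sleeping in poll()/select() on the driver's management
 * character device see POLLIN, while kill_fasync() delivers SIGIO to
 * any process that enabled asynchronous notification (FASYNC) on the
 * same device.  The device node itself is registered elsewhere in this
 * file.
 */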
3018
3019 static ssize_t
3020 megasas_fw_crash_buffer_store(struct device *cdev,
3021 struct device_attribute *attr, const char *buf, size_t count)
3022 {
3023 struct Scsi_Host *shost = class_to_shost(cdev);
3024 struct megasas_instance *instance =
3025 (struct megasas_instance *) shost->hostdata;
3026 int val = 0;
3027 unsigned long flags;
3028
3029 if (kstrtoint(buf, 0, &val) != 0)
3030 return -EINVAL;
3031
3032 spin_lock_irqsave(&instance->crashdump_lock, flags);
3033 instance->fw_crash_buffer_offset = val;
3034 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3035 return strlen(buf);
3036 }
3037
3038 static ssize_t
3039 megasas_fw_crash_buffer_show(struct device *cdev,
3040 struct device_attribute *attr, char *buf)
3041 {
3042 struct Scsi_Host *shost = class_to_shost(cdev);
3043 struct megasas_instance *instance =
3044 (struct megasas_instance *) shost->hostdata;
3045 u32 size;
3046 unsigned long buff_addr;
3047 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3048 unsigned long src_addr;
3049 unsigned long flags;
3050 u32 buff_offset;
3051
3052 spin_lock_irqsave(&instance->crashdump_lock, flags);
3053 buff_offset = instance->fw_crash_buffer_offset;
3054 if (!instance->crash_dump_buf &&
3055 !((instance->fw_crash_state == AVAILABLE) ||
3056 (instance->fw_crash_state == COPYING))) {
3057 dev_err(&instance->pdev->dev,
3058 "Firmware crash dump is not available\n");
3059 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3060 return -EINVAL;
3061 }
3062
3063 buff_addr = (unsigned long) buf;
3064
3065 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3066 dev_err(&instance->pdev->dev,
3067 "Firmware crash dump offset is out of range\n");
3068 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3069 return 0;
3070 }
3071
3072 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3073 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3074
3075 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3076 (buff_offset % dmachunk);
3077 memcpy(buf, (void *)src_addr, size);
3078 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3079
3080 return size;
3081 }
3082
3083 static ssize_t
3084 megasas_fw_crash_buffer_size_show(struct device *cdev,
3085 struct device_attribute *attr, char *buf)
3086 {
3087 struct Scsi_Host *shost = class_to_shost(cdev);
3088 struct megasas_instance *instance =
3089 (struct megasas_instance *) shost->hostdata;
3090
3091 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3092 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3093 }
3094
3095 static ssize_t
3096 megasas_fw_crash_state_store(struct device *cdev,
3097 struct device_attribute *attr, const char *buf, size_t count)
3098 {
3099 struct Scsi_Host *shost = class_to_shost(cdev);
3100 struct megasas_instance *instance =
3101 (struct megasas_instance *) shost->hostdata;
3102 int val = 0;
3103 unsigned long flags;
3104
3105 if (kstrtoint(buf, 0, &val) != 0)
3106 return -EINVAL;
3107
3108 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3109 dev_err(&instance->pdev->dev, "application set an invalid "
3110 "firmware crash state\n");
3111 return -EINVAL;
3112 }
3113
3114 instance->fw_crash_state = val;
3115
3116 if ((val == COPIED) || (val == COPY_ERROR)) {
3117 spin_lock_irqsave(&instance->crashdump_lock, flags);
3118 megasas_free_host_crash_buffer(instance);
3119 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3120 if (val == COPY_ERROR)
3121 dev_info(&instance->pdev->dev, "application failed to "
3122 "copy Firmware crash dump\n");
3123 else
3124 dev_info(&instance->pdev->dev, "Firmware crash dump "
3125 "copied successfully\n");
3126 }
3127 return strlen(buf);
3128 }
3129
3130 static ssize_t
3131 megasas_fw_crash_state_show(struct device *cdev,
3132 struct device_attribute *attr, char *buf)
3133 {
3134 struct Scsi_Host *shost = class_to_shost(cdev);
3135 struct megasas_instance *instance =
3136 (struct megasas_instance *) shost->hostdata;
3137
3138 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3139 }
3140
3141 static ssize_t
3142 megasas_page_size_show(struct device *cdev,
3143 struct device_attribute *attr, char *buf)
3144 {
3145 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3146 }
3147
3148 static ssize_t
3149 megasas_ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3150 char *buf)
3151 {
3152 struct Scsi_Host *shost = class_to_shost(cdev);
3153 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3154
3155 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3156 }
3157
3158 static ssize_t
3159 megasas_fw_cmds_outstanding_show(struct device *cdev,
3160 struct device_attribute *attr, char *buf)
3161 {
3162 struct Scsi_Host *shost = class_to_shost(cdev);
3163 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3164
3165 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3166 }
3167
3168 static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
3169 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
3170 static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
3171 megasas_fw_crash_buffer_size_show, NULL);
3172 static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
3173 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
3174 static DEVICE_ATTR(page_size, S_IRUGO,
3175 megasas_page_size_show, NULL);
3176 static DEVICE_ATTR(ldio_outstanding, S_IRUGO,
3177 megasas_ldio_outstanding_show, NULL);
3178 static DEVICE_ATTR(fw_cmds_outstanding, S_IRUGO,
3179 megasas_fw_cmds_outstanding_show, NULL);
3180
3181 struct device_attribute *megaraid_host_attrs[] = {
3182 &dev_attr_fw_crash_buffer_size,
3183 &dev_attr_fw_crash_buffer,
3184 &dev_attr_fw_crash_state,
3185 &dev_attr_page_size,
3186 &dev_attr_ldio_outstanding,
3187 &dev_attr_fw_cmds_outstanding,
3188 NULL,
3189 };
3190
3191 /*
3192 * Scsi host template for megaraid_sas driver
3193 */
3194 static struct scsi_host_template megasas_template = {
3195
3196 .module = THIS_MODULE,
3197 .name = "Avago SAS based MegaRAID driver",
3198 .proc_name = "megaraid_sas",
3199 .slave_configure = megasas_slave_configure,
3200 .slave_alloc = megasas_slave_alloc,
3201 .slave_destroy = megasas_slave_destroy,
3202 .queuecommand = megasas_queue_command,
3203 .eh_target_reset_handler = megasas_reset_target,
3204 .eh_abort_handler = megasas_task_abort,
3205 .eh_host_reset_handler = megasas_reset_bus_host,
3206 .eh_timed_out = megasas_reset_timer,
3207 .shost_attrs = megaraid_host_attrs,
3208 .bios_param = megasas_bios_param,
3209 .change_queue_depth = scsi_change_queue_depth,
3210 .no_write_same = 1,
3211 };
3212
3213 /**
3214 * megasas_complete_int_cmd - Completes an internal command
3215 * @instance: Adapter soft state
3216 * @cmd: Command to be completed
3217 *
3218 * The megasas_issue_blocked_cmd() function waits for a command to complete
3219 * after it issues a command. This function wakes up that waiting routine by
3220 * calling wake_up() on the wait queue.
3221 */
3222 static void
3223 megasas_complete_int_cmd(struct megasas_instance *instance,
3224 struct megasas_cmd *cmd)
3225 {
3226 cmd->cmd_status_drv = cmd->frame->io.cmd_status;
3227 wake_up(&instance->int_cmd_wait_q);
3228 }
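/*
 * Wait side of this handshake (sketch, assuming the behaviour of
 * megasas_issue_blocked_cmd() defined elsewhere in this file): the
 * issuer presumably initialises cmd_status_drv to
 * MFI_STAT_INVALID_STATUS, fires the frame and sleeps on
 * int_cmd_wait_q until the status changes, which is exactly what this
 * completion path provides.
 */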
3229
3230 /**
3231 * megasas_complete_abort - Completes aborting a command
3232 * @instance: Adapter soft state
3233 * @cmd: Cmd that was issued to abort another cmd
3234 *
3235 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3236 * after it issues an abort on a previously issued command. This function
3237 * wakes up all functions waiting on the same wait queue.
3238 */
3239 static void
3240 megasas_complete_abort(struct megasas_instance *instance,
3241 struct megasas_cmd *cmd)
3242 {
3243 if (cmd->sync_cmd) {
3244 cmd->sync_cmd = 0;
3245 cmd->cmd_status_drv = 0;
3246 wake_up(&instance->abort_cmd_wait_q);
3247 }
3248 }
3249
3250 /**
3251 * megasas_complete_cmd - Completes a command
3252 * @instance: Adapter soft state
3253 * @cmd: Command to be completed
3254 * @alt_status: If non-zero, use this value as status to
3255 * SCSI mid-layer instead of the value returned
3256 * by the FW. This should be used if caller wants
3257 * an alternate status (as in the case of aborted
3258 * commands)
3259 */
3260 void
3261 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3262 u8 alt_status)
3263 {
3264 int exception = 0;
3265 struct megasas_header *hdr = &cmd->frame->hdr;
3266 unsigned long flags;
3267 struct fusion_context *fusion = instance->ctrl_context;
3268 u32 opcode, status;
3269
3270 /* flag for the retry reset */
3271 cmd->retry_for_fw_reset = 0;
3272
3273 if (cmd->scmd)
3274 cmd->scmd->SCp.ptr = NULL;
3275
3276 switch (hdr->cmd) {
3277 case MFI_CMD_INVALID:
3278 /* Some older 1068 controller FW may keep a pended
3279 * MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3280 * when booting the kdump kernel. Ignore this command to
3281 * prevent a kernel panic on shutdown of the kdump kernel. */
3282 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3283 "completed\n");
3284 dev_warn(&instance->pdev->dev, "If you have a controller "
3285 "other than PERC5, please upgrade your firmware\n");
3286 break;
3287 case MFI_CMD_PD_SCSI_IO:
3288 case MFI_CMD_LD_SCSI_IO:
3289
3290 /*
3291 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3292 * issued either through an IO path or an IOCTL path. If it
3293 * was via IOCTL, we will send it to internal completion.
3294 */
3295 if (cmd->sync_cmd) {
3296 cmd->sync_cmd = 0;
3297 megasas_complete_int_cmd(instance, cmd);
3298 break;
3299 }
3300 /* fall through */
3301
3302 case MFI_CMD_LD_READ:
3303 case MFI_CMD_LD_WRITE:
3304
3305 if (alt_status) {
3306 cmd->scmd->result = alt_status << 16;
3307 exception = 1;
3308 }
3309
3310 if (exception) {
3311
3312 atomic_dec(&instance->fw_outstanding);
3313
3314 scsi_dma_unmap(cmd->scmd);
3315 cmd->scmd->scsi_done(cmd->scmd);
3316 megasas_return_cmd(instance, cmd);
3317
3318 break;
3319 }
3320
3321 switch (hdr->cmd_status) {
3322
3323 case MFI_STAT_OK:
3324 cmd->scmd->result = DID_OK << 16;
3325 break;
3326
3327 case MFI_STAT_SCSI_IO_FAILED:
3328 case MFI_STAT_LD_INIT_IN_PROGRESS:
3329 cmd->scmd->result =
3330 (DID_ERROR << 16) | hdr->scsi_status;
3331 break;
3332
3333 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3334
3335 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3336
3337 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3338 memset(cmd->scmd->sense_buffer, 0,
3339 SCSI_SENSE_BUFFERSIZE);
3340 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3341 hdr->sense_len);
3342
3343 cmd->scmd->result |= DRIVER_SENSE << 24;
3344 }
3345
3346 break;
3347
3348 case MFI_STAT_LD_OFFLINE:
3349 case MFI_STAT_DEVICE_NOT_FOUND:
3350 cmd->scmd->result = DID_BAD_TARGET << 16;
3351 break;
3352
3353 default:
3354 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3355 hdr->cmd_status);
3356 cmd->scmd->result = DID_ERROR << 16;
3357 break;
3358 }
3359
3360 atomic_dec(&instance->fw_outstanding);
3361
3362 scsi_dma_unmap(cmd->scmd);
3363 cmd->scmd->scsi_done(cmd->scmd);
3364 megasas_return_cmd(instance, cmd);
3365
3366 break;
3367
3368 case MFI_CMD_SMP:
3369 case MFI_CMD_STP:
3370 case MFI_CMD_NVME:
3371 megasas_complete_int_cmd(instance, cmd);
3372 break;
3373
3374 case MFI_CMD_DCMD:
3375 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3376 /* Check for LD map update */
3377 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3378 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3379 fusion->fast_path_io = 0;
3380 spin_lock_irqsave(instance->host->host_lock, flags);
3381 status = cmd->frame->hdr.cmd_status;
3382 instance->map_update_cmd = NULL;
3383 if (status != MFI_STAT_OK) {
3384 if (status != MFI_STAT_NOT_FOUND)
3385 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3386 cmd->frame->hdr.cmd_status);
3387 else {
3388 megasas_return_cmd(instance, cmd);
3389 spin_unlock_irqrestore(
3390 instance->host->host_lock,
3391 flags);
3392 break;
3393 }
3394 }
3395
3396 megasas_return_cmd(instance, cmd);
3397
3398 /*
3399 * Set fast path IO to ZERO.
3400 * Validate Map will set proper value.
3401 * Meanwhile all IOs will go as LD IO.
3402 */
3403 if (status == MFI_STAT_OK &&
3404 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3405 instance->map_id++;
3406 fusion->fast_path_io = 1;
3407 } else {
3408 fusion->fast_path_io = 0;
3409 }
3410
3411 megasas_sync_map_info(instance);
3412 spin_unlock_irqrestore(instance->host->host_lock,
3413 flags);
3414 break;
3415 }
3416 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3417 opcode == MR_DCMD_CTRL_EVENT_GET) {
3418 spin_lock_irqsave(&poll_aen_lock, flags);
3419 megasas_poll_wait_aen = 0;
3420 spin_unlock_irqrestore(&poll_aen_lock, flags);
3421 }
3422
3423 /* FW has an updated PD sequence */
3424 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3425 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3426
3427 spin_lock_irqsave(instance->host->host_lock, flags);
3428 status = cmd->frame->hdr.cmd_status;
3429 instance->jbod_seq_cmd = NULL;
3430 megasas_return_cmd(instance, cmd);
3431
3432 if (status == MFI_STAT_OK) {
3433 instance->pd_seq_map_id++;
3434 /* Re-register a pd sync seq num cmd */
3435 if (megasas_sync_pd_seq_num(instance, true))
3436 instance->use_seqnum_jbod_fp = false;
3437 } else
3438 instance->use_seqnum_jbod_fp = false;
3439
3440 spin_unlock_irqrestore(instance->host->host_lock, flags);
3441 break;
3442 }
3443
3444 /*
3445 * See if we got an event notification
3446 */
3447 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3448 megasas_service_aen(instance, cmd);
3449 else
3450 megasas_complete_int_cmd(instance, cmd);
3451
3452 break;
3453
3454 case MFI_CMD_ABORT:
3455 /*
3456 * Cmd issued to abort another cmd returned
3457 */
3458 megasas_complete_abort(instance, cmd);
3459 break;
3460
3461 default:
3462 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3463 hdr->cmd);
3464 megasas_complete_int_cmd(instance, cmd);
3465 break;
3466 }
3467 }
3468
3469 /**
3470 * megasas_issue_pending_cmds_again - issue all pending cmds
3471 * in FW again because of the fw reset
3472 * @instance: Adapter soft state
3473 */
3474 static inline void
3475 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3476 {
3477 struct megasas_cmd *cmd;
3478 struct list_head clist_local;
3479 union megasas_evt_class_locale class_locale;
3480 unsigned long flags;
3481 u32 seq_num;
3482
3483 INIT_LIST_HEAD(&clist_local);
3484 spin_lock_irqsave(&instance->hba_lock, flags);
3485 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3486 spin_unlock_irqrestore(&instance->hba_lock, flags);
3487
3488 while (!list_empty(&clist_local)) {
3489 cmd = list_entry((&clist_local)->next,
3490 struct megasas_cmd, list);
3491 list_del_init(&cmd->list);
3492
3493 if (cmd->sync_cmd || cmd->scmd) {
3494 dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3495 " detected to be pending during HBA reset\n",
3496 cmd, cmd->scmd, cmd->sync_cmd);
3497
3498 cmd->retry_for_fw_reset++;
3499
3500 if (cmd->retry_for_fw_reset == 3) {
3501 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3502 " was tried multiple times during reset."
3503 " Shutting down the HBA\n",
3504 cmd, cmd->scmd, cmd->sync_cmd);
3505 instance->instancet->disable_intr(instance);
3506 atomic_set(&instance->fw_reset_no_pci_access, 1);
3507 megaraid_sas_kill_hba(instance);
3508 return;
3509 }
3510 }
3511
3512 if (cmd->sync_cmd == 1) {
3513 if (cmd->scmd) {
3514 dev_notice(&instance->pdev->dev, "unexpected"
3515 " cmd attached to internal command!\n");
3516 }
3517 dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3518 " on the internal reset queue,"
3519 " issue it again.\n", cmd);
3520 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
3521 instance->instancet->fire_cmd(instance,
3522 cmd->frame_phys_addr,
3523 0, instance->reg_set);
3524 } else if (cmd->scmd) {
3525 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3526 " detected on the internal queue, issue again.\n",
3527 cmd, cmd->scmd->cmnd[0]);
3528
3529 atomic_inc(&instance->fw_outstanding);
3530 instance->instancet->fire_cmd(instance,
3531 cmd->frame_phys_addr,
3532 cmd->frame_count-1, instance->reg_set);
3533 } else {
3534 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3535 " internal reset defer list during re-issue!!\n",
3536 cmd);
3537 }
3538 }
3539
3540 if (instance->aen_cmd) {
3541 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3542 megasas_return_cmd(instance, instance->aen_cmd);
3543
3544 instance->aen_cmd = NULL;
3545 }
3546
3547 /*
3548 * Initiate AEN (Asynchronous Event Notification)
3549 */
3550 seq_num = instance->last_seq_num;
3551 class_locale.members.reserved = 0;
3552 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3553 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3554
3555 megasas_register_aen(instance, seq_num, class_locale.word);
3556 }
3557
3558 /**
3559 * Move the internal reset pending commands to a deferred queue.
3560 *
3561 * We move the commands pending at internal reset time to a
3562 * pending queue. This queue will be flushed after successful
3563 * completion of the internal reset sequence. If the internal reset
3564 * does not complete in time, the kernel reset handler will flush
3565 * these commands.
3566 **/
3567 static void
3568 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3569 {
3570 struct megasas_cmd *cmd;
3571 int i;
3572 u16 max_cmd = instance->max_fw_cmds;
3573 u32 defer_index;
3574 unsigned long flags;
3575
3576 defer_index = 0;
3577 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3578 for (i = 0; i < max_cmd; i++) {
3579 cmd = instance->cmd_list[i];
3580 if (cmd->sync_cmd == 1 || cmd->scmd) {
3581 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3582 " on the defer queue as internal\n",
3583 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3584
3585 if (!list_empty(&cmd->list)) {
3586 dev_notice(&instance->pdev->dev, "ERROR while"
3587 " moving this cmd:%p, %d %p, it was"
3588 " discovered on some list?\n",
3589 cmd, cmd->sync_cmd, cmd->scmd);
3590
3591 list_del_init(&cmd->list);
3592 }
3593 defer_index++;
3594 list_add_tail(&cmd->list,
3595 &instance->internal_reset_pending_q);
3596 }
3597 }
3598 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3599 }
3600
3601
3602 static void
3603 process_fw_state_change_wq(struct work_struct *work)
3604 {
3605 struct megasas_instance *instance =
3606 container_of(work, struct megasas_instance, work_init);
3607 u32 wait;
3608 unsigned long flags;
3609
3610 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3611 dev_notice(&instance->pdev->dev, "error, unexpected recovery state %x\n",
3612 atomic_read(&instance->adprecovery));
3613 return ;
3614 }
3615
3616 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3617 dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3618 " state, restarting it...\n");
3619
3620 instance->instancet->disable_intr(instance);
3621 atomic_set(&instance->fw_outstanding, 0);
3622
3623 atomic_set(&instance->fw_reset_no_pci_access, 1);
3624 instance->instancet->adp_reset(instance, instance->reg_set);
3625 atomic_set(&instance->fw_reset_no_pci_access, 0);
3626
3627 dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3628 " initiating next stage...\n");
3629
3630 dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3631 " state 2 starting...\n");
3632
3633 /* wait for about 30 seconds before starting the second init */
3634 for (wait = 0; wait < 30; wait++) {
3635 msleep(1000);
3636 }
3637
3638 if (megasas_transition_to_ready(instance, 1)) {
3639 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3640
3641 atomic_set(&instance->fw_reset_no_pci_access, 1);
3642 megaraid_sas_kill_hba(instance);
3643 return ;
3644 }
3645
3646 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3647 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3648 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3649 ) {
3650 *instance->consumer = *instance->producer;
3651 } else {
3652 *instance->consumer = 0;
3653 *instance->producer = 0;
3654 }
3655
3656 megasas_issue_init_mfi(instance);
3657
3658 spin_lock_irqsave(&instance->hba_lock, flags);
3659 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3660 spin_unlock_irqrestore(&instance->hba_lock, flags);
3661 instance->instancet->enable_intr(instance);
3662
3663 megasas_issue_pending_cmds_again(instance);
3664 instance->issuepend_done = 1;
3665 }
3666 }
3667
3668 /**
3669 * megasas_deplete_reply_queue - Processes all completed commands
3670 * @instance: Adapter soft state
3671 * @alt_status: Alternate status to be returned to
3672 * SCSI mid-layer instead of the status
3673 * returned by the FW
3674 * Note: this must be called with hba lock held
3675 */
3676 static int
3677 megasas_deplete_reply_queue(struct megasas_instance *instance,
3678 u8 alt_status)
3679 {
3680 u32 mfiStatus;
3681 u32 fw_state;
3682
3683 if ((mfiStatus = instance->instancet->check_reset(instance,
3684 instance->reg_set)) == 1) {
3685 return IRQ_HANDLED;
3686 }
3687
3688 mfiStatus = instance->instancet->clear_intr(instance);
3689 if (mfiStatus == 0) {
3690 /* Hardware may not set outbound_intr_status in MSI-X mode */
3691 if (!instance->msix_vectors)
3692 return IRQ_NONE;
3693 }
3694
3695 instance->mfiStatus = mfiStatus;
3696
3697 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3698 fw_state = instance->instancet->read_fw_status_reg(
3699 instance) & MFI_STATE_MASK;
3700
3701 if (fw_state != MFI_STATE_FAULT) {
3702 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3703 fw_state);
3704 }
3705
3706 if ((fw_state == MFI_STATE_FAULT) &&
3707 (instance->disableOnlineCtrlReset == 0)) {
3708 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3709
3710 if ((instance->pdev->device ==
3711 PCI_DEVICE_ID_LSI_SAS1064R) ||
3712 (instance->pdev->device ==
3713 PCI_DEVICE_ID_DELL_PERC5) ||
3714 (instance->pdev->device ==
3715 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3716
3717 *instance->consumer =
3718 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3719 }
3720
3721
3722 instance->instancet->disable_intr(instance);
3723 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3724 instance->issuepend_done = 0;
3725
3726 atomic_set(&instance->fw_outstanding, 0);
3727 megasas_internal_reset_defer_cmds(instance);
3728
3729 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3730 fw_state, atomic_read(&instance->adprecovery));
3731
3732 schedule_work(&instance->work_init);
3733 return IRQ_HANDLED;
3734
3735 } else {
3736 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3737 fw_state, instance->disableOnlineCtrlReset);
3738 }
3739 }
3740
3741 tasklet_schedule(&instance->isr_tasklet);
3742 return IRQ_HANDLED;
3743 }
3744 /**
3745 * megasas_isr - ISR entry point
3746 * @irq: IRQ number
3747 * @devp: IRQ context (struct megasas_irq_context *)
3748 */
3747 static irqreturn_t megasas_isr(int irq, void *devp)
3748 {
3749 struct megasas_irq_context *irq_context = devp;
3750 struct megasas_instance *instance = irq_context->instance;
3751 unsigned long flags;
3752 irqreturn_t rc;
3753
3754 if (atomic_read(&instance->fw_reset_no_pci_access))
3755 return IRQ_HANDLED;
3756
3757 spin_lock_irqsave(&instance->hba_lock, flags);
3758 rc = megasas_deplete_reply_queue(instance, DID_OK);
3759 spin_unlock_irqrestore(&instance->hba_lock, flags);
3760
3761 return rc;
3762 }
3763
3764 /**
3765 * megasas_transition_to_ready - Move the FW to READY state
3766 * @instance: Adapter soft state
3767 * @ocr: Non-zero when called from the reset/recovery path (allows waiting out a FAULT state)
3768 *
3769 * During initialization, the FW can be in any one of several possible
3770 * states. If the FW is in the operational or waiting-for-handshake states,
3771 * the driver must take steps to bring it to the ready state. Otherwise, it
3772 * has to wait for the ready state.
3773 */
3773 int
3774 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3775 {
3776 int i;
3777 u8 max_wait;
3778 u32 fw_state;
3779 u32 cur_state;
3780 u32 abs_state, curr_abs_state;
3781
3782 abs_state = instance->instancet->read_fw_status_reg(instance);
3783 fw_state = abs_state & MFI_STATE_MASK;
3784
3785 if (fw_state != MFI_STATE_READY)
3786 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
3787 " state\n");
3788
3789 while (fw_state != MFI_STATE_READY) {
3790
3791 switch (fw_state) {
3792
3793 case MFI_STATE_FAULT:
3794 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
3795 if (ocr) {
3796 max_wait = MEGASAS_RESET_WAIT_TIME;
3797 cur_state = MFI_STATE_FAULT;
3798 break;
3799 } else
3800 return -ENODEV;
3801
3802 case MFI_STATE_WAIT_HANDSHAKE:
3803 /*
3804 * Set the CLR bit in inbound doorbell
3805 */
3806 if ((instance->pdev->device ==
3807 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3808 (instance->pdev->device ==
3809 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3810 (instance->adapter_type != MFI_SERIES))
3811 writel(
3812 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3813 &instance->reg_set->doorbell);
3814 else
3815 writel(
3816 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
3817 &instance->reg_set->inbound_doorbell);
3818
3819 max_wait = MEGASAS_RESET_WAIT_TIME;
3820 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3821 break;
3822
3823 case MFI_STATE_BOOT_MESSAGE_PENDING:
3824 if ((instance->pdev->device ==
3825 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3826 (instance->pdev->device ==
3827 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3828 (instance->adapter_type != MFI_SERIES))
3829 writel(MFI_INIT_HOTPLUG,
3830 &instance->reg_set->doorbell);
3831 else
3832 writel(MFI_INIT_HOTPLUG,
3833 &instance->reg_set->inbound_doorbell);
3834
3835 max_wait = MEGASAS_RESET_WAIT_TIME;
3836 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3837 break;
3838
3839 case MFI_STATE_OPERATIONAL:
3840 /*
3841 * Bring it to READY state; assuming max wait 10 secs
3842 */
3843 instance->instancet->disable_intr(instance);
3844 if ((instance->pdev->device ==
3845 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
3846 (instance->pdev->device ==
3847 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
3848 (instance->adapter_type != MFI_SERIES)) {
3849 writel(MFI_RESET_FLAGS,
3850 &instance->reg_set->doorbell);
3851
3852 if (instance->adapter_type != MFI_SERIES) {
3853 for (i = 0; i < (10 * 1000); i += 20) {
3854 if (megasas_readl(
3855 instance,
3856 &instance->
3857 reg_set->
3858 doorbell) & 1)
3859 msleep(20);
3860 else
3861 break;
3862 }
3863 }
3864 } else
3865 writel(MFI_RESET_FLAGS,
3866 &instance->reg_set->inbound_doorbell);
3867
3868 max_wait = MEGASAS_RESET_WAIT_TIME;
3869 cur_state = MFI_STATE_OPERATIONAL;
3870 break;
3871
3872 case MFI_STATE_UNDEFINED:
3873 /*
3874 * This state should not last for more than 2 seconds
3875 */
3876 max_wait = MEGASAS_RESET_WAIT_TIME;
3877 cur_state = MFI_STATE_UNDEFINED;
3878 break;
3879
3880 case MFI_STATE_BB_INIT:
3881 max_wait = MEGASAS_RESET_WAIT_TIME;
3882 cur_state = MFI_STATE_BB_INIT;
3883 break;
3884
3885 case MFI_STATE_FW_INIT:
3886 max_wait = MEGASAS_RESET_WAIT_TIME;
3887 cur_state = MFI_STATE_FW_INIT;
3888 break;
3889
3890 case MFI_STATE_FW_INIT_2:
3891 max_wait = MEGASAS_RESET_WAIT_TIME;
3892 cur_state = MFI_STATE_FW_INIT_2;
3893 break;
3894
3895 case MFI_STATE_DEVICE_SCAN:
3896 max_wait = MEGASAS_RESET_WAIT_TIME;
3897 cur_state = MFI_STATE_DEVICE_SCAN;
3898 break;
3899
3900 case MFI_STATE_FLUSH_CACHE:
3901 max_wait = MEGASAS_RESET_WAIT_TIME;
3902 cur_state = MFI_STATE_FLUSH_CACHE;
3903 break;
3904
3905 default:
3906 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
3907 fw_state);
3908 return -ENODEV;
3909 }
3910
3911 /*
3912 * The cur_state should not last for more than max_wait secs
3913 */
3914 for (i = 0; i < max_wait * 50; i++) {
3915 curr_abs_state = instance->instancet->
3916 read_fw_status_reg(instance);
3917
3918 if (abs_state == curr_abs_state) {
3919 msleep(20);
3920 } else
3921 break;
3922 }
3923
3924 /*
3925 * Return error if fw_state hasn't changed after max_wait
3926 */
3927 if (curr_abs_state == abs_state) {
3928 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
3929 "in %d secs\n", fw_state, max_wait);
3930 return -ENODEV;
3931 }
3932
3933 abs_state = curr_abs_state;
3934 fw_state = curr_abs_state & MFI_STATE_MASK;
3935 }
3936 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
3937
3938 return 0;
3939 }
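/*
 * Usage sketch (mirrors callers elsewhere in this file): probe-time code
 * passes ocr=0 and simply fails if the FW never reaches READY, while the
 * recovery work queue passes ocr=1 so a FAULTed controller is waited on:
 *
 *	if (megasas_transition_to_ready(instance, 0))
 *		goto fail_ready_state;
 *
 *	if (megasas_transition_to_ready(instance, 1))
 *		megaraid_sas_kill_hba(instance);
 */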
3940
3941 /**
3942 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
3943 * @instance: Adapter soft state
3944 */
3945 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
3946 {
3947 int i;
3948 u16 max_cmd = instance->max_mfi_cmds;
3949 struct megasas_cmd *cmd;
3950
3951 if (!instance->frame_dma_pool)
3952 return;
3953
3954 /*
3955 * Return all frames to pool
3956 */
3957 for (i = 0; i < max_cmd; i++) {
3958
3959 cmd = instance->cmd_list[i];
3960
3961 if (cmd->frame)
3962 dma_pool_free(instance->frame_dma_pool, cmd->frame,
3963 cmd->frame_phys_addr);
3964
3965 if (cmd->sense)
3966 dma_pool_free(instance->sense_dma_pool, cmd->sense,
3967 cmd->sense_phys_addr);
3968 }
3969
3970 /*
3971 * Now destroy the pool itself
3972 */
3973 dma_pool_destroy(instance->frame_dma_pool);
3974 dma_pool_destroy(instance->sense_dma_pool);
3975
3976 instance->frame_dma_pool = NULL;
3977 instance->sense_dma_pool = NULL;
3978 }
3979
3980 /**
3981 * megasas_create_frame_pool - Creates DMA pool for cmd frames
3982 * @instance: Adapter soft state
3983 *
3984 * Each command packet has an embedded DMA memory buffer that is used for
3985 * filling the MFI frame and the SG list that immediately follows the frame.
3986 * This function creates those DMA memory buffers for each command packet
3987 * using the DMA pool facility.
3988 */
3989 static int megasas_create_frame_pool(struct megasas_instance *instance)
3990 {
3991 int i;
3992 u16 max_cmd;
3993 u32 sge_sz;
3994 u32 frame_count;
3995 struct megasas_cmd *cmd;
3996
3997 max_cmd = instance->max_mfi_cmds;
3998
3999 /*
4000 * Size of our frame is 64 bytes for MFI frame, followed by max SG
4001 * elements and finally SCSI_SENSE_BUFFERSIZE bytes for sense buffer
4002 */
4003 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
4004 sizeof(struct megasas_sge32);
4005
4006 if (instance->flag_ieee)
4007 sge_sz = sizeof(struct megasas_sge_skinny);
4008
4009 /*
4010 * For MFI controllers:
4011 * max_num_sge = 60
4012 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
4013 * Total 960 bytes (15 MFI frames of 64 bytes)
4014 *
4015 * Fusion adapters require only 3 extra frames:
4016 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4017 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
4018 * Total 192 bytes (3 MFI frames of 64 bytes)
4019 */
4020 frame_count = (instance->adapter_type == MFI_SERIES) ?
4021 (15 + 1) : (3 + 1);
4022 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4023 /*
4024 * Use DMA pool facility provided by PCI layer
4025 */
4026 instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4027 &instance->pdev->dev,
4028 instance->mfi_frame_size, 256, 0);
4029
4030 if (!instance->frame_dma_pool) {
4031 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4032 return -ENOMEM;
4033 }
4034
4035 instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4036 &instance->pdev->dev, 128,
4037 4, 0);
4038
4039 if (!instance->sense_dma_pool) {
4040 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4041
4042 dma_pool_destroy(instance->frame_dma_pool);
4043 instance->frame_dma_pool = NULL;
4044
4045 return -ENOMEM;
4046 }
4047
4048 /*
4049 * Allocate and attach a frame to each of the commands in cmd_list.
4050 * By using cmd->index as the context instead of &cmd, we can
4051 * always use a 32-bit context regardless of the architecture.
4052 */
4053 for (i = 0; i < max_cmd; i++) {
4054
4055 cmd = instance->cmd_list[i];
4056
4057 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4058 GFP_KERNEL, &cmd->frame_phys_addr);
4059
4060 cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4061 GFP_KERNEL, &cmd->sense_phys_addr);
4062
4063 /*
4064 * megasas_teardown_frame_pool() takes care of freeing
4065 * whatever has been allocated
4066 */
4067 if (!cmd->frame || !cmd->sense) {
4068 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4069 megasas_teardown_frame_pool(instance);
4070 return -ENOMEM;
4071 }
4072
4073 cmd->frame->io.context = cpu_to_le32(cmd->index);
4074 cmd->frame->io.pad_0 = 0;
4075 if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4076 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4077 }
4078
4079 return 0;
4080 }
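/*
 * Minimal sketch of the dma_pool lifecycle used above (generic kernel
 * API; the names "pool", "vaddr" and "dma_handle" are placeholders):
 *
 *	struct dma_pool *pool = dma_pool_create("name", &pdev->dev,
 *						size, align, 0);
 *	dma_addr_t dma_handle;
 *	void *vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma_handle);
 *	...
 *	dma_pool_free(pool, vaddr, dma_handle);
 *	dma_pool_destroy(pool);
 */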
4081
4082 /**
4083 * megasas_free_cmds - Free all the cmds in the free cmd pool
4084 * @instance: Adapter soft state
4085 */
4086 void megasas_free_cmds(struct megasas_instance *instance)
4087 {
4088 int i;
4089
4090 /* First free the MFI frame pool */
4091 megasas_teardown_frame_pool(instance);
4092
4093 /* Free all the commands in the cmd_list */
4094 for (i = 0; i < instance->max_mfi_cmds; i++)
4095 kfree(instance->cmd_list[i]);
4097
4098 /* Free the cmd_list buffer itself */
4099 kfree(instance->cmd_list);
4100 instance->cmd_list = NULL;
4101
4102 INIT_LIST_HEAD(&instance->cmd_pool);
4103 }
4104
4105 /**
4106 * megasas_alloc_cmds - Allocates the command packets
4107 * @instance: Adapter soft state
4108 *
4109 * Each command that is issued to the FW, whether an IO command from the OS
4110 * or an internal command like an IOCTL, is wrapped in a local data structure
4111 * called megasas_cmd. The frame embedded in this megasas_cmd is actually
4112 * issued to the FW.
4113 *
4114 * Each frame has a 32-bit field called context (tag). This context is used
4115 * to get back the megasas_cmd from the frame when a frame gets completed in
4116 * the ISR. Typically the address of the megasas_cmd itself would be used as
4117 * the context. But we wanted to keep the differences between 32 and 64 bit
4118 * systems to the minimum. We always use 32 bit integers for the context. In
4119 * this driver, the 32 bit values are the indices into an array, cmd_list.
4120 * This array is used only to look up the megasas_cmd given the context. The
4121 * free commands themselves are maintained in a linked list called cmd_pool.
4122 */
4123 int megasas_alloc_cmds(struct megasas_instance *instance)
4124 {
4125 int i;
4126 int j;
4127 u16 max_cmd;
4128 struct megasas_cmd *cmd;
4129
4130 max_cmd = instance->max_mfi_cmds;
4131
4132 /*
4133 * instance->cmd_list is an array of struct megasas_cmd pointers.
4134 * Allocate the dynamic array first and then allocate individual
4135 * commands.
4136 */
4137 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd *), GFP_KERNEL);
4138
4139 if (!instance->cmd_list) {
4140 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4141 return -ENOMEM;
4142 }
4143
4144 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4145
4146 for (i = 0; i < max_cmd; i++) {
4147 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4148 GFP_KERNEL);
4149
4150 if (!instance->cmd_list[i]) {
4151
4152 for (j = 0; j < i; j++)
4153 kfree(instance->cmd_list[j]);
4154
4155 kfree(instance->cmd_list);
4156 instance->cmd_list = NULL;
4157
4158 return -ENOMEM;
4159 }
4160 }
4161
4162 for (i = 0; i < max_cmd; i++) {
4163 cmd = instance->cmd_list[i];
4164 memset(cmd, 0, sizeof(struct megasas_cmd));
4165 cmd->index = i;
4166 cmd->scmd = NULL;
4167 cmd->instance = instance;
4168
4169 list_add_tail(&cmd->list, &instance->cmd_pool);
4170 }
4171
4172 /*
4173 * Create a frame pool and assign one frame to each cmd
4174 */
4175 if (megasas_create_frame_pool(instance)) {
4176 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4177 megasas_free_cmds(instance);
4178 return -ENOMEM;
4179 }
4180
4181 return 0;
4182 }
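/*
 * Illustrative sketch (not part of the driver): because the frame context
 * is set to cmd->index above, completion paths can recover the owning
 * command with a plain array lookup instead of decoding a pointer:
 *
 *	u32 context = le32_to_cpu(frame->io.context);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 */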
4183
4184 /*
4185 * dcmd_timeout_ocr_possible - Check if OCR is possible based on driver/FW state.
4186 * @instance: Adapter soft state
4187 *
4188 * Returns KILL_ADAPTER for MFI adapters, IGNORE_TIMEOUT when driver
4189 * load/unload or an OCR is already in progress, and INITIATE_OCR
4190 * otherwise (Fusion adapters only).
4191 */
4191 inline int
4192 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4193
4194 if (instance->adapter_type == MFI_SERIES)
4195 return KILL_ADAPTER;
4196 else if (instance->unload ||
4197 test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
4198 return IGNORE_TIMEOUT;
4199 else
4200 return INITIATE_OCR;
4201 }
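/*
 * Typical use of the helper above, as repeated in the DCMD_TIMEOUT
 * handling throughout this file:
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:
 *		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
 *		megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
 *		break;
 *	case KILL_ADAPTER:
 *		megaraid_sas_kill_hba(instance);
 *		break;
 *	case IGNORE_TIMEOUT:
 *		break;
 *	}
 */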
4202
4203 static void
4204 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4205 {
4206 int ret;
4207 struct megasas_cmd *cmd;
4208 struct megasas_dcmd_frame *dcmd;
4209
4210 struct MR_PRIV_DEVICE *mr_device_priv_data;
4211 u16 device_id = 0;
4212
4213 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4214 cmd = megasas_get_cmd(instance);
4215
4216 if (!cmd) {
4217 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4218 return;
4219 }
4220
4221 dcmd = &cmd->frame->dcmd;
4222
4223 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4224 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4225
4226 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4227 dcmd->cmd = MFI_CMD_DCMD;
4228 dcmd->cmd_status = 0xFF;
4229 dcmd->sge_count = 1;
4230 dcmd->flags = MFI_FRAME_DIR_READ;
4231 dcmd->timeout = 0;
4232 dcmd->pad_0 = 0;
4233 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4234 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4235
4236 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4237 sizeof(struct MR_PD_INFO));
4238
4239 if ((instance->adapter_type != MFI_SERIES) &&
4240 !instance->mask_interrupts)
4241 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4242 else
4243 ret = megasas_issue_polled(instance, cmd);
4244
4245 switch (ret) {
4246 case DCMD_SUCCESS:
4247 mr_device_priv_data = sdev->hostdata;
4248 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4249 mr_device_priv_data->interface_type =
4250 instance->pd_info->state.ddf.pdType.intf;
4251 break;
4252
4253 case DCMD_TIMEOUT:
4254
4255 switch (dcmd_timeout_ocr_possible(instance)) {
4256 case INITIATE_OCR:
4257 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4258 megasas_reset_fusion(instance->host,
4259 MFI_IO_TIMEOUT_OCR);
4260 break;
4261 case KILL_ADAPTER:
4262 megaraid_sas_kill_hba(instance);
4263 break;
4264 case IGNORE_TIMEOUT:
4265 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4266 __func__, __LINE__);
4267 break;
4268 }
4269
4270 break;
4271 }
4272
4273 if (ret != DCMD_TIMEOUT)
4274 megasas_return_cmd(instance, cmd);
4275
4276 return;
4277 }
4278 /*
4279 * megasas_get_pd_list - Returns FW's pd_list structure
4280 * @instance: Adapter soft state
4281 *
4282 * Issues an internal command (DCMD) to get the FW's controller PD
4283 * list structure. This information is mainly used to find out the
4284 * system physical drives (system PDs) exposed by the FW.
4285 */
4287 static int
4288 megasas_get_pd_list(struct megasas_instance *instance)
4289 {
4290 int ret = 0, pd_index = 0;
4291 struct megasas_cmd *cmd;
4292 struct megasas_dcmd_frame *dcmd;
4293 struct MR_PD_LIST *ci;
4294 struct MR_PD_ADDRESS *pd_addr;
4295 dma_addr_t ci_h = 0;
4296
4297 if (instance->pd_list_not_supported) {
4298 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4299 "not supported by firmware\n");
4300 return ret;
4301 }
4302
4303 ci = instance->pd_list_buf;
4304 ci_h = instance->pd_list_buf_h;
4305
4306 cmd = megasas_get_cmd(instance);
4307
4308 if (!cmd) {
4309 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4310 return -ENOMEM;
4311 }
4312
4313 dcmd = &cmd->frame->dcmd;
4314
4315 memset(ci, 0, sizeof(*ci));
4316 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4317
4318 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4319 dcmd->mbox.b[1] = 0;
4320 dcmd->cmd = MFI_CMD_DCMD;
4321 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4322 dcmd->sge_count = 1;
4323 dcmd->flags = MFI_FRAME_DIR_READ;
4324 dcmd->timeout = 0;
4325 dcmd->pad_0 = 0;
4326 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4327 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4328
4329 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4330 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4331
4332 if ((instance->adapter_type != MFI_SERIES) &&
4333 !instance->mask_interrupts)
4334 ret = megasas_issue_blocked_cmd(instance, cmd,
4335 MFI_IO_TIMEOUT_SECS);
4336 else
4337 ret = megasas_issue_polled(instance, cmd);
4338
4339 switch (ret) {
4340 case DCMD_FAILED:
4341 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4342 "failed/not supported by firmware\n");
4343
4344 if (instance->adapter_type != MFI_SERIES)
4345 megaraid_sas_kill_hba(instance);
4346 else
4347 instance->pd_list_not_supported = 1;
4348 break;
4349 case DCMD_TIMEOUT:
4350
4351 switch (dcmd_timeout_ocr_possible(instance)) {
4352 case INITIATE_OCR:
4353 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4354 /*
4355 * DCMD failed from the AEN path.
4356 * The AEN path already holds reset_mutex to avoid PCI access
4357 * while OCR is in progress.
4358 */
4359 mutex_unlock(&instance->reset_mutex);
4360 megasas_reset_fusion(instance->host,
4361 MFI_IO_TIMEOUT_OCR);
4362 mutex_lock(&instance->reset_mutex);
4363 break;
4364 case KILL_ADAPTER:
4365 megaraid_sas_kill_hba(instance);
4366 break;
4367 case IGNORE_TIMEOUT:
4368 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4369 __func__, __LINE__);
4370 break;
4371 }
4372
4373 break;
4374
4375 case DCMD_SUCCESS:
4376 pd_addr = ci->addr;
4377
4378 if ((le32_to_cpu(ci->count) >
4379 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4380 break;
4381
4382 memset(instance->local_pd_list, 0,
4383 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4384
4385 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4386 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4387 le16_to_cpu(pd_addr->deviceId);
4388 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4389 pd_addr->scsiDevType;
4390 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4391 MR_PD_STATE_SYSTEM;
4392 pd_addr++;
4393 }
4394
4395 memcpy(instance->pd_list, instance->local_pd_list,
4396 sizeof(instance->pd_list));
4397 break;
4398
4399 }
4400
4401 if (ret != DCMD_TIMEOUT)
4402 megasas_return_cmd(instance, cmd);
4403
4404 return ret;
4405 }
4406
4407 /*
4408 * megasas_get_ld_list - Returns FW's ld_list structure
4409 * @instance: Adapter soft state
4410 *
4411 * Issues an internal command (DCMD) to get the FW's logical drive (LD)
4412 * list. This information is mainly used to find out the LDs exposed
4413 * by the FW.
4414 */
4416 static int
4417 megasas_get_ld_list(struct megasas_instance *instance)
4418 {
4419 int ret = 0, ld_index = 0, ids = 0;
4420 struct megasas_cmd *cmd;
4421 struct megasas_dcmd_frame *dcmd;
4422 struct MR_LD_LIST *ci;
4423 dma_addr_t ci_h = 0;
4424 u32 ld_count;
4425
4426 ci = instance->ld_list_buf;
4427 ci_h = instance->ld_list_buf_h;
4428
4429 cmd = megasas_get_cmd(instance);
4430
4431 if (!cmd) {
4432 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4433 return -ENOMEM;
4434 }
4435
4436 dcmd = &cmd->frame->dcmd;
4437
4438 memset(ci, 0, sizeof(*ci));
4439 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4440
4441 if (instance->supportmax256vd)
4442 dcmd->mbox.b[0] = 1;
4443 dcmd->cmd = MFI_CMD_DCMD;
4444 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4445 dcmd->sge_count = 1;
4446 dcmd->flags = MFI_FRAME_DIR_READ;
4447 dcmd->timeout = 0;
4448 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4449 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4450 dcmd->pad_0 = 0;
4451
4452 megasas_set_dma_settings(instance, dcmd, ci_h,
4453 sizeof(struct MR_LD_LIST));
4454
4455 if ((instance->adapter_type != MFI_SERIES) &&
4456 !instance->mask_interrupts)
4457 ret = megasas_issue_blocked_cmd(instance, cmd,
4458 MFI_IO_TIMEOUT_SECS);
4459 else
4460 ret = megasas_issue_polled(instance, cmd);
4461
4462 ld_count = le32_to_cpu(ci->ldCount);
4463
4464 switch (ret) {
4465 case DCMD_FAILED:
4466 megaraid_sas_kill_hba(instance);
4467 break;
4468 case DCMD_TIMEOUT:
4469
4470 switch (dcmd_timeout_ocr_possible(instance)) {
4471 case INITIATE_OCR:
4472 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4473 /*
4474 * DCMD failed from the AEN path.
4475 * The AEN path already holds reset_mutex to avoid PCI access
4476 * while OCR is in progress.
4477 */
4478 mutex_unlock(&instance->reset_mutex);
4479 megasas_reset_fusion(instance->host,
4480 MFI_IO_TIMEOUT_OCR);
4481 mutex_lock(&instance->reset_mutex);
4482 break;
4483 case KILL_ADAPTER:
4484 megaraid_sas_kill_hba(instance);
4485 break;
4486 case IGNORE_TIMEOUT:
4487 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4488 __func__, __LINE__);
4489 break;
4490 }
4491
4492 break;
4493
4494 case DCMD_SUCCESS:
4495 if (ld_count > instance->fw_supported_vd_count)
4496 break;
4497
4498 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4499
4500 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4501 if (ci->ldList[ld_index].state != 0) {
4502 ids = ci->ldList[ld_index].ref.targetId;
4503 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4504 }
4505 }
4506
4507 break;
4508 }
4509
4510 if (ret != DCMD_TIMEOUT)
4511 megasas_return_cmd(instance, cmd);
4512
4513 return ret;
4514 }
4515
4516 /**
4517 * megasas_ld_list_query - Returns FW's LD target ID list
4518 * @instance: Adapter soft state
4519 * @query_type: Query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4520 *
4521 * Issues an internal command (DCMD) to get the FW's LD target ID list.
4522 * This information is mainly used to find out the LDs exposed by the FW.
4523 */
4525 static int
4526 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4527 {
4528 int ret = 0, ld_index = 0, ids = 0;
4529 struct megasas_cmd *cmd;
4530 struct megasas_dcmd_frame *dcmd;
4531 struct MR_LD_TARGETID_LIST *ci;
4532 dma_addr_t ci_h = 0;
4533 u32 tgtid_count;
4534
4535 ci = instance->ld_targetid_list_buf;
4536 ci_h = instance->ld_targetid_list_buf_h;
4537
4538 cmd = megasas_get_cmd(instance);
4539
4540 if (!cmd) {
4541 dev_warn(&instance->pdev->dev,
4542 "megasas_ld_list_query: Failed to get cmd\n");
4543 return -ENOMEM;
4544 }
4545
4546 dcmd = &cmd->frame->dcmd;
4547
4548 memset(ci, 0, sizeof(*ci));
4549 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4550
4551 dcmd->mbox.b[0] = query_type;
4552 if (instance->supportmax256vd)
4553 dcmd->mbox.b[2] = 1;
4554
4555 dcmd->cmd = MFI_CMD_DCMD;
4556 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4557 dcmd->sge_count = 1;
4558 dcmd->flags = MFI_FRAME_DIR_READ;
4559 dcmd->timeout = 0;
4560 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4561 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4562 dcmd->pad_0 = 0;
4563
4564 megasas_set_dma_settings(instance, dcmd, ci_h,
4565 sizeof(struct MR_LD_TARGETID_LIST));
4566
4567 if ((instance->adapter_type != MFI_SERIES) &&
4568 !instance->mask_interrupts)
4569 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4570 else
4571 ret = megasas_issue_polled(instance, cmd);
4572
4573 switch (ret) {
4574 case DCMD_FAILED:
4575 dev_info(&instance->pdev->dev,
4576 "DCMD not supported by firmware - %s %d\n",
4577 __func__, __LINE__);
4578 ret = megasas_get_ld_list(instance);
4579 break;
4580 case DCMD_TIMEOUT:
4581 switch (dcmd_timeout_ocr_possible(instance)) {
4582 case INITIATE_OCR:
4583 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4584 /*
4585 * DCMD failed from the AEN path.
4586 * The AEN path already holds reset_mutex to avoid PCI access
4587 * while OCR is in progress.
4588 */
4589 mutex_unlock(&instance->reset_mutex);
4590 megasas_reset_fusion(instance->host,
4591 MFI_IO_TIMEOUT_OCR);
4592 mutex_lock(&instance->reset_mutex);
4593 break;
4594 case KILL_ADAPTER:
4595 megaraid_sas_kill_hba(instance);
4596 break;
4597 case IGNORE_TIMEOUT:
4598 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4599 __func__, __LINE__);
4600 break;
4601 }
4602
4603 break;
4604 case DCMD_SUCCESS:
4605 tgtid_count = le32_to_cpu(ci->count);
4606
4607 if ((tgtid_count > (instance->fw_supported_vd_count)))
4608 break;
4609
4610 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4611 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4612 ids = ci->targetId[ld_index];
4613 instance->ld_ids[ids] = ci->targetId[ld_index];
4614 }
4615
4616 break;
4617 }
4618
4619 if (ret != DCMD_TIMEOUT)
4620 megasas_return_cmd(instance, cmd);
4621
4622 return ret;
4623 }
4624
4625 /**
4626 * megasas_host_device_list_query - Get the combined PD/LD device list from FW
4627 * @instance: Adapter soft state
4628 * @is_probe: Driver probe check
4629 *
4630 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
4631 * dcmd.mbox - reserved
4632 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
4633 * Desc: This DCMD will return the combined device list
4634 * Status: MFI_STAT_OK - List returned successfully
4635 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4636 * disabled
4637 * Return: 0 if the DCMD succeeded, non-zero if it failed
4638 */
4638 static int
4639 megasas_host_device_list_query(struct megasas_instance *instance,
4640 bool is_probe)
4641 {
4642 int ret, i, target_id;
4643 struct megasas_cmd *cmd;
4644 struct megasas_dcmd_frame *dcmd;
4645 struct MR_HOST_DEVICE_LIST *ci;
4646 u32 count;
4647 dma_addr_t ci_h;
4648
4649 ci = instance->host_device_list_buf;
4650 ci_h = instance->host_device_list_buf_h;
4651
4652 cmd = megasas_get_cmd(instance);
4653
4654 if (!cmd) {
4655 dev_warn(&instance->pdev->dev,
4656 "%s: failed to get cmd\n",
4657 __func__);
4658 return -ENOMEM;
4659 }
4660
4661 dcmd = &cmd->frame->dcmd;
4662
4663 memset(ci, 0, sizeof(*ci));
4664 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4665
4666 dcmd->mbox.b[0] = is_probe ? 0 : 1;
4667 dcmd->cmd = MFI_CMD_DCMD;
4668 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4669 dcmd->sge_count = 1;
4670 dcmd->flags = MFI_FRAME_DIR_READ;
4671 dcmd->timeout = 0;
4672 dcmd->pad_0 = 0;
4673 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4674 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4675
4676 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4677
4678 if (!instance->mask_interrupts) {
4679 ret = megasas_issue_blocked_cmd(instance, cmd,
4680 MFI_IO_TIMEOUT_SECS);
4681 } else {
4682 ret = megasas_issue_polled(instance, cmd);
4683 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4684 }
4685
4686 switch (ret) {
4687 case DCMD_SUCCESS:
4688 /* Fill the internal pd_list and ld_ids array based on
4689 * targetIds returned by FW
4690 */
4691 count = le32_to_cpu(ci->count);
4692
4693 memset(instance->local_pd_list, 0,
4694 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4695 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4696 for (i = 0; i < count; i++) {
4697 target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4698 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4699 instance->local_pd_list[target_id].tid = target_id;
4700 instance->local_pd_list[target_id].driveType =
4701 ci->host_device_list[i].scsi_type;
4702 instance->local_pd_list[target_id].driveState =
4703 MR_PD_STATE_SYSTEM;
4704 } else {
4705 instance->ld_ids[target_id] = target_id;
4706 }
4707 }
4708
4709 memcpy(instance->pd_list, instance->local_pd_list,
4710 sizeof(instance->pd_list));
4711 break;
4712
4713 case DCMD_TIMEOUT:
4714 switch (dcmd_timeout_ocr_possible(instance)) {
4715 case INITIATE_OCR:
4716 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4717 megasas_reset_fusion(instance->host,
4718 MFI_IO_TIMEOUT_OCR);
4719 break;
4720 case KILL_ADAPTER:
4721 megaraid_sas_kill_hba(instance);
4722 break;
4723 case IGNORE_TIMEOUT:
4724 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4725 __func__, __LINE__);
4726 break;
4727 }
4728 break;
4729 case DCMD_FAILED:
4730 dev_err(&instance->pdev->dev,
4731 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4732 __func__);
4733 break;
4734 }
4735
4736 if (ret != DCMD_TIMEOUT)
4737 megasas_return_cmd(instance, cmd);
4738
4739 return ret;
4740 }
4741
4742 /*
4743 * megasas_update_ext_vd_details - Update details w.r.t. Extended VDs
4744 * @instance: Controller's soft state
4745 */
4746 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4747 {
4748 struct fusion_context *fusion;
4749 u32 ventura_map_sz = 0;
4750
4751 fusion = instance->ctrl_context;
4752 /* For MFI based controllers return dummy success */
4753 if (!fusion)
4754 return;
4755
4756 instance->supportmax256vd =
4757 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
4758 /* Below is an additional check to address future FW enhancements */
4759 if (instance->ctrl_info_buf->max_lds > 64)
4760 instance->supportmax256vd = 1;
4761
4762 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
4763 * MEGASAS_MAX_DEV_PER_CHANNEL;
4764 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
4765 * MEGASAS_MAX_DEV_PER_CHANNEL;
4766 if (instance->supportmax256vd) {
4767 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
4768 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4769 } else {
4770 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4771 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4772 }
4773
4774 dev_info(&instance->pdev->dev,
4775 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
4776 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
4777 instance->ctrl_info_buf->max_lds);
4778
4779 if (instance->max_raid_mapsize) {
4780 ventura_map_sz = instance->max_raid_mapsize *
4781 MR_MIN_MAP_SIZE; /* 64k */
4782 fusion->current_map_sz = ventura_map_sz;
4783 fusion->max_map_sz = ventura_map_sz;
4784 } else {
4785 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
4786 (sizeof(struct MR_LD_SPAN_MAP) *
4787 (instance->fw_supported_vd_count - 1));
4788 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
4789
4790 fusion->max_map_sz =
4791 max(fusion->old_map_sz, fusion->new_map_sz);
4792
4793 if (instance->supportmax256vd)
4794 fusion->current_map_sz = fusion->new_map_sz;
4795 else
4796 fusion->current_map_sz = fusion->old_map_sz;
4797 }
4798 /* irrespective of FW raid maps, driver raid map is constant */
4799 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
4800 }
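/*
 * Worked example (the value 4 is hypothetical): if the FW reports
 * max_raid_mapsize = 4, the Ventura-style map size above is simply
 * 4 * MR_MIN_MAP_SIZE = 4 * 64K = 256K, used for both current_map_sz
 * and max_map_sz.
 */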
4801
4802 /*
4803 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
4804 * dcmd.hdr.length - number of bytes to read
4805 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
4806 * Desc: Fill in snapdump properties
4807 * Status: MFI_STAT_OK- Command successful
4808 */
4809 void megasas_get_snapdump_properties(struct megasas_instance *instance)
4810 {
4811 int ret = 0;
4812 struct megasas_cmd *cmd;
4813 struct megasas_dcmd_frame *dcmd;
4814 struct MR_SNAPDUMP_PROPERTIES *ci;
4815 dma_addr_t ci_h = 0;
4816
4817 ci = instance->snapdump_prop;
4818 ci_h = instance->snapdump_prop_h;
4819
4820 if (!ci)
4821 return;
4822
4823 cmd = megasas_get_cmd(instance);
4824
4825 if (!cmd) {
4826 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
4827 return;
4828 }
4829
4830 dcmd = &cmd->frame->dcmd;
4831
4832 memset(ci, 0, sizeof(*ci));
4833 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4834
4835 dcmd->cmd = MFI_CMD_DCMD;
4836 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4837 dcmd->sge_count = 1;
4838 dcmd->flags = MFI_FRAME_DIR_READ;
4839 dcmd->timeout = 0;
4840 dcmd->pad_0 = 0;
4841 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
4842 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
4843
4844 megasas_set_dma_settings(instance, dcmd, ci_h,
4845 sizeof(struct MR_SNAPDUMP_PROPERTIES));
4846
4847 if (!instance->mask_interrupts) {
4848 ret = megasas_issue_blocked_cmd(instance, cmd,
4849 MFI_IO_TIMEOUT_SECS);
4850 } else {
4851 ret = megasas_issue_polled(instance, cmd);
4852 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4853 }
4854
4855 switch (ret) {
4856 case DCMD_SUCCESS:
4857 instance->snapdump_wait_time =
4858 min_t(u8, ci->trigger_min_num_sec_before_ocr,
4859 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
4860 break;
4861
4862 case DCMD_TIMEOUT:
4863 switch (dcmd_timeout_ocr_possible(instance)) {
4864 case INITIATE_OCR:
4865 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4866 megasas_reset_fusion(instance->host,
4867 MFI_IO_TIMEOUT_OCR);
4868 break;
4869 case KILL_ADAPTER:
4870 megaraid_sas_kill_hba(instance);
4871 break;
4872 case IGNORE_TIMEOUT:
4873 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4874 __func__, __LINE__);
4875 break;
4876 }
4877 }
4878
4879 if (ret != DCMD_TIMEOUT)
4880 megasas_return_cmd(instance, cmd);
4881 }
4882
4883 /**
4884 * megasas_get_ctrl_info - Returns FW's controller structure
4885 * @instance: Adapter soft state
4886 *
4887 * Issues an internal command (DCMD) to get the FW's controller structure.
4888 * This information is mainly used to find out the maximum IO transfer per
4889 * command supported by the FW.
4890 */
4891 int
4892 megasas_get_ctrl_info(struct megasas_instance *instance)
4893 {
4894 int ret = 0;
4895 struct megasas_cmd *cmd;
4896 struct megasas_dcmd_frame *dcmd;
4897 struct megasas_ctrl_info *ci;
4898 dma_addr_t ci_h = 0;
4899
4900 ci = instance->ctrl_info_buf;
4901 ci_h = instance->ctrl_info_buf_h;
4902
4903 cmd = megasas_get_cmd(instance);
4904
4905 if (!cmd) {
4906 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
4907 return -ENOMEM;
4908 }
4909
4910 dcmd = &cmd->frame->dcmd;
4911
4912 memset(ci, 0, sizeof(*ci));
4913 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4914
4915 dcmd->cmd = MFI_CMD_DCMD;
4916 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4917 dcmd->sge_count = 1;
4918 dcmd->flags = MFI_FRAME_DIR_READ;
4919 dcmd->timeout = 0;
4920 dcmd->pad_0 = 0;
4921 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4922 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
4923 dcmd->mbox.b[0] = 1;
4924
4925 megasas_set_dma_settings(instance, dcmd, ci_h,
4926 sizeof(struct megasas_ctrl_info));
4927
4928 if ((instance->adapter_type != MFI_SERIES) &&
4929 !instance->mask_interrupts) {
4930 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4931 } else {
4932 ret = megasas_issue_polled(instance, cmd);
4933 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4934 }
4935
4936 switch (ret) {
4937 case DCMD_SUCCESS:
4938 /* Save required controller information in
4939 * CPU endianness format.
4940 */
4941 le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
4942 le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
4943 le32_to_cpus((u32 *)&ci->adapterOperations2);
4944 le32_to_cpus((u32 *)&ci->adapterOperations3);
4945 le16_to_cpus((u16 *)&ci->adapter_operations4);
4946
4947 /* Update the latest Ext VD info.
4948 * From the init path, store current firmware details.
4949 * From the OCR path, detect any firmware property changes,
4950 * e.g. in case of a firmware upgrade without a system reboot.
4951 */
4952 megasas_update_ext_vd_details(instance);
4953 instance->use_seqnum_jbod_fp =
4954 ci->adapterOperations3.useSeqNumJbodFP;
4955 instance->support_morethan256jbod =
4956 ci->adapter_operations4.support_pd_map_target_id;
4957 instance->support_nvme_passthru =
4958 ci->adapter_operations4.support_nvme_passthru;
4959 instance->task_abort_tmo = ci->TaskAbortTO;
4960 instance->max_reset_tmo = ci->MaxResetTO;
4961
4962 /* Check whether the controller is iMR or MR */
4963 instance->is_imr = (ci->memory_size ? 0 : 1);
4964
4965 instance->snapdump_wait_time =
4966 (ci->properties.on_off_properties2.enable_snap_dump ?
4967 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
4968
4969 instance->enable_fw_dev_list =
4970 ci->properties.on_off_properties2.enable_fw_dev_list;
4971
4972 dev_info(&instance->pdev->dev,
4973 "controller type\t: %s(%dMB)\n",
4974 instance->is_imr ? "iMR" : "MR",
4975 le16_to_cpu(ci->memory_size));
4976
4977 instance->disableOnlineCtrlReset =
4978 ci->properties.OnOffProperties.disableOnlineCtrlReset;
4979 instance->secure_jbod_support =
4980 ci->adapterOperations3.supportSecurityonJBOD;
4981 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
4982 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
4983 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
4984 instance->secure_jbod_support ? "Yes" : "No");
4985 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
4986 instance->support_nvme_passthru ? "Yes" : "No");
4987 dev_info(&instance->pdev->dev,
4988 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
4989 instance->task_abort_tmo, instance->max_reset_tmo);
4990
4991 break;
4992
4993 case DCMD_TIMEOUT:
4994 switch (dcmd_timeout_ocr_possible(instance)) {
4995 case INITIATE_OCR:
4996 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4997 megasas_reset_fusion(instance->host,
4998 MFI_IO_TIMEOUT_OCR);
4999 break;
5000 case KILL_ADAPTER:
5001 megaraid_sas_kill_hba(instance);
5002 break;
5003 case IGNORE_TIMEOUT:
5004 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5005 __func__, __LINE__);
5006 break;
5007 }
5008 break;
5009 case DCMD_FAILED:
5010 megaraid_sas_kill_hba(instance);
5011 break;
5012
5013 }
5014
5015 if (ret != DCMD_TIMEOUT)
5016 megasas_return_cmd(instance, cmd);
5017
5018 return ret;
5019 }
5020
5021 /*
5022 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
5023 * to firmware
5024 * @instance: Adapter soft state
5025 * @crash_buf_state: tell FW to turn the crash dump feature ON/OFF
5026 *		MR_CRASH_BUF_TURN_OFF = 0
5027 *		MR_CRASH_BUF_TURN_ON = 1
5028 *
5029 * Returns 0 on success, non-zero on failure.
5030 *
5031 * Issues an internal command (DCMD) to set parameters for the crash dump
5032 * feature. The driver sends the address of the crash dump DMA buffer and
5033 * sets the mbox to tell the FW that the driver supports crash dump. This
5034 * DCMD is sent only if the crash dump feature is supported by the FW.
5035 */
5036 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5037 u8 crash_buf_state)
5038 {
5039 int ret = 0;
5040 struct megasas_cmd *cmd;
5041 struct megasas_dcmd_frame *dcmd;
5042
5043 cmd = megasas_get_cmd(instance);
5044
5045 if (!cmd) {
5046 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5047 return -ENOMEM;
5048 }
5049
5050
5051 dcmd = &cmd->frame->dcmd;
5052
5053 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5054 dcmd->mbox.b[0] = crash_buf_state;
5055 dcmd->cmd = MFI_CMD_DCMD;
5056 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5057 dcmd->sge_count = 1;
5058 dcmd->flags = MFI_FRAME_DIR_NONE;
5059 dcmd->timeout = 0;
5060 dcmd->pad_0 = 0;
5061 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5062 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5063
5064 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5065 CRASH_DMA_BUF_SIZE);
5066
5067 if ((instance->adapter_type != MFI_SERIES) &&
5068 !instance->mask_interrupts)
5069 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5070 else
5071 ret = megasas_issue_polled(instance, cmd);
5072
5073 if (ret == DCMD_TIMEOUT) {
5074 switch (dcmd_timeout_ocr_possible(instance)) {
5075 case INITIATE_OCR:
5076 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5077 megasas_reset_fusion(instance->host,
5078 MFI_IO_TIMEOUT_OCR);
5079 break;
5080 case KILL_ADAPTER:
5081 megaraid_sas_kill_hba(instance);
5082 break;
5083 case IGNORE_TIMEOUT:
5084 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5085 __func__, __LINE__);
5086 break;
5087 }
5088 } else
5089 megasas_return_cmd(instance, cmd);
5090
5091 return ret;
5092 }
5093
5094 /**
5095 * megasas_issue_init_mfi - Initializes the FW
5096 * @instance: Adapter soft state
5097 *
5098 * Issues the INIT MFI cmd
5099 */
5100 static int
5101 megasas_issue_init_mfi(struct megasas_instance *instance)
5102 {
5103 __le32 context;
5104 struct megasas_cmd *cmd;
5105 struct megasas_init_frame *init_frame;
5106 struct megasas_init_queue_info *initq_info;
5107 dma_addr_t init_frame_h;
5108 dma_addr_t initq_info_h;
5109
5110 /*
5111 * Prepare an init frame. Note the init frame points to the queue info
5112 * structure. Each frame has its SGL allocated after the first 64 bytes.
5113 * For this frame - since we don't need any SGL - we use the SGL's space
5114 * as the queue info structure.
5115 *
5116 * We will not get a NULL command below. We just created the pool.
5117 */
5118 cmd = megasas_get_cmd(instance);
5119
5120 init_frame = (struct megasas_init_frame *)cmd->frame;
5121 initq_info = (struct megasas_init_queue_info *)
5122 ((unsigned long)init_frame + 64);
5123
5124 init_frame_h = cmd->frame_phys_addr;
5125 initq_info_h = init_frame_h + 64;
5126
5127 context = init_frame->context;
5128 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5129 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5130 init_frame->context = context;
5131
5132 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5133 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5134
5135 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5136 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5137
5138 init_frame->cmd = MFI_CMD_INIT;
5139 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5140 init_frame->queue_info_new_phys_addr_lo =
5141 cpu_to_le32(lower_32_bits(initq_info_h));
5142 init_frame->queue_info_new_phys_addr_hi =
5143 cpu_to_le32(upper_32_bits(initq_info_h));
5144
5145 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5146
5147 /*
5148 * disable the intr before firing the init frame to FW
5149 */
5150 instance->instancet->disable_intr(instance);
5151
5152 /*
5153 * Issue the init frame in polled mode
5154 */
5155
5156 if (megasas_issue_polled(instance, cmd)) {
5157 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5158 megasas_return_cmd(instance, cmd);
5159 goto fail_fw_init;
5160 }
5161
5162 megasas_return_cmd(instance, cmd);
5163
5164 return 0;
5165
5166 fail_fw_init:
5167 return -EINVAL;
5168 }
5169
5170 static u32
5171 megasas_init_adapter_mfi(struct megasas_instance *instance)
5172 {
5173 u32 context_sz;
5174 u32 reply_q_sz;
5175
5176 /*
5177 * Get various operational parameters from status register
5178 */
5179 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5180 /*
5181 * Reduce the max supported cmds by 1. This is to ensure that the
5182 * reply_q_sz (1 more than the max cmd that driver may send)
5183 * does not exceed max cmds that the FW can support
5184 */
5185 instance->max_fw_cmds = instance->max_fw_cmds-1;
5186 instance->max_mfi_cmds = instance->max_fw_cmds;
5187 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5188 0x10;
5189 /*
5190 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5191 * are reserved for IOCTL + driver's internal DCMDs.
5192 */
5193 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5194 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5195 instance->max_scsi_cmds = (instance->max_fw_cmds -
5196 MEGASAS_SKINNY_INT_CMDS);
5197 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5198 } else {
5199 instance->max_scsi_cmds = (instance->max_fw_cmds -
5200 MEGASAS_INT_CMDS);
5201 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5202 }
5203
5204 instance->cur_can_queue = instance->max_scsi_cmds;
5205 /*
5206 * Create a pool of commands
5207 */
5208 if (megasas_alloc_cmds(instance))
5209 goto fail_alloc_cmds;
5210
5211 /*
5212 * Allocate memory for reply queue. Length of reply queue should
5213 * be _one_ more than the maximum commands handled by the firmware.
5214 *
5215 * Note: When FW completes commands, it places corresponding context
5216 * values in this circular reply queue. This circular queue is a fairly
5217 * typical producer-consumer queue. FW is the producer (of completed
5218 * commands) and the driver is the consumer.
5219 */
5220 context_sz = sizeof(u32);
5221 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5222
5223 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5224 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5225
5226 if (!instance->reply_queue) {
5227 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5228 goto fail_reply_queue;
5229 }
5230
5231 if (megasas_issue_init_mfi(instance))
5232 goto fail_fw_init;
5233
5234 if (megasas_get_ctrl_info(instance)) {
5235 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5236 "Fail from %s %d\n", instance->unique_id,
5237 __func__, __LINE__);
5238 goto fail_fw_init;
5239 }
5240
5241 instance->fw_support_ieee = 0;
5242 instance->fw_support_ieee =
5243 (instance->instancet->read_fw_status_reg(instance) &
5244 0x04000000);
5245
5246 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5247 instance->fw_support_ieee);
5248
5249 if (instance->fw_support_ieee)
5250 instance->flag_ieee = 1;
5251
5252 return 0;
5253
5254 fail_fw_init:
5255
5256 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5257 instance->reply_queue, instance->reply_queue_h);
5258 fail_reply_queue:
5259 megasas_free_cmds(instance);
5260
5261 fail_alloc_cmds:
5262 return 1;
5263 }
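/*
 * Worked example (the FW value is hypothetical): if the status register
 * reports 1009 supported commands, max_fw_cmds becomes 1008 after the
 * "minus one" adjustment above, and the reply queue then needs
 * reply_q_sz = sizeof(u32) * (1008 + 1) = 4036 bytes of coherent DMA.
 */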
5264
5265 /*
5266 * megasas_setup_irqs_ioapic - register legacy interrupts.
5267 * @instance: Adapter soft state
5268 *
5269 * Do not enable interrupts, only set up ISRs.
5270 *
5271 * Return 0 on success.
5272 */
5273 static int
5274 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5275 {
5276 struct pci_dev *pdev;
5277
5278 pdev = instance->pdev;
5279 instance->irq_context[0].instance = instance;
5280 instance->irq_context[0].MSIxIndex = 0;
5281 if (request_irq(pci_irq_vector(pdev, 0),
5282 instance->instancet->service_isr, IRQF_SHARED,
5283 "megasas", &instance->irq_context[0])) {
5284 dev_err(&instance->pdev->dev,
5285 "Failed to register IRQ from %s %d\n",
5286 __func__, __LINE__);
5287 return -1;
5288 }
5289 return 0;
5290 }
5291
5292 /**
5293 * megasas_setup_irqs_msix - register MSI-x interrupts.
5294 * @instance: Adapter soft state
5295 * @is_probe: Driver probe check
5296 *
5297 * Do not enable interrupts, only set up ISRs.
5298 *
5299 * Return 0 on success.
5300 */
5301 static int
5302 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5303 {
5304 int i, j;
5305 struct pci_dev *pdev;
5306
5307 pdev = instance->pdev;
5308
5309 /* Try MSI-x */
5310 for (i = 0; i < instance->msix_vectors; i++) {
5311 instance->irq_context[i].instance = instance;
5312 instance->irq_context[i].MSIxIndex = i;
5313 if (request_irq(pci_irq_vector(pdev, i),
5314 instance->instancet->service_isr, 0, "megasas",
5315 &instance->irq_context[i])) {
5316 dev_err(&instance->pdev->dev,
5317 "Failed to register IRQ for vector %d.\n", i);
5318 for (j = 0; j < i; j++)
5319 free_irq(pci_irq_vector(pdev, j),
5320 &instance->irq_context[j]);
5321 /* Retry irq register for IO_APIC*/
5322 instance->msix_vectors = 0;
5323 if (is_probe) {
5324 pci_free_irq_vectors(instance->pdev);
5325 return megasas_setup_irqs_ioapic(instance);
5326 } else {
5327 return -1;
5328 }
5329 }
5330 }
5331 return 0;
5332 }
5333
5334 /*
5335 * megasas_destroy_irqs - unregister interrupts.
5336 * @instance: Adapter soft state
5337 * return: void
5338 */
5339 static void
5340 megasas_destroy_irqs(struct megasas_instance *instance)
5341 {
5342 int i;
5343
5344 if (instance->msix_vectors)
5345 for (i = 0; i < instance->msix_vectors; i++) {
5346 free_irq(pci_irq_vector(instance->pdev, i),
5347 &instance->irq_context[i]);
5348 }
5349 else
5350 free_irq(pci_irq_vector(instance->pdev, 0),
5351 &instance->irq_context[0]);
5352 }
5353
5354 /**
5355 * megasas_setup_jbod_map - set up the JBOD map for FastPath sequence numbers.
5356 * @instance: Adapter soft state
5357 *
5358 * On failure, use of the sequence-number based JBOD FastPath is disabled.
5359 */
5361 void
5362 megasas_setup_jbod_map(struct megasas_instance *instance)
5363 {
5364 int i;
5365 struct fusion_context *fusion = instance->ctrl_context;
5366 u32 pd_seq_map_sz;
5367
5368 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5369 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5370
5371 if (reset_devices || !fusion ||
5372 !instance->ctrl_info_buf->adapterOperations3.useSeqNumJbodFP) {
5373 dev_info(&instance->pdev->dev,
5374 "Jbod map is not supported %s %d\n",
5375 __func__, __LINE__);
5376 instance->use_seqnum_jbod_fp = false;
5377 return;
5378 }
5379
5380 if (fusion->pd_seq_sync[0])
5381 goto skip_alloc;
5382
5383 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5384 fusion->pd_seq_sync[i] = dma_alloc_coherent
5385 (&instance->pdev->dev, pd_seq_map_sz,
5386 &fusion->pd_seq_phys[i], GFP_KERNEL);
5387 if (!fusion->pd_seq_sync[i]) {
5388 dev_err(&instance->pdev->dev,
5389 "Failed to allocate memory from %s %d\n",
5390 __func__, __LINE__);
5391 if (i == 1) {
5392 dma_free_coherent(&instance->pdev->dev,
5393 pd_seq_map_sz, fusion->pd_seq_sync[0],
5394 fusion->pd_seq_phys[0]);
5395 fusion->pd_seq_sync[0] = NULL;
5396 }
5397 instance->use_seqnum_jbod_fp = false;
5398 return;
5399 }
5400 }
5401
5402 skip_alloc:
5403 if (!megasas_sync_pd_seq_num(instance, false) &&
5404 !megasas_sync_pd_seq_num(instance, true))
5405 instance->use_seqnum_jbod_fp = true;
5406 else
5407 instance->use_seqnum_jbod_fp = false;
5408 }
5409
5410 static void megasas_setup_reply_map(struct megasas_instance *instance)
5411 {
5412 const struct cpumask *mask;
5413 unsigned int queue, cpu;
5414
5415 for (queue = 0; queue < instance->msix_vectors; queue++) {
5416 mask = pci_irq_get_affinity(instance->pdev, queue);
5417 if (!mask)
5418 goto fallback;
5419
5420 for_each_cpu(cpu, mask)
5421 instance->reply_map[cpu] = queue;
5422 }
5423 return;
5424
5425 fallback:
5426 for_each_possible_cpu(cpu)
5427 instance->reply_map[cpu] = cpu % instance->msix_vectors;
5428 }
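/*
 * Illustrative example of the fallback mapping above: with, say, 8
 * possible CPUs and 4 MSI-X vectors, CPUs 0-3 map to reply queues 0-3
 * and CPUs 4-7 wrap back to queues 0-3 (cpu % msix_vectors).
 */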
5429
5430 /**
5431 * megasas_get_device_list - Get the PD and LD device list from FW.
5432 * @instance: Adapter soft state
5433 * @return: Success or failure
5434 *
5435 * Issue DCMDs to Firmware to get the PD and LD list.
5436 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5437 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5438 */
5439 static
5440 int megasas_get_device_list(struct megasas_instance *instance)
5441 {
5442 memset(instance->pd_list, 0,
5443 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5444 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5445
5446 if (instance->enable_fw_dev_list) {
5447 if (megasas_host_device_list_query(instance, true))
5448 return FAILED;
5449 } else {
5450 if (megasas_get_pd_list(instance) < 0) {
5451 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5452 return FAILED;
5453 }
5454
5455 if (megasas_ld_list_query(instance,
5456 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5457 dev_err(&instance->pdev->dev, "failed to get LD list\n");
5458 return FAILED;
5459 }
5460 }
5461
5462 return SUCCESS;
5463 }
5464 /**
5465 * megasas_init_fw - Initializes the FW
5466 * @instance: Adapter soft state
5467 *
5468 * This is the main function for initializing firmware
5469 */
5470
5471 static int megasas_init_fw(struct megasas_instance *instance)
5472 {
5473 u32 max_sectors_1;
5474 u32 max_sectors_2, tmp_sectors, msix_enable;
5475 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5476 resource_size_t base_addr;
5477 struct megasas_ctrl_info *ctrl_info = NULL;
5478 unsigned long bar_list;
5479 int i, j, loop, fw_msix_count = 0;
5480 struct IOV_111 *iovPtr;
5481 struct fusion_context *fusion;
5482 bool do_adp_reset = true;
5483
5484 fusion = instance->ctrl_context;
5485
5486 /* Find first memory bar */
5487 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5488 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5489 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5490 "megasas: LSI")) {
5491 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5492 return -EBUSY;
5493 }
5494
5495 base_addr = pci_resource_start(instance->pdev, instance->bar);
5496 instance->reg_set = ioremap_nocache(base_addr, 8192);
5497
5498 if (!instance->reg_set) {
5499 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5500 goto fail_ioremap;
5501 }
5502
5503 if (instance->adapter_type != MFI_SERIES)
5504 instance->instancet = &megasas_instance_template_fusion;
5505 else {
5506 switch (instance->pdev->device) {
5507 case PCI_DEVICE_ID_LSI_SAS1078R:
5508 case PCI_DEVICE_ID_LSI_SAS1078DE:
5509 instance->instancet = &megasas_instance_template_ppc;
5510 break;
5511 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5512 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5513 instance->instancet = &megasas_instance_template_gen2;
5514 break;
5515 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5516 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5517 instance->instancet = &megasas_instance_template_skinny;
5518 break;
5519 case PCI_DEVICE_ID_LSI_SAS1064R:
5520 case PCI_DEVICE_ID_DELL_PERC5:
5521 default:
5522 instance->instancet = &megasas_instance_template_xscale;
5523 instance->pd_list_not_supported = 1;
5524 break;
5525 }
5526 }
5527
5528 if (megasas_transition_to_ready(instance, 0)) {
5529 if (instance->adapter_type >= INVADER_SERIES) {
5530 status_reg = instance->instancet->read_fw_status_reg(
5531 instance);
5532 do_adp_reset = status_reg & MFI_RESET_ADAPTER;
5533 }
5534
5535 if (do_adp_reset) {
5536 atomic_set(&instance->fw_reset_no_pci_access, 1);
5537 instance->instancet->adp_reset
5538 (instance, instance->reg_set);
5539 atomic_set(&instance->fw_reset_no_pci_access, 0);
5540 dev_info(&instance->pdev->dev,
5541 "FW restarted successfully from %s!\n",
5542 __func__);
5543
5544 /* waiting for about 30 seconds before retrying */
5545 ssleep(30);
5546
5547 if (megasas_transition_to_ready(instance, 0))
5548 goto fail_ready_state;
5549 } else {
5550 goto fail_ready_state;
5551 }
5552 }
5553
5554 megasas_init_ctrl_params(instance);
5555
5556 if (megasas_set_dma_mask(instance))
5557 goto fail_ready_state;
5558
5559 if (megasas_alloc_ctrl_mem(instance))
5560 goto fail_alloc_dma_buf;
5561
5562 if (megasas_alloc_ctrl_dma_buffers(instance))
5563 goto fail_alloc_dma_buf;
5564
5565 fusion = instance->ctrl_context;
5566
5567 if (instance->adapter_type >= VENTURA_SERIES) {
5568 scratch_pad_2 =
5569 megasas_readl(instance,
5570 &instance->reg_set->outbound_scratch_pad_2);
5571 instance->max_raid_mapsize = ((scratch_pad_2 >>
5572 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5573 MR_MAX_RAID_MAP_SIZE_MASK);
5574 }
5575
5576 /* Check if MSI-X is supported while in ready state */
5577 msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5578 0x4000000) >> 0x1a;
5579 if (msix_enable && !msix_disable) {
5580 int irq_flags = PCI_IRQ_MSIX;
5581
5582 scratch_pad_1 = megasas_readl
5583 (instance, &instance->reg_set->outbound_scratch_pad_1);
5584 /* Check max MSI-X vectors */
5585 if (fusion) {
5586 if (instance->adapter_type == THUNDERBOLT_SERIES) {
5587 /* Thunderbolt Series*/
5588 instance->msix_vectors = (scratch_pad_1
5589 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5590 fw_msix_count = instance->msix_vectors;
5591 } else {
5592 instance->msix_vectors = ((scratch_pad_1
5593 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5594 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5595
5596 /*
5597 * For Invader series, > 8 MSI-x vectors
5598 * supported by FW/HW implies combined
5599 * reply queue mode is enabled.
5600 * For Ventura series, > 16 MSI-x vectors
5601 * supported by FW/HW implies combined
5602 * reply queue mode is enabled.
5603 */
5604 switch (instance->adapter_type) {
5605 case INVADER_SERIES:
5606 if (instance->msix_vectors > 8)
5607 instance->msix_combined = true;
5608 break;
5609 case AERO_SERIES:
5610 case VENTURA_SERIES:
5611 if (instance->msix_vectors > 16)
5612 instance->msix_combined = true;
5613 break;
5614 }
5615
5616 if (rdpq_enable)
5617 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
5618 1 : 0;
5619 fw_msix_count = instance->msix_vectors;
5620 /* Save 1-15 reply post index address to local memory
5621 * Index 0 is already saved from reg offset
5622 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
5623 */
5624 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
5625 instance->reply_post_host_index_addr[loop] =
5626 (u32 __iomem *)
5627 ((u8 __iomem *)instance->reg_set +
5628 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
5629 + (loop * 0x10));
5630 }
5631 }
5632 if (msix_vectors)
5633 instance->msix_vectors = min(msix_vectors,
5634 instance->msix_vectors);
5635 } else /* MFI adapters */
5636 instance->msix_vectors = 1;
5637 /* Don't bother allocating more MSI-X vectors than cpus */
5638 instance->msix_vectors = min(instance->msix_vectors,
5639 (unsigned int)num_online_cpus());
5640 if (smp_affinity_enable)
5641 irq_flags |= PCI_IRQ_AFFINITY;
5642 i = pci_alloc_irq_vectors(instance->pdev, 1,
5643 instance->msix_vectors, irq_flags);
5644 if (i > 0)
5645 instance->msix_vectors = i;
5646 else
5647 instance->msix_vectors = 0;
5648 }
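/*
 * Worked example of the block above (hypothetical values): a Ventura
 * controller whose scratch_pad_1 advertises 96 reply queues yields
 * msix_vectors = 96 and msix_combined = true (more than 16). The count
 * is then clamped to the msix_vectors module parameter (if set), to the
 * number of online CPUs, and finally to however many vectors
 * pci_alloc_irq_vectors() actually granted.
 */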
5649 /*
5650 * MSI-X host index 0 is common for all adapters.
5651 * It is used for all MPT based adapters.
5652 */
5653 if (instance->msix_combined) {
5654 instance->reply_post_host_index_addr[0] =
5655 (u32 *)((u8 *)instance->reg_set +
5656 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
5657 } else {
5658 instance->reply_post_host_index_addr[0] =
5659 (u32 *)((u8 *)instance->reg_set +
5660 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
5661 }
5662
5663 if (!instance->msix_vectors) {
5664 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
5665 if (i < 0)
5666 goto fail_init_adapter;
5667 }
5668
5669 megasas_setup_reply_map(instance);
5670
5671 dev_info(&instance->pdev->dev,
5672 "firmware supports msix\t: (%d)", fw_msix_count);
5673 dev_info(&instance->pdev->dev,
5674 "current msix/online cpus\t: (%d/%d)\n",
5675 instance->msix_vectors, (unsigned int)num_online_cpus());
5676 dev_info(&instance->pdev->dev,
5677 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
5678
5679 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
5680 (unsigned long)instance);
5681
5682 /*
5683 * Below are the default values for legacy firmware
5684 * (non-fusion based controllers).
5685 */
5686 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5687 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5688 /* Get operational params, sge flags, send init cmd to controller */
5689 if (instance->instancet->init_adapter(instance))
5690 goto fail_init_adapter;
5691
5692 if (instance->adapter_type >= VENTURA_SERIES) {
5693 scratch_pad_3 =
5694 megasas_readl(instance,
5695 &instance->reg_set->outbound_scratch_pad_3);
5696 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
5697 MR_DEFAULT_NVME_PAGE_SHIFT)
5698 instance->nvme_page_size =
5699 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
5700
5701 dev_info(&instance->pdev->dev,
5702 "NVME page size\t: (%d)\n", instance->nvme_page_size);
5703 }
5704
5705 if (instance->msix_vectors ?
5706 megasas_setup_irqs_msix(instance, 1) :
5707 megasas_setup_irqs_ioapic(instance))
5708 goto fail_init_adapter;
5709
5710 instance->instancet->enable_intr(instance);
5711
5712 dev_info(&instance->pdev->dev, "INIT adapter done\n");
5713
5714 megasas_setup_jbod_map(instance);
5715
5716 if (megasas_get_device_list(instance) != SUCCESS) {
5717 dev_err(&instance->pdev->dev,
5718 "%s: megasas_get_device_list failed\n",
5719 __func__);
5720 goto fail_get_ld_pd_list;
5721 }
5722
5723 /* stream detection initialization */
5724 if (instance->adapter_type >= VENTURA_SERIES) {
5725 fusion->stream_detect_by_ld =
5726 kcalloc(MAX_LOGICAL_DRIVES_EXT,
5727 sizeof(struct LD_STREAM_DETECT *),
5728 GFP_KERNEL);
5729 if (!fusion->stream_detect_by_ld) {
5730 dev_err(&instance->pdev->dev,
5731 "unable to allocate stream detection for pool of LDs\n");
5732 goto fail_get_ld_pd_list;
5733 }
5734 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
5735 fusion->stream_detect_by_ld[i] =
5736 kzalloc(sizeof(struct LD_STREAM_DETECT),
5737 GFP_KERNEL);
5738 if (!fusion->stream_detect_by_ld[i]) {
5739 dev_err(&instance->pdev->dev,
5740 "unable to allocate stream detect by LD\n ");
5741 for (j = 0; j < i; ++j)
5742 kfree(fusion->stream_detect_by_ld[j]);
5743 kfree(fusion->stream_detect_by_ld);
5744 fusion->stream_detect_by_ld = NULL;
5745 goto fail_get_ld_pd_list;
5746 }
5747 fusion->stream_detect_by_ld[i]->mru_bit_map
5748 = MR_STREAM_BITMAP;
5749 }
5750 }
5751
5752 /*
5753 * Compute the max allowed sectors per IO: The controller info has two
5754 * limits on max sectors. Driver should use the minimum of these two.
5755 *
5756 * 1 << stripe_sz_ops.min = max sectors per strip
5757 *
5758 * Note that older firmware (< FW ver 30) didn't report the information
5759 * needed to calculate max_sectors_1, so that value always ended up as zero.
5760 */
5761 tmp_sectors = 0;
5762 ctrl_info = instance->ctrl_info_buf;
5763
5764 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
5765 le16_to_cpu(ctrl_info->max_strips_per_io);
5766 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
5767
5768 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
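/*
 * Hypothetical example of the computation above: stripe_sz_ops.min = 7
 * (64 KB strips, i.e. 128 sectors) and max_strips_per_io = 42 give
 * max_sectors_1 = 128 * 42 = 5376, and with max_request_size = 4096
 * the resulting tmp_sectors is min(5376, 4096) = 4096 sectors.
 */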
5769
5770 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
5771 instance->passive = ctrl_info->cluster.passive;
5772 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
5773 instance->UnevenSpanSupport =
5774 ctrl_info->adapterOperations2.supportUnevenSpans;
5775 if (instance->UnevenSpanSupport) {
5776 struct fusion_context *fusion = instance->ctrl_context;
5777 if (MR_ValidateMapInfo(instance, instance->map_id))
5778 fusion->fast_path_io = 1;
5779 else
5780 fusion->fast_path_io = 0;
5781
5782 }
5783 if (ctrl_info->host_interface.SRIOV) {
5784 instance->requestorId = ctrl_info->iov.requestorId;
5785 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
5786 if (!ctrl_info->adapterOperations2.activePassive)
5787 instance->PlasmaFW111 = 1;
5788
5789 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
5790 instance->PlasmaFW111 ? "1.11" : "new");
5791
5792 if (instance->PlasmaFW111) {
5793 iovPtr = (struct IOV_111 *)
5794 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
5795 instance->requestorId = iovPtr->requestorId;
5796 }
5797 }
5798 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
5799 instance->requestorId);
5800 }
5801
5802 instance->crash_dump_fw_support =
5803 ctrl_info->adapterOperations3.supportCrashDump;
5804 instance->crash_dump_drv_support =
5805 (instance->crash_dump_fw_support &&
5806 instance->crash_dump_buf);
5807 if (instance->crash_dump_drv_support)
5808 megasas_set_crash_dump_params(instance,
5809 MR_CRASH_BUF_TURN_OFF);
5810
5811 else {
5812 if (instance->crash_dump_buf)
5813 dma_free_coherent(&instance->pdev->dev,
5814 CRASH_DMA_BUF_SIZE,
5815 instance->crash_dump_buf,
5816 instance->crash_dump_h);
5817 instance->crash_dump_buf = NULL;
5818 }
5819
5820 if (instance->snapdump_wait_time) {
5821 megasas_get_snapdump_properties(instance);
5822 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
5823 instance->snapdump_wait_time);
5824 }
5825
5826 dev_info(&instance->pdev->dev,
5827 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
5828 le16_to_cpu(ctrl_info->pci.vendor_id),
5829 le16_to_cpu(ctrl_info->pci.device_id),
5830 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
5831 le16_to_cpu(ctrl_info->pci.sub_device_id));
5832 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
5833 instance->UnevenSpanSupport ? "yes" : "no");
5834 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
5835 instance->crash_dump_drv_support ? "yes" : "no");
5836 dev_info(&instance->pdev->dev, "jbod sync map : %s\n",
5837 instance->use_seqnum_jbod_fp ? "yes" : "no");
5838
5839 instance->max_sectors_per_req = instance->max_num_sge *
5840 SGE_BUFFER_SIZE / 512;
5841 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
5842 instance->max_sectors_per_req = tmp_sectors;
5843
5844 /* Check for valid throttlequeuedepth module parameter */
5845 if (throttlequeuedepth &&
5846 throttlequeuedepth <= instance->max_scsi_cmds)
5847 instance->throttlequeuedepth = throttlequeuedepth;
5848 else
5849 instance->throttlequeuedepth =
5850 MEGASAS_THROTTLE_QUEUE_DEPTH;
5851
5852 if ((resetwaittime < 1) ||
5853 (resetwaittime > MEGASAS_RESET_WAIT_TIME))
5854 resetwaittime = MEGASAS_RESET_WAIT_TIME;
5855
5856 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
5857 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
5858
5859 /* Launch SR-IOV heartbeat timer */
5860 if (instance->requestorId) {
5861 if (!megasas_sriov_start_heartbeat(instance, 1)) {
5862 megasas_start_timer(instance);
5863 } else {
5864 instance->skip_heartbeat_timer_del = 1;
5865 goto fail_get_ld_pd_list;
5866 }
5867 }
5868
5869 /*
5870 * Create and start watchdog thread which will monitor
5871 * controller state every 1 sec and trigger OCR when
5872 * it enters fault state
5873 */
5874 if (instance->adapter_type != MFI_SERIES)
5875 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
5876 goto fail_start_watchdog;
5877
5878 return 0;
5879
5880 fail_start_watchdog:
5881 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
5882 del_timer_sync(&instance->sriov_heartbeat_timer);
5883 fail_get_ld_pd_list:
5884 instance->instancet->disable_intr(instance);
5885 megasas_destroy_irqs(instance);
5886 fail_init_adapter:
5887 if (instance->msix_vectors)
5888 pci_free_irq_vectors(instance->pdev);
5889 instance->msix_vectors = 0;
5890 fail_alloc_dma_buf:
5891 megasas_free_ctrl_dma_buffers(instance);
5892 megasas_free_ctrl_mem(instance);
5893 fail_ready_state:
5894 iounmap(instance->reg_set);
5895
5896 fail_ioremap:
5897 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5898
5899 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5900 __func__, __LINE__);
5901 return -EINVAL;
5902 }
5903
5904 /**
5905 * megasas_release_mfi - Reverses the FW initialization
5906 * @instance: Adapter soft state
5907 */
5908 static void megasas_release_mfi(struct megasas_instance *instance)
5909 {
5910 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
5911
5912 if (instance->reply_queue)
5913 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5914 instance->reply_queue, instance->reply_queue_h);
5915
5916 megasas_free_cmds(instance);
5917
5918 iounmap(instance->reg_set);
5919
5920 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
5921 }
5922
5923 /**
5924 * megasas_get_seq_num - Gets latest event sequence numbers
5925 * @instance: Adapter soft state
5926 * @eli: FW event log sequence numbers information
5927 *
5928 * FW maintains a log of all events in a non-volatile area. Upper layers would
5929 * usually find out the latest sequence number of the events, the seq number
5930 * at boot time, etc. They would "read" all the events below the latest seq
5931 * number by issuing a direct fw cmd (DCMD). For future events (beyond the
5932 * latest seq number), they would subscribe to AEN (asynchronous event
5933 * notification) and wait for the events to happen.
5934 */
5935 static int
5936 megasas_get_seq_num(struct megasas_instance *instance,
5937 struct megasas_evt_log_info *eli)
5938 {
5939 struct megasas_cmd *cmd;
5940 struct megasas_dcmd_frame *dcmd;
5941 struct megasas_evt_log_info *el_info;
5942 dma_addr_t el_info_h = 0;
5943 int ret;
5944
5945 cmd = megasas_get_cmd(instance);
5946
5947 if (!cmd) {
5948 return -ENOMEM;
5949 }
5950
5951 dcmd = &cmd->frame->dcmd;
5952 el_info = dma_alloc_coherent(&instance->pdev->dev,
5953 sizeof(struct megasas_evt_log_info),
5954 &el_info_h, GFP_KERNEL);
5955 if (!el_info) {
5956 megasas_return_cmd(instance, cmd);
5957 return -ENOMEM;
5958 }
5959
5960 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5961
5962 dcmd->cmd = MFI_CMD_DCMD;
5963 dcmd->cmd_status = 0x0;
5964 dcmd->sge_count = 1;
5965 dcmd->flags = MFI_FRAME_DIR_READ;
5966 dcmd->timeout = 0;
5967 dcmd->pad_0 = 0;
5968 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
5969 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
5970
5971 megasas_set_dma_settings(instance, dcmd, el_info_h,
5972 sizeof(struct megasas_evt_log_info));
5973
5974 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5975 if (ret != DCMD_SUCCESS) {
5976 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5977 __func__, __LINE__);
5978 goto dcmd_failed;
5979 }
5980
5981 /*
5982 * Copy the data back into callers buffer
5983 */
5984 eli->newest_seq_num = el_info->newest_seq_num;
5985 eli->oldest_seq_num = el_info->oldest_seq_num;
5986 eli->clear_seq_num = el_info->clear_seq_num;
5987 eli->shutdown_seq_num = el_info->shutdown_seq_num;
5988 eli->boot_seq_num = el_info->boot_seq_num;
5989
5990 dcmd_failed:
5991 dma_free_coherent(&instance->pdev->dev,
5992 sizeof(struct megasas_evt_log_info),
5993 el_info, el_info_h);
5994
5995 megasas_return_cmd(instance, cmd);
5996
5997 return ret;
5998 }
5999
6000 /**
6001 * megasas_register_aen - Registers for asynchronous event notification
6002 * @instance: Adapter soft state
6003 * @seq_num: The starting sequence number
6004 * @class_locale: Class of the event
6005 *
6006 * This function subscribes to AEN for events beyond @seq_num. It requests
6007 * to be notified if and only if the event is of type @class_locale.
6008 */
6009 static int
6010 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6011 u32 class_locale_word)
6012 {
6013 int ret_val;
6014 struct megasas_cmd *cmd;
6015 struct megasas_dcmd_frame *dcmd;
6016 union megasas_evt_class_locale curr_aen;
6017 union megasas_evt_class_locale prev_aen;
6018
6019 /*
6020 * If there is an AEN pending already (aen_cmd), check if the
6021 * class_locale of that pending AEN is inclusive of the new
6022 * AEN request we currently have. If it is, then we don't have
6023 * to do anything. In other words, whichever events the current
6024 * AEN request is subscribing to, have already been subscribed
6025 * to.
6026 *
6027 * If the old_cmd is _not_ inclusive, then we have to abort
6028 * that command, form a class_locale that is superset of both
6029 * old and current and re-issue to the FW
6030 */
6031
6032 curr_aen.word = class_locale_word;
6033
6034 if (instance->aen_cmd) {
6035
6036 prev_aen.word =
6037 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6038
6039 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6040 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6041 dev_info(&instance->pdev->dev,
6042 "%s %d out of range class %d send by application\n",
6043 __func__, __LINE__, curr_aen.members.class);
6044 return 0;
6045 }
6046
6047 /*
6048 * A class whose enum value is smaller is inclusive of all
6049 * higher values. If a PROGRESS (= -1) was previously
6050 * registered, then a new registration requests for higher
6051 * classes need not be sent to FW. They are automatically
6052 * included.
6053 *
6054 * Locale numbers don't have such hierarchy. They are bitmap
6055 * values
6056 */
6057 if ((prev_aen.members.class <= curr_aen.members.class) &&
6058 !((prev_aen.members.locale & curr_aen.members.locale) ^
6059 curr_aen.members.locale)) {
6060 /*
6061 * Previously issued event registration includes
6062 * current request. Nothing to do.
6063 */
6064 return 0;
6065 } else {
6066 curr_aen.members.locale |= prev_aen.members.locale;
6067
6068 if (prev_aen.members.class < curr_aen.members.class)
6069 curr_aen.members.class = prev_aen.members.class;
6070
6071 instance->aen_cmd->abort_aen = 1;
6072 ret_val = megasas_issue_blocked_abort_cmd(instance,
6073 instance->
6074 aen_cmd, 30);
6075
6076 if (ret_val) {
6077 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6078 "previous AEN command\n");
6079 return ret_val;
6080 }
6081 }
6082 }
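/*
 * Example of the merge above (hypothetical locale values): if the
 * pending AEN was registered with class PROGRESS and locale 0x0001 and
 * the new request asks for class CRITICAL and locale 0x0002, the class
 * is already covered (PROGRESS < CRITICAL) but the locale is not, so
 * the pending command is aborted and re-issued below with class
 * PROGRESS and the merged locale 0x0003.
 */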
6083
6084 cmd = megasas_get_cmd(instance);
6085
6086 if (!cmd)
6087 return -ENOMEM;
6088
6089 dcmd = &cmd->frame->dcmd;
6090
6091 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6092
6093 /*
6094 * Prepare DCMD for aen registration
6095 */
6096 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6097
6098 dcmd->cmd = MFI_CMD_DCMD;
6099 dcmd->cmd_status = 0x0;
6100 dcmd->sge_count = 1;
6101 dcmd->flags = MFI_FRAME_DIR_READ;
6102 dcmd->timeout = 0;
6103 dcmd->pad_0 = 0;
6104 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6105 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6106 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6107 instance->last_seq_num = seq_num;
6108 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6109
6110 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6111 sizeof(struct megasas_evt_detail));
6112
6113 if (instance->aen_cmd != NULL) {
6114 megasas_return_cmd(instance, cmd);
6115 return 0;
6116 }
6117
6118 /*
6119 * Store reference to the cmd used to register for AEN. When an
6120 * application wants us to register for AEN, we have to abort this
6121 * cmd and re-register with a new EVENT LOCALE supplied by that app
6122 */
6123 instance->aen_cmd = cmd;
6124
6125 /*
6126 * Issue the aen registration frame
6127 */
6128 instance->instancet->issue_dcmd(instance, cmd);
6129
6130 return 0;
6131 }
6132
6133 /* megasas_get_target_prop - Send DCMD with the details below to firmware.
6134 *
6135 * This DCMD fetches a few properties of the LD/system PD defined
6136 * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
6137 *
6138 * The DCMD is sent by the driver whenever a new target is added to the OS.
6139 *
6140 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6141 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6142 * 0 = system PD, 1 = LD.
6143 * dcmd.mbox.s[1] - TargetID for LD/system PD.
6144 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6145 *
6146 * @instance: Adapter soft state
6147 * @sdev: OS provided scsi device
6148 *
6149 * Returns 0 on success, non-zero on failure.
6150 */
6151 int
6152 megasas_get_target_prop(struct megasas_instance *instance,
6153 struct scsi_device *sdev)
6154 {
6155 int ret;
6156 struct megasas_cmd *cmd;
6157 struct megasas_dcmd_frame *dcmd;
6158 u16 targetId = (sdev->channel % 2) + sdev->id;
6159
6160 cmd = megasas_get_cmd(instance);
6161
6162 if (!cmd) {
6163 dev_err(&instance->pdev->dev,
6164 "Failed to get cmd %s\n", __func__);
6165 return -ENOMEM;
6166 }
6167
6168 dcmd = &cmd->frame->dcmd;
6169
6170 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6171 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6172 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6173
6174 dcmd->mbox.s[1] = cpu_to_le16(targetId);
6175 dcmd->cmd = MFI_CMD_DCMD;
6176 dcmd->cmd_status = 0xFF;
6177 dcmd->sge_count = 1;
6178 dcmd->flags = MFI_FRAME_DIR_READ;
6179 dcmd->timeout = 0;
6180 dcmd->pad_0 = 0;
6181 dcmd->data_xfer_len =
6182 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6183 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6184
6185 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6186 sizeof(struct MR_TARGET_PROPERTIES));
6187
6188 if ((instance->adapter_type != MFI_SERIES) &&
6189 !instance->mask_interrupts)
6190 ret = megasas_issue_blocked_cmd(instance,
6191 cmd, MFI_IO_TIMEOUT_SECS);
6192 else
6193 ret = megasas_issue_polled(instance, cmd);
6194
6195 switch (ret) {
6196 case DCMD_TIMEOUT:
6197 switch (dcmd_timeout_ocr_possible(instance)) {
6198 case INITIATE_OCR:
6199 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6200 megasas_reset_fusion(instance->host,
6201 MFI_IO_TIMEOUT_OCR);
6202 break;
6203 case KILL_ADAPTER:
6204 megaraid_sas_kill_hba(instance);
6205 break;
6206 case IGNORE_TIMEOUT:
6207 dev_info(&instance->pdev->dev,
6208 "Ignore DCMD timeout: %s %d\n",
6209 __func__, __LINE__);
6210 break;
6211 }
6212 break;
6213
6214 default:
6215 megasas_return_cmd(instance, cmd);
6216 }
6217 if (ret != DCMD_SUCCESS)
6218 dev_err(&instance->pdev->dev,
6219 "return from %s %d return value %d\n",
6220 __func__, __LINE__, ret);
6221
6222 return ret;
6223 }
6224
6225 /**
6226 * megasas_start_aen - Subscribes to AEN during driver load time
6227 * @instance: Adapter soft state
6228 */
6229 static int megasas_start_aen(struct megasas_instance *instance)
6230 {
6231 struct megasas_evt_log_info eli;
6232 union megasas_evt_class_locale class_locale;
6233
6234 /*
6235 * Get the latest sequence number from FW
6236 */
6237 memset(&eli, 0, sizeof(eli));
6238
6239 if (megasas_get_seq_num(instance, &eli))
6240 return -1;
6241
6242 /*
6243 * Register AEN with FW for latest sequence number plus 1
6244 */
6245 class_locale.members.reserved = 0;
6246 class_locale.members.locale = MR_EVT_LOCALE_ALL;
6247 class_locale.members.class = MR_EVT_CLASS_DEBUG;
6248
6249 return megasas_register_aen(instance,
6250 le32_to_cpu(eli.newest_seq_num) + 1,
6251 class_locale.word);
6252 }
6253
6254 /**
6255 * megasas_io_attach - Attaches this driver to SCSI mid-layer
6256 * @instance: Adapter soft state
6257 */
6258 static int megasas_io_attach(struct megasas_instance *instance)
6259 {
6260 struct Scsi_Host *host = instance->host;
6261
6262 /*
6263 * Export parameters required by SCSI mid-layer
6264 */
6265 host->unique_id = instance->unique_id;
6266 host->can_queue = instance->max_scsi_cmds;
6267 host->this_id = instance->init_id;
6268 host->sg_tablesize = instance->max_num_sge;
6269
6270 if (instance->fw_support_ieee)
6271 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6272
6273 /*
6274 * Check if the module parameter value for max_sectors can be used
6275 */
6276 if (max_sectors && max_sectors < instance->max_sectors_per_req)
6277 instance->max_sectors_per_req = max_sectors;
6278 else {
6279 if (max_sectors) {
6280 if (((instance->pdev->device ==
6281 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6282 (instance->pdev->device ==
6283 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6284 (max_sectors <= MEGASAS_MAX_SECTORS)) {
6285 instance->max_sectors_per_req = max_sectors;
6286 } else {
6287 dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6288 "and <= %d (or < 1MB for GEN2 controller)\n",
6289 instance->max_sectors_per_req);
6290 }
6291 }
6292 }
6293
6294 host->max_sectors = instance->max_sectors_per_req;
6295 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6296 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6297 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6298 host->max_lun = MEGASAS_MAX_LUN;
6299 host->max_cmd_len = 16;
6300
6301 /*
6302 * Notify the mid-layer about the new controller
6303 */
6304 if (scsi_add_host(host, &instance->pdev->dev)) {
6305 dev_err(&instance->pdev->dev,
6306 "Failed to add host from %s %d\n",
6307 __func__, __LINE__);
6308 return -ENODEV;
6309 }
6310
6311 return 0;
6312 }
6313
6314 /**
6315 * megasas_set_dma_mask - Set DMA mask for supported controllers
6316 *
6317 * @instance: Adapter soft state
6318 * Description:
6319 *
6320 * For Ventura, driver/FW will operate in 63bit DMA addresses.
6321 *
6322 * For Invader -
6323 * By default, driver/FW will operate with 32 bit DMA addresses
6324 * for consistent DMA mapping, but if setting the 32 bit consistent
6325 * DMA mask fails, the driver will try a 63 bit consistent
6326 * mask, provided the FW is truly 63 bit DMA capable.
6327 *
6328 * For older controllers (Thunderbolt and MFI based adapters) -
6329 * driver/FW will operate in 32 bit consistent DMA addresses.
6330 */
6331 static int
6332 megasas_set_dma_mask(struct megasas_instance *instance)
6333 {
6334 u64 consistent_mask;
6335 struct pci_dev *pdev;
6336 u32 scratch_pad_1;
6337
6338 pdev = instance->pdev;
6339 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6340 DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6341
6342 if (IS_DMA64) {
6343 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6344 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6345 goto fail_set_dma_mask;
6346
6347 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6348 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6349 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6350 /*
6351 * If 32 bit DMA mask fails, then try for 64 bit mask
6352 * for FW capable of handling 64 bit DMA.
6353 */
6354 scratch_pad_1 = megasas_readl
6355 (instance, &instance->reg_set->outbound_scratch_pad_1);
6356
6357 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6358 goto fail_set_dma_mask;
6359 else if (dma_set_mask_and_coherent(&pdev->dev,
6360 DMA_BIT_MASK(63)))
6361 goto fail_set_dma_mask;
6362 }
6363 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6364 goto fail_set_dma_mask;
6365
6366 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6367 instance->consistent_mask_64bit = false;
6368 else
6369 instance->consistent_mask_64bit = true;
6370
6371 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6372 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6373 (instance->consistent_mask_64bit ? "63" : "32"));
6374
6375 return 0;
6376
6377 fail_set_dma_mask:
6378 dev_err(&pdev->dev, "Failed to set DMA mask\n");
6379 return -1;
6380
6381 }
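/*
 * Net effect on a 64-bit platform: Ventura and newer controllers get a
 * 63 bit coherent mask (consistent_mask_64bit = true), while older
 * fusion and MFI adapters keep a 32 bit coherent mask and escalate to
 * 63 bits only when the 32 bit setup fails and scratch_pad_1 reports
 * 64 bit DMA capability. The streaming mask is normally 63 bits.
 */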
6382
6383 /*
6384 * megasas_set_adapter_type - Set adapter type.
6385 * Supported controllers can be divided in
6386 * different categories-
6387 * enum MR_ADAPTER_TYPE {
6388 * MFI_SERIES = 1,
6389 * THUNDERBOLT_SERIES = 2,
6390 * INVADER_SERIES = 3,
6391 * VENTURA_SERIES = 4,
6392 * AERO_SERIES = 5,
6393 * };
6394 * @instance: Adapter soft state
6395 * return: void
6396 */
6397 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6398 {
6399 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6400 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6401 instance->adapter_type = MFI_SERIES;
6402 } else {
6403 switch (instance->pdev->device) {
6404 case PCI_DEVICE_ID_LSI_AERO_10E1:
6405 case PCI_DEVICE_ID_LSI_AERO_10E2:
6406 case PCI_DEVICE_ID_LSI_AERO_10E5:
6407 case PCI_DEVICE_ID_LSI_AERO_10E6:
6408 instance->adapter_type = AERO_SERIES;
6409 break;
6410 case PCI_DEVICE_ID_LSI_VENTURA:
6411 case PCI_DEVICE_ID_LSI_CRUSADER:
6412 case PCI_DEVICE_ID_LSI_HARPOON:
6413 case PCI_DEVICE_ID_LSI_TOMCAT:
6414 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6415 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6416 instance->adapter_type = VENTURA_SERIES;
6417 break;
6418 case PCI_DEVICE_ID_LSI_FUSION:
6419 case PCI_DEVICE_ID_LSI_PLASMA:
6420 instance->adapter_type = THUNDERBOLT_SERIES;
6421 break;
6422 case PCI_DEVICE_ID_LSI_INVADER:
6423 case PCI_DEVICE_ID_LSI_INTRUDER:
6424 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6425 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6426 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6427 case PCI_DEVICE_ID_LSI_FURY:
6428 instance->adapter_type = INVADER_SERIES;
6429 break;
6430 default: /* For all other supported controllers */
6431 instance->adapter_type = MFI_SERIES;
6432 break;
6433 }
6434 }
6435 }
6436
6437 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6438 {
6439 instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6440 sizeof(u32), &instance->producer_h, GFP_KERNEL);
6441 instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6442 sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6443
6444 if (!instance->producer || !instance->consumer) {
6445 dev_err(&instance->pdev->dev,
6446 "Failed to allocate memory for producer, consumer\n");
6447 return -1;
6448 }
6449
6450 *instance->producer = 0;
6451 *instance->consumer = 0;
6452 return 0;
6453 }
6454
6455 /**
6456 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
6457 * structures which are not common across MFI
6458 * adapters and fusion adapters.
6459 * For MFI based adapters, allocate producer and
6460 * consumer buffers. For fusion adapters, allocate
6461 * memory for fusion context.
6462 * @instance: Adapter soft state
6463 * return: 0 for SUCCESS
6464 */
6465 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6466 {
6467 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6468 GFP_KERNEL);
6469 if (!instance->reply_map)
6470 return -ENOMEM;
6471
6472 switch (instance->adapter_type) {
6473 case MFI_SERIES:
6474 if (megasas_alloc_mfi_ctrl_mem(instance))
6475 goto fail;
6476 break;
6477 case AERO_SERIES:
6478 case VENTURA_SERIES:
6479 case THUNDERBOLT_SERIES:
6480 case INVADER_SERIES:
6481 if (megasas_alloc_fusion_context(instance))
6482 goto fail;
6483 break;
6484 }
6485
6486 return 0;
6487 fail:
6488 kfree(instance->reply_map);
6489 instance->reply_map = NULL;
6490 return -ENOMEM;
6491 }
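/*
 * reply_map is sized for nr_cpu_ids entries (one per possible CPU) and
 * is filled in later by megasas_setup_reply_map() once the MSI-X vector
 * count is known; it is freed together with the fusion context or the
 * MFI producer/consumer buffers in megasas_free_ctrl_mem().
 */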
6492
6493 /*
6494 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
6495 * producer, consumer buffers for MFI adapters
6496 *
6497 * @instance - Adapter soft instance
6498 *
6499 */
6500 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6501 {
6502 kfree(instance->reply_map);
6503 if (instance->adapter_type == MFI_SERIES) {
6504 if (instance->producer)
6505 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6506 instance->producer,
6507 instance->producer_h);
6508 if (instance->consumer)
6509 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
6510 instance->consumer,
6511 instance->consumer_h);
6512 } else {
6513 megasas_free_fusion_context(instance);
6514 }
6515 }
6516
6517 /**
6518 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
6519 * driver load time
6520 *
6521 * @instance - Adapter soft instance
6522 * @return - 0 for SUCCESS
6523 */
6524 static inline
6525 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
6526 {
6527 struct pci_dev *pdev = instance->pdev;
6528 struct fusion_context *fusion = instance->ctrl_context;
6529
6530 instance->evt_detail = dma_alloc_coherent(&pdev->dev,
6531 sizeof(struct megasas_evt_detail),
6532 &instance->evt_detail_h, GFP_KERNEL);
6533
6534 if (!instance->evt_detail) {
6535 dev_err(&instance->pdev->dev,
6536 "Failed to allocate event detail buffer\n");
6537 return -ENOMEM;
6538 }
6539
6540 if (fusion) {
6541 fusion->ioc_init_request =
6542 dma_alloc_coherent(&pdev->dev,
6543 sizeof(struct MPI2_IOC_INIT_REQUEST),
6544 &fusion->ioc_init_request_phys,
6545 GFP_KERNEL);
6546
6547 if (!fusion->ioc_init_request) {
6548 dev_err(&pdev->dev,
6549 "Failed to allocate PD list buffer\n");
6550 return -ENOMEM;
6551 }
6552
6553 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
6554 sizeof(struct MR_SNAPDUMP_PROPERTIES),
6555 &instance->snapdump_prop_h, GFP_KERNEL);
6556
6557 if (!instance->snapdump_prop)
6558 dev_err(&pdev->dev,
6559 "Failed to allocate snapdump properties buffer\n");
6560
6561 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
6562 HOST_DEVICE_LIST_SZ,
6563 &instance->host_device_list_buf_h,
6564 GFP_KERNEL);
6565
6566 if (!instance->host_device_list_buf) {
6567 dev_err(&pdev->dev,
6568 "Failed to allocate targetid list buffer\n");
6569 return -ENOMEM;
6570 }
6571
6572 }
6573
6574 instance->pd_list_buf =
6575 dma_alloc_coherent(&pdev->dev,
6576 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6577 &instance->pd_list_buf_h, GFP_KERNEL);
6578
6579 if (!instance->pd_list_buf) {
6580 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
6581 return -ENOMEM;
6582 }
6583
6584 instance->ctrl_info_buf =
6585 dma_alloc_coherent(&pdev->dev,
6586 sizeof(struct megasas_ctrl_info),
6587 &instance->ctrl_info_buf_h, GFP_KERNEL);
6588
6589 if (!instance->ctrl_info_buf) {
6590 dev_err(&pdev->dev,
6591 "Failed to allocate controller info buffer\n");
6592 return -ENOMEM;
6593 }
6594
6595 instance->ld_list_buf =
6596 dma_alloc_coherent(&pdev->dev,
6597 sizeof(struct MR_LD_LIST),
6598 &instance->ld_list_buf_h, GFP_KERNEL);
6599
6600 if (!instance->ld_list_buf) {
6601 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
6602 return -ENOMEM;
6603 }
6604
6605 instance->ld_targetid_list_buf =
6606 dma_alloc_coherent(&pdev->dev,
6607 sizeof(struct MR_LD_TARGETID_LIST),
6608 &instance->ld_targetid_list_buf_h, GFP_KERNEL);
6609
6610 if (!instance->ld_targetid_list_buf) {
6611 dev_err(&pdev->dev,
6612 "Failed to allocate LD targetid list buffer\n");
6613 return -ENOMEM;
6614 }
6615
6616 if (!reset_devices) {
6617 instance->system_info_buf =
6618 dma_alloc_coherent(&pdev->dev,
6619 sizeof(struct MR_DRV_SYSTEM_INFO),
6620 &instance->system_info_h, GFP_KERNEL);
6621 instance->pd_info =
6622 dma_alloc_coherent(&pdev->dev,
6623 sizeof(struct MR_PD_INFO),
6624 &instance->pd_info_h, GFP_KERNEL);
6625 instance->tgt_prop =
6626 dma_alloc_coherent(&pdev->dev,
6627 sizeof(struct MR_TARGET_PROPERTIES),
6628 &instance->tgt_prop_h, GFP_KERNEL);
6629 instance->crash_dump_buf =
6630 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
6631 &instance->crash_dump_h, GFP_KERNEL);
6632
6633 if (!instance->system_info_buf)
6634 dev_err(&instance->pdev->dev,
6635 "Failed to allocate system info buffer\n");
6636
6637 if (!instance->pd_info)
6638 dev_err(&instance->pdev->dev,
6639 "Failed to allocate pd_info buffer\n");
6640
6641 if (!instance->tgt_prop)
6642 dev_err(&instance->pdev->dev,
6643 "Failed to allocate tgt_prop buffer\n");
6644
6645 if (!instance->crash_dump_buf)
6646 dev_err(&instance->pdev->dev,
6647 "Failed to allocate crash dump buffer\n");
6648 }
6649
6650 return 0;
6651 }
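/*
 * The buffers guarded by !reset_devices above (system info, pd_info,
 * tgt_prop and the crash dump buffer) are optional: allocation failures
 * only log an error, and in a kdump (reset_devices) kernel they are
 * skipped entirely to keep the crash kernel's memory footprint small.
 */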
6652
6653 /*
6654 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
6655 * during driver load time
6656 *
6657 * @instance- Adapter soft instance
6658 *
6659 */
6660 static inline
6661 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
6662 {
6663 struct pci_dev *pdev = instance->pdev;
6664 struct fusion_context *fusion = instance->ctrl_context;
6665
6666 if (instance->evt_detail)
6667 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
6668 instance->evt_detail,
6669 instance->evt_detail_h);
6670
6671 if (fusion && fusion->ioc_init_request)
6672 dma_free_coherent(&pdev->dev,
6673 sizeof(struct MPI2_IOC_INIT_REQUEST),
6674 fusion->ioc_init_request,
6675 fusion->ioc_init_request_phys);
6676
6677 if (instance->pd_list_buf)
6678 dma_free_coherent(&pdev->dev,
6679 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
6680 instance->pd_list_buf,
6681 instance->pd_list_buf_h);
6682
6683 if (instance->ld_list_buf)
6684 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
6685 instance->ld_list_buf,
6686 instance->ld_list_buf_h);
6687
6688 if (instance->ld_targetid_list_buf)
6689 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
6690 instance->ld_targetid_list_buf,
6691 instance->ld_targetid_list_buf_h);
6692
6693 if (instance->ctrl_info_buf)
6694 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
6695 instance->ctrl_info_buf,
6696 instance->ctrl_info_buf_h);
6697
6698 if (instance->system_info_buf)
6699 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
6700 instance->system_info_buf,
6701 instance->system_info_h);
6702
6703 if (instance->pd_info)
6704 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
6705 instance->pd_info, instance->pd_info_h);
6706
6707 if (instance->tgt_prop)
6708 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
6709 instance->tgt_prop, instance->tgt_prop_h);
6710
6711 if (instance->crash_dump_buf)
6712 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
6713 instance->crash_dump_buf,
6714 instance->crash_dump_h);
6715
6716 if (instance->snapdump_prop)
6717 dma_free_coherent(&pdev->dev,
6718 sizeof(struct MR_SNAPDUMP_PROPERTIES),
6719 instance->snapdump_prop,
6720 instance->snapdump_prop_h);
6721
6722 if (instance->host_device_list_buf)
6723 dma_free_coherent(&pdev->dev,
6724 HOST_DEVICE_LIST_SZ,
6725 instance->host_device_list_buf,
6726 instance->host_device_list_buf_h);
6727
6728 }
6729
6730 /*
6731 * megasas_init_ctrl_params - Initialize controller's instance
6732 * parameters before FW init
6733 * @instance - Adapter soft instance
6734 * @return - void
6735 */
6736 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
6737 {
6738 instance->fw_crash_state = UNAVAILABLE;
6739
6740 megasas_poll_wait_aen = 0;
6741 instance->issuepend_done = 1;
6742 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
6743
6744 /*
6745 * Initialize locks and queues
6746 */
6747 INIT_LIST_HEAD(&instance->cmd_pool);
6748 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
6749
6750 atomic_set(&instance->fw_outstanding, 0);
6751
6752 init_waitqueue_head(&instance->int_cmd_wait_q);
6753 init_waitqueue_head(&instance->abort_cmd_wait_q);
6754
6755 spin_lock_init(&instance->crashdump_lock);
6756 spin_lock_init(&instance->mfi_pool_lock);
6757 spin_lock_init(&instance->hba_lock);
6758 spin_lock_init(&instance->stream_lock);
6759 spin_lock_init(&instance->completion_lock);
6760
6761 mutex_init(&instance->reset_mutex);
6762
6763 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
6764 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
6765 instance->flag_ieee = 1;
6766
6767 megasas_dbg_lvl = 0;
6768 instance->flag = 0;
6769 instance->unload = 1;
6770 instance->last_time = 0;
6771 instance->disableOnlineCtrlReset = 1;
6772 instance->UnevenSpanSupport = 0;
6773
6774 if (instance->adapter_type != MFI_SERIES)
6775 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
6776 else
6777 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
6778 }
6779
6780 /**
6781 * megasas_probe_one - PCI hotplug entry point
6782 * @pdev: PCI device structure
6783 * @id: PCI ids of supported hotplugged adapter
6784 */
6785 static int megasas_probe_one(struct pci_dev *pdev,
6786 const struct pci_device_id *id)
6787 {
6788 int rval, pos;
6789 struct Scsi_Host *host;
6790 struct megasas_instance *instance;
6791 u16 control = 0;
6792
6793 switch (pdev->device) {
6794 case PCI_DEVICE_ID_LSI_AERO_10E1:
6795 case PCI_DEVICE_ID_LSI_AERO_10E5:
6796 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
6797 break;
6798 }
6799
6800 /* Reset MSI-X in the kdump kernel */
6801 if (reset_devices) {
6802 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6803 if (pos) {
6804 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
6805 &control);
6806 if (control & PCI_MSIX_FLAGS_ENABLE) {
6807 dev_info(&pdev->dev, "resetting MSI-X\n");
6808 pci_write_config_word(pdev,
6809 pos + PCI_MSIX_FLAGS,
6810 control &
6811 ~PCI_MSIX_FLAGS_ENABLE);
6812 }
6813 }
6814 }
6815
6816 /*
6817 * PCI prepping: enable device, set bus mastering and dma mask
6818 */
6819 rval = pci_enable_device_mem(pdev);
6820
6821 if (rval) {
6822 return rval;
6823 }
6824
6825 pci_set_master(pdev);
6826
6827 host = scsi_host_alloc(&megasas_template,
6828 sizeof(struct megasas_instance));
6829
6830 if (!host) {
6831 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
6832 goto fail_alloc_instance;
6833 }
6834
6835 instance = (struct megasas_instance *)host->hostdata;
6836 memset(instance, 0, sizeof(*instance));
6837 atomic_set(&instance->fw_reset_no_pci_access, 0);
6838
6839 /*
6840 * Initialize PCI related and misc parameters
6841 */
6842 instance->pdev = pdev;
6843 instance->host = host;
6844 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
6845 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
6846
6847 megasas_set_adapter_type(instance);
6848
6849 /*
6850 * Initialize MFI Firmware
6851 */
6852 if (megasas_init_fw(instance))
6853 goto fail_init_mfi;
6854
6855 if (instance->requestorId) {
6856 if (instance->PlasmaFW111) {
6857 instance->vf_affiliation_111 =
6858 dma_alloc_coherent(&pdev->dev,
6859 sizeof(struct MR_LD_VF_AFFILIATION_111),
6860 &instance->vf_affiliation_111_h,
6861 GFP_KERNEL);
6862 if (!instance->vf_affiliation_111)
6863 dev_warn(&pdev->dev, "Can't allocate "
6864 "memory for VF affiliation buffer\n");
6865 } else {
6866 instance->vf_affiliation =
6867 dma_alloc_coherent(&pdev->dev,
6868 (MAX_LOGICAL_DRIVES + 1) *
6869 sizeof(struct MR_LD_VF_AFFILIATION),
6870 &instance->vf_affiliation_h,
6871 GFP_KERNEL);
6872 if (!instance->vf_affiliation)
6873 dev_warn(&pdev->dev, "Can't allocate "
6874 "memory for VF affiliation buffer\n");
6875 }
6876 }
6877
6878 /*
6879 * Store instance in PCI softstate
6880 */
6881 pci_set_drvdata(pdev, instance);
6882
6883 /*
6884 * Add this controller to megasas_mgmt_info structure so that it
6885 * can be exported to management applications
6886 */
6887 megasas_mgmt_info.count++;
6888 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
6889 megasas_mgmt_info.max_index++;
6890
6891 /*
6892 * Register with SCSI mid-layer
6893 */
6894 if (megasas_io_attach(instance))
6895 goto fail_io_attach;
6896
6897 instance->unload = 0;
6898 /*
6899 * Trigger SCSI to scan our drives
6900 */
6901 if (!instance->enable_fw_dev_list ||
6902 (instance->host_device_list_buf->count > 0))
6903 scsi_scan_host(host);
6904
6905 /*
6906 * Initiate AEN (Asynchronous Event Notification)
6907 */
6908 if (megasas_start_aen(instance)) {
6909 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
6910 goto fail_start_aen;
6911 }
6912
6913 /* Get current SR-IOV LD/VF affiliation */
6914 if (instance->requestorId)
6915 megasas_get_ld_vf_affiliation(instance, 1);
6916
6917 return 0;
6918
6919 fail_start_aen:
6920 fail_io_attach:
6921 megasas_mgmt_info.count--;
6922 megasas_mgmt_info.max_index--;
6923 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
6924
6925 instance->instancet->disable_intr(instance);
6926 megasas_destroy_irqs(instance);
6927
6928 if (instance->adapter_type != MFI_SERIES)
6929 megasas_release_fusion(instance);
6930 else
6931 megasas_release_mfi(instance);
6932 if (instance->msix_vectors)
6933 pci_free_irq_vectors(instance->pdev);
6934 fail_init_mfi:
6935 scsi_host_put(host);
6936 fail_alloc_instance:
6937 pci_disable_device(pdev);
6938
6939 return -ENODEV;
6940 }
6941
6942 /**
6943 * megasas_flush_cache - Requests FW to flush all its caches
6944 * @instance: Adapter soft state
6945 */
6946 static void megasas_flush_cache(struct megasas_instance *instance)
6947 {
6948 struct megasas_cmd *cmd;
6949 struct megasas_dcmd_frame *dcmd;
6950
6951 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6952 return;
6953
6954 cmd = megasas_get_cmd(instance);
6955
6956 if (!cmd)
6957 return;
6958
6959 dcmd = &cmd->frame->dcmd;
6960
6961 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6962
6963 dcmd->cmd = MFI_CMD_DCMD;
6964 dcmd->cmd_status = 0x0;
6965 dcmd->sge_count = 0;
6966 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
6967 dcmd->timeout = 0;
6968 dcmd->pad_0 = 0;
6969 dcmd->data_xfer_len = 0;
6970 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
6971 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
6972
6973 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
6974 != DCMD_SUCCESS) {
6975 dev_err(&instance->pdev->dev,
6976 "return from %s %d\n", __func__, __LINE__);
6977 return;
6978 }
6979
6980 megasas_return_cmd(instance, cmd);
6981 }
6982
6983 /**
6984 * megasas_shutdown_controller - Instructs FW to shutdown the controller
6985 * @instance: Adapter soft state
6986 * @opcode: Shutdown/Hibernate
6987 */
6988 static void megasas_shutdown_controller(struct megasas_instance *instance,
6989 u32 opcode)
6990 {
6991 struct megasas_cmd *cmd;
6992 struct megasas_dcmd_frame *dcmd;
6993
6994 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
6995 return;
6996
6997 cmd = megasas_get_cmd(instance);
6998
6999 if (!cmd)
7000 return;
7001
7002 if (instance->aen_cmd)
7003 megasas_issue_blocked_abort_cmd(instance,
7004 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7005 if (instance->map_update_cmd)
7006 megasas_issue_blocked_abort_cmd(instance,
7007 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7008 if (instance->jbod_seq_cmd)
7009 megasas_issue_blocked_abort_cmd(instance,
7010 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7011
7012 dcmd = &cmd->frame->dcmd;
7013
7014 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7015
7016 dcmd->cmd = MFI_CMD_DCMD;
7017 dcmd->cmd_status = 0x0;
7018 dcmd->sge_count = 0;
7019 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7020 dcmd->timeout = 0;
7021 dcmd->pad_0 = 0;
7022 dcmd->data_xfer_len = 0;
7023 dcmd->opcode = cpu_to_le32(opcode);
7024
7025 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7026 != DCMD_SUCCESS) {
7027 dev_err(&instance->pdev->dev,
7028 "return from %s %d\n", __func__, __LINE__);
7029 return;
7030 }
7031
7032 megasas_return_cmd(instance, cmd);
7033 }
7034
7035 #ifdef CONFIG_PM
7036 /**
7037 * megasas_suspend - driver suspend entry point
7038 * @pdev: PCI device structure
7039 * @state: PCI power state to suspend routine
7040 */
7041 static int
7042 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7043 {
7044 struct Scsi_Host *host;
7045 struct megasas_instance *instance;
7046
7047 instance = pci_get_drvdata(pdev);
7048 host = instance->host;
7049 instance->unload = 1;
7050
7051 /* Shutdown SR-IOV heartbeat timer */
7052 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7053 del_timer_sync(&instance->sriov_heartbeat_timer);
7054
7055 /* Stop the FW fault detection watchdog */
7056 if (instance->adapter_type != MFI_SERIES)
7057 megasas_fusion_stop_watchdog(instance);
7058
7059 megasas_flush_cache(instance);
7060 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7061
7062 /* cancel the delayed work if it is still queued */
7063 if (instance->ev != NULL) {
7064 struct megasas_aen_event *ev = instance->ev;
7065 cancel_delayed_work_sync(&ev->hotplug_work);
7066 instance->ev = NULL;
7067 }
7068
7069 tasklet_kill(&instance->isr_tasklet);
7070
7071 pci_set_drvdata(instance->pdev, instance);
7072 instance->instancet->disable_intr(instance);
7073
7074 megasas_destroy_irqs(instance);
7075
7076 if (instance->msix_vectors)
7077 pci_free_irq_vectors(instance->pdev);
7078
7079 pci_save_state(pdev);
7080 pci_disable_device(pdev);
7081
7082 pci_set_power_state(pdev, pci_choose_state(pdev, state));
7083
7084 return 0;
7085 }
7086
7087 /**
7088 * megasas_resume- driver resume entry point
7089 * @pdev: PCI device structure
7090 */
7091 static int
7092 megasas_resume(struct pci_dev *pdev)
7093 {
7094 int rval;
7095 struct Scsi_Host *host;
7096 struct megasas_instance *instance;
7097 int irq_flags = PCI_IRQ_LEGACY;
7098
7099 instance = pci_get_drvdata(pdev);
7100 host = instance->host;
7101 pci_set_power_state(pdev, PCI_D0);
7102 pci_enable_wake(pdev, PCI_D0, 0);
7103 pci_restore_state(pdev);
7104
7105 /*
7106 * PCI prepping: enable device, set bus mastering and dma mask
7107 */
7108 rval = pci_enable_device_mem(pdev);
7109
7110 if (rval) {
7111 dev_err(&pdev->dev, "Enable device failed\n");
7112 return rval;
7113 }
7114
7115 pci_set_master(pdev);
7116
7117 /*
7118 * We expect the FW state to be READY
7119 */
7120 if (megasas_transition_to_ready(instance, 0))
7121 goto fail_ready_state;
7122
7123 if (megasas_set_dma_mask(instance))
7124 goto fail_set_dma_mask;
7125
7126 /*
7127 * Initialize MFI Firmware
7128 */
7129
7130 atomic_set(&instance->fw_outstanding, 0);
7131 atomic_set(&instance->ldio_outstanding, 0);
7132
7133 /* Now re-enable MSI-X */
7134 if (instance->msix_vectors) {
7135 irq_flags = PCI_IRQ_MSIX;
7136 if (smp_affinity_enable)
7137 irq_flags |= PCI_IRQ_AFFINITY;
7138 }
7139 rval = pci_alloc_irq_vectors(instance->pdev, 1,
7140 instance->msix_vectors ?
7141 instance->msix_vectors : 1, irq_flags);
7142 if (rval < 0)
7143 goto fail_reenable_msix;
7144
7145 megasas_setup_reply_map(instance);
7146
7147 if (instance->adapter_type != MFI_SERIES) {
7148 megasas_reset_reply_desc(instance);
7149 if (megasas_ioc_init_fusion(instance)) {
7150 megasas_free_cmds(instance);
7151 megasas_free_cmds_fusion(instance);
7152 goto fail_init_mfi;
7153 }
7154 if (!megasas_get_map_info(instance))
7155 megasas_sync_map_info(instance);
7156 } else {
7157 *instance->producer = 0;
7158 *instance->consumer = 0;
7159 if (megasas_issue_init_mfi(instance))
7160 goto fail_init_mfi;
7161 }
7162
7163 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7164 goto fail_init_mfi;
7165
7166 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7167 (unsigned long)instance);
7168
7169 if (instance->msix_vectors ?
7170 megasas_setup_irqs_msix(instance, 0) :
7171 megasas_setup_irqs_ioapic(instance))
7172 goto fail_init_mfi;
7173
7174 /* Re-launch SR-IOV heartbeat timer */
7175 if (instance->requestorId) {
7176 if (!megasas_sriov_start_heartbeat(instance, 0))
7177 megasas_start_timer(instance);
7178 else {
7179 instance->skip_heartbeat_timer_del = 1;
7180 goto fail_init_mfi;
7181 }
7182 }
7183
7184 instance->instancet->enable_intr(instance);
7185 megasas_setup_jbod_map(instance);
7186 instance->unload = 0;
7187
7188 /*
7189 * Initiate AEN (Asynchronous Event Notification)
7190 */
7191 if (megasas_start_aen(instance))
7192 dev_err(&instance->pdev->dev, "Start AEN failed\n");
7193
7194 /* Re-launch FW fault watchdog */
7195 if (instance->adapter_type != MFI_SERIES)
7196 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7197 goto fail_start_watchdog;
7198
7199 return 0;
7200
7201 fail_start_watchdog:
7202 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7203 del_timer_sync(&instance->sriov_heartbeat_timer);
7204 fail_init_mfi:
7205 megasas_free_ctrl_dma_buffers(instance);
7206 megasas_free_ctrl_mem(instance);
7207 scsi_host_put(host);
7208
7209 fail_reenable_msix:
7210 fail_set_dma_mask:
7211 fail_ready_state:
7212
7213 pci_disable_device(pdev);
7214
7215 return -ENODEV;
7216 }
7217 #else
7218 #define megasas_suspend NULL
7219 #define megasas_resume NULL
7220 #endif
7221
7222 static inline int
7223 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7224 {
7225 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7226 int i;
7227 u8 adp_state;
7228
7229 for (i = 0; i < wait_time; i++) {
7230 adp_state = atomic_read(&instance->adprecovery);
7231 if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7232 (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7233 break;
7234
7235 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7236 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7237
7238 msleep(1000);
7239 }
7240
7241 if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7242 dev_info(&instance->pdev->dev,
7243 "%s HBA failed to become operational, adp_state %d\n",
7244 __func__, adp_state);
7245 return 1;
7246 }
7247
7248 return 0;
7249 }
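/*
 * The loop above polls adprecovery once per second for up to
 * 2 * MEGASAS_RESET_WAIT_TIME iterations, printing a reminder every
 * MEGASAS_RESET_NOTICE_INTERVAL seconds, and reports failure if the
 * HBA is still not operational afterwards.
 */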
7250
7251 /**
7252 * megasas_detach_one - PCI hot"un"plug entry point
7253 * @pdev: PCI device structure
7254 */
7255 static void megasas_detach_one(struct pci_dev *pdev)
7256 {
7257 int i;
7258 struct Scsi_Host *host;
7259 struct megasas_instance *instance;
7260 struct fusion_context *fusion;
7261 u32 pd_seq_map_sz;
7262
7263 instance = pci_get_drvdata(pdev);
7264 host = instance->host;
7265 fusion = instance->ctrl_context;
7266
7267 /* Shutdown SR-IOV heartbeat timer */
7268 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7269 del_timer_sync(&instance->sriov_heartbeat_timer);
7270
7271 /* Stop the FW fault detection watchdog */
7272 if (instance->adapter_type != MFI_SERIES)
7273 megasas_fusion_stop_watchdog(instance);
7274
7275 if (instance->fw_crash_state != UNAVAILABLE)
7276 megasas_free_host_crash_buffer(instance);
7277 scsi_remove_host(instance->host);
7278 instance->unload = 1;
7279
7280 if (megasas_wait_for_adapter_operational(instance))
7281 goto skip_firing_dcmds;
7282
7283 megasas_flush_cache(instance);
7284 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7285
7286 skip_firing_dcmds:
7287 /* cancel the delayed work if it is still queued */
7288 if (instance->ev != NULL) {
7289 struct megasas_aen_event *ev = instance->ev;
7290 cancel_delayed_work_sync(&ev->hotplug_work);
7291 instance->ev = NULL;
7292 }
7293
7294 /* cancel all wait events */
7295 wake_up_all(&instance->int_cmd_wait_q);
7296
7297 tasklet_kill(&instance->isr_tasklet);
7298
7299 /*
7300 * Take the instance off the instance array. Note that we will not
7301 * decrement the max_index. We let this array be a sparse array.
7302 */
7303 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7304 if (megasas_mgmt_info.instance[i] == instance) {
7305 megasas_mgmt_info.count--;
7306 megasas_mgmt_info.instance[i] = NULL;
7307
7308 break;
7309 }
7310 }
7311
7312 instance->instancet->disable_intr(instance);
7313
7314 megasas_destroy_irqs(instance);
7315
7316 if (instance->msix_vectors)
7317 pci_free_irq_vectors(instance->pdev);
7318
7319 if (instance->adapter_type >= VENTURA_SERIES) {
7320 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7321 kfree(fusion->stream_detect_by_ld[i]);
7322 kfree(fusion->stream_detect_by_ld);
7323 fusion->stream_detect_by_ld = NULL;
7324 }
7325
7326
7327 if (instance->adapter_type != MFI_SERIES) {
7328 megasas_release_fusion(instance);
7329 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7330 (sizeof(struct MR_PD_CFG_SEQ) *
7331 (MAX_PHYSICAL_DEVICES - 1));
7332 for (i = 0; i < 2 ; i++) {
7333 if (fusion->ld_map[i])
7334 dma_free_coherent(&instance->pdev->dev,
7335 fusion->max_map_sz,
7336 fusion->ld_map[i],
7337 fusion->ld_map_phys[i]);
7338 if (fusion->ld_drv_map[i]) {
7339 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7340 vfree(fusion->ld_drv_map[i]);
7341 else
7342 free_pages((ulong)fusion->ld_drv_map[i],
7343 fusion->drv_map_pages);
7344 }
7345
7346 if (fusion->pd_seq_sync[i])
7347 dma_free_coherent(&instance->pdev->dev,
7348 pd_seq_map_sz,
7349 fusion->pd_seq_sync[i],
7350 fusion->pd_seq_phys[i]);
7351 }
7352 } else {
7353 megasas_release_mfi(instance);
7354 }
7355
7356 if (instance->vf_affiliation)
7357 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7358 sizeof(struct MR_LD_VF_AFFILIATION),
7359 instance->vf_affiliation,
7360 instance->vf_affiliation_h);
7361
7362 if (instance->vf_affiliation_111)
7363 dma_free_coherent(&pdev->dev,
7364 sizeof(struct MR_LD_VF_AFFILIATION_111),
7365 instance->vf_affiliation_111,
7366 instance->vf_affiliation_111_h);
7367
7368 if (instance->hb_host_mem)
7369 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7370 instance->hb_host_mem,
7371 instance->hb_host_mem_h);
7372
7373 megasas_free_ctrl_dma_buffers(instance);
7374
7375 megasas_free_ctrl_mem(instance);
7376
7377 scsi_host_put(host);
7378
7379 pci_disable_device(pdev);
7380 }
7381
7382 /**
7383 * megasas_shutdown - Shutdown entry point
7384 * @pdev: PCI device structure
7385 */
7386 static void megasas_shutdown(struct pci_dev *pdev)
7387 {
7388 struct megasas_instance *instance = pci_get_drvdata(pdev);
7389
7390 instance->unload = 1;
7391
7392 if (megasas_wait_for_adapter_operational(instance))
7393 goto skip_firing_dcmds;
7394
7395 megasas_flush_cache(instance);
7396 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7397
7398 skip_firing_dcmds:
7399 instance->instancet->disable_intr(instance);
7400 megasas_destroy_irqs(instance);
7401
7402 if (instance->msix_vectors)
7403 pci_free_irq_vectors(instance->pdev);
7404 }
7405
7406 /**
7407 * megasas_mgmt_open - char node "open" entry point
7408 */
7409 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7410 {
7411 /*
7412 * Allow only those users with admin rights
7413 */
7414 if (!capable(CAP_SYS_ADMIN))
7415 return -EACCES;
7416
7417 return 0;
7418 }
7419
7420 /**
7421 * megasas_mgmt_fasync - Async notifier registration from applications
7422 *
7423 * This function adds the calling process to a driver global queue. When an
7424 * event occurs, SIGIO will be sent to all processes in this queue.
7425 */
7426 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7427 {
7428 int rc;
7429
7430 mutex_lock(&megasas_async_queue_mutex);
7431
7432 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7433
7434 mutex_unlock(&megasas_async_queue_mutex);
7435
7436 if (rc >= 0) {
7437 /* Used as a sanity check when an ioctl arrives later */
7438 filep->private_data = filep;
7439 return 0;
7440 }
7441
7442 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7443
7444 return rc;
7445 }
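/*
 * Illustrative only: a user-space management application typically arms
 * SIGIO delivery for this node roughly as follows (the /dev path is an
 * assumption; it is created by the tool or udev and may differ):
 *
 *	fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Setting FASYNC is what invokes this handler through the fasync file
 * operation.
 */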
7446
7447 /**
7448 * megasas_mgmt_poll - char node "poll" entry point
7449 */
7450 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
7451 {
7452 __poll_t mask;
7453 unsigned long flags;
7454
7455 poll_wait(file, &megasas_poll_wait, wait);
7456 spin_lock_irqsave(&poll_aen_lock, flags);
7457 if (megasas_poll_wait_aen)
7458 mask = (EPOLLIN | EPOLLRDNORM);
7459 else
7460 mask = 0;
7461 megasas_poll_wait_aen = 0;
7462 spin_unlock_irqrestore(&poll_aen_lock, flags);
7463 return mask;
7464 }
7465
7466 /*
7467 * megasas_set_crash_dump_params_ioctl:
7468 * Send CRASH_DUMP_MODE DCMD to all controllers
7469 * @cmd: MFI command frame
7470 */
7471
7472 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
7473 {
7474 struct megasas_instance *local_instance;
7475 int i, error = 0;
7476 int crash_support;
7477
7478 crash_support = cmd->frame->dcmd.mbox.w[0];
7479
7480 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7481 local_instance = megasas_mgmt_info.instance[i];
7482 if (local_instance && local_instance->crash_dump_drv_support) {
7483 if ((atomic_read(&local_instance->adprecovery) ==
7484 MEGASAS_HBA_OPERATIONAL) &&
7485 !megasas_set_crash_dump_params(local_instance,
7486 crash_support)) {
7487 local_instance->crash_dump_app_support =
7488 crash_support;
7489 dev_info(&local_instance->pdev->dev,
7490 "Application firmware crash "
7491 "dump mode set success\n");
7492 error = 0;
7493 } else {
7494 dev_info(&local_instance->pdev->dev,
7495 "Application firmware crash "
7496 "dump mode set failed\n");
7497 error = -1;
7498 }
7499 }
7500 }
7501 return error;
7502 }
7503
7504 /**
7505 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
7506 * @instance: Adapter soft state
7507 * @user_ioc: User's ioctl packet in user space
* @ioc: Kernel-space copy of the ioctl packet
7508 */
7509 static int
7510 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7511 struct megasas_iocpacket __user * user_ioc,
7512 struct megasas_iocpacket *ioc)
7513 {
7514 struct megasas_sge64 *kern_sge64 = NULL;
7515 struct megasas_sge32 *kern_sge32 = NULL;
7516 struct megasas_cmd *cmd;
7517 void *kbuff_arr[MAX_IOCTL_SGE];
7518 dma_addr_t buf_handle = 0;
7519 int error = 0, i;
7520 void *sense = NULL;
7521 dma_addr_t sense_handle;
7522 unsigned long *sense_ptr;
7523 u32 opcode = 0;
7524
7525 memset(kbuff_arr, 0, sizeof(kbuff_arr));
7526
7527 if (ioc->sge_count > MAX_IOCTL_SGE) {
7528 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
7529 ioc->sge_count, MAX_IOCTL_SGE);
7530 return -EINVAL;
7531 }
7532
7533 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
7534 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
7535 !instance->support_nvme_passthru)) {
7536 dev_err(&instance->pdev->dev,
7537 "Received invalid ioctl command 0x%x\n",
7538 ioc->frame.hdr.cmd);
7539 return -ENOTSUPP;
7540 }
7541
7542 cmd = megasas_get_cmd(instance);
7543 if (!cmd) {
7544 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
7545 return -ENOMEM;
7546 }
7547
7548 /*
7549 * The user's IOCTL packet has at most two frames. Copy those two
7550 * frames into this cmd's frames. cmd->frame's context field gets
7551 * overwritten by the copy from the user's frames, so it is set
7552 * again separately below.
7553 */
7554 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
7555 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
7556 cmd->frame->hdr.pad_0 = 0;
7557
7558 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
7559
7560 if (instance->consistent_mask_64bit)
7561 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
7562 MFI_FRAME_SENSE64));
7563 else
7564 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
7565 MFI_FRAME_SENSE64));
7566
7567 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
7568 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
7569
7570 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
7571 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
7572 megasas_return_cmd(instance, cmd);
7573 return -1;
7574 }
7575 }
7576
7577 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
7578 error = megasas_set_crash_dump_params_ioctl(cmd);
7579 megasas_return_cmd(instance, cmd);
7580 return error;
7581 }
7582
7583 /*
7584 * The management interface between applications and the fw uses
7585 * MFI frames. E.g., RAID configuration changes, LD property changes,
7586 * etc. are accomplished through different kinds of MFI frames. The
7587 * driver needs to care only about substituting user buffers with
7588 * kernel buffers in SGLs. The location of the SGL is embedded in the
7589 * struct iocpacket itself.
7590 */
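/*
 * Rough sketch of the layout set up below (sgl_off and sense_off are
 * offsets into the MFI frame, taken from the user's iocpacket):
 *
 *	cmd->frame                    MFI frame copied from user space
 *	cmd->frame + ioc->sgl_off     SGL of 32- or 64-bit SGEs; each entry is
 *	                              rewritten to point at a kernel bounce
 *	                              buffer from dma_alloc_coherent()
 *	cmd->frame + ioc->sense_off   location that receives the DMA address
 *	                              of the sense bounce buffer
 */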
7591 if (instance->consistent_mask_64bit)
7592 kern_sge64 = (struct megasas_sge64 *)
7593 ((unsigned long)cmd->frame + ioc->sgl_off);
7594 else
7595 kern_sge32 = (struct megasas_sge32 *)
7596 ((unsigned long)cmd->frame + ioc->sgl_off);
7597
7598 /*
7599 * For each user buffer, create a mirror buffer and copy in
7600 */
7601 for (i = 0; i < ioc->sge_count; i++) {
7602 if (!ioc->sgl[i].iov_len)
7603 continue;
7604
7605 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
7606 ioc->sgl[i].iov_len,
7607 &buf_handle, GFP_KERNEL);
7608 if (!kbuff_arr[i]) {
7609 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
7610 "kernel SGL buffer for IOCTL\n");
7611 error = -ENOMEM;
7612 goto out;
7613 }
7614
7615 /*
7616 * When consistent_mask_64bit is not set, the coherent DMA mask is
7617 * 32 bit, so dma_alloc_coherent() returns addresses that fit in a 32-bit SGE
7618 */
7619 if (instance->consistent_mask_64bit) {
7620 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
7621 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7622 } else {
7623 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
7624 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
7625 }
7626
7627 /*
7628 * We created a kernel buffer corresponding to the
7629 * user buffer. Now copy in from the user buffer
7630 */
7631 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
7632 (u32) (ioc->sgl[i].iov_len))) {
7633 error = -EFAULT;
7634 goto out;
7635 }
7636 }
7637
7638 if (ioc->sense_len) {
7639 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7640 &sense_handle, GFP_KERNEL);
7641 if (!sense) {
7642 error = -ENOMEM;
7643 goto out;
7644 }
7645
7646 sense_ptr =
7647 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7648 if (instance->consistent_mask_64bit)
7649 *sense_ptr = cpu_to_le64(sense_handle);
7650 else
7651 *sense_ptr = cpu_to_le32(sense_handle);
7652 }
7653
7654 /*
7655 * Set the sync_cmd flag so that the ISR knows not to complete this
7656 * cmd to the SCSI mid-layer
7657 */
7658 cmd->sync_cmd = 1;
7659 if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) {
7660 cmd->sync_cmd = 0;
7661 dev_err(&instance->pdev->dev,
7662 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
7663 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
7664 cmd->cmd_status_drv);
7665 return -EBUSY;
7666 }
7667
7668 cmd->sync_cmd = 0;
7669
7670 if (instance->unload == 1) {
7671 dev_info(&instance->pdev->dev, "Driver unload is in progress "
7672 "don't submit data to application\n");
7673 goto out;
7674 }
7675 /*
7676 * copy out the kernel buffers to user buffers
7677 */
7678 for (i = 0; i < ioc->sge_count; i++) {
7679 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
7680 ioc->sgl[i].iov_len)) {
7681 error = -EFAULT;
7682 goto out;
7683 }
7684 }
7685
7686 /*
7687 * copy out the sense
7688 */
7689 if (ioc->sense_len) {
7690 /*
7691 * sense_ptr points to the location that has the user
7692 * sense buffer address
7693 */
7694 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
7695 ioc->sense_off);
7696
7697 if (copy_to_user((void __user *)((unsigned long)
7698 get_unaligned((unsigned long *)sense_ptr)),
7699 sense, ioc->sense_len)) {
7700 dev_err(&instance->pdev->dev, "Failed to copy out to user "
7701 "sense data\n");
7702 error = -EFAULT;
7703 goto out;
7704 }
7705 }
7706
7707 /*
7708 * copy the status codes returned by the fw
7709 */
7710 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
7711 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
7712 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
7713 error = -EFAULT;
7714 }
7715
7716 out:
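/*
 * Common exit path: free the sense bounce buffer and every per-SGE
 * bounce buffer (sizes and DMA addresses come from the kernel SGL
 * filled in above), then return the command to the free pool.
 */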
7717 if (sense) {
7718 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
7719 sense, sense_handle);
7720 }
7721
7722 for (i = 0; i < ioc->sge_count; i++) {
7723 if (kbuff_arr[i]) {
7724 if (instance->consistent_mask_64bit)
7725 dma_free_coherent(&instance->pdev->dev,
7726 le32_to_cpu(kern_sge64[i].length),
7727 kbuff_arr[i],
7728 le64_to_cpu(kern_sge64[i].phys_addr));
7729 else
7730 dma_free_coherent(&instance->pdev->dev,
7731 le32_to_cpu(kern_sge32[i].length),
7732 kbuff_arr[i],
7733 le32_to_cpu(kern_sge32[i].phys_addr));
7734 kbuff_arr[i] = NULL;
7735 }
7736 }
7737
7738 megasas_return_cmd(instance, cmd);
7739 return error;
7740 }
7741
7742 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
7743 {
7744 struct megasas_iocpacket __user *user_ioc =
7745 (struct megasas_iocpacket __user *)arg;
7746 struct megasas_iocpacket *ioc;
7747 struct megasas_instance *instance;
7748 int error;
7749
7750 ioc = memdup_user(user_ioc, sizeof(*ioc));
7751 if (IS_ERR(ioc))
7752 return PTR_ERR(ioc);
7753
7754 instance = megasas_lookup_instance(ioc->host_no);
7755 if (!instance) {
7756 error = -ENODEV;
7757 goto out_kfree_ioc;
7758 }
7759
7760 /* Block ioctls in VF mode */
7761 if (instance->requestorId && !allow_vf_ioctls) {
7762 error = -ENODEV;
7763 goto out_kfree_ioc;
7764 }
7765
7766 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7767 dev_err(&instance->pdev->dev, "Controller in crit error\n");
7768 error = -ENODEV;
7769 goto out_kfree_ioc;
7770 }
7771
7772 if (instance->unload == 1) {
7773 error = -ENODEV;
7774 goto out_kfree_ioc;
7775 }
7776
7777 if (down_interruptible(&instance->ioctl_sem)) {
7778 error = -ERESTARTSYS;
7779 goto out_kfree_ioc;
7780 }
7781
7782 if (megasas_wait_for_adapter_operational(instance)) {
7783 error = -ENODEV;
7784 goto out_up;
7785 }
7786
7787 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
7788 out_up:
7789 up(&instance->ioctl_sem);
7790
7791 out_kfree_ioc:
7792 kfree(ioc);
7793 return error;
7794 }
7795
7796 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
7797 {
7798 struct megasas_instance *instance;
7799 struct megasas_aen aen;
7800 int error;
7801
7802 if (file->private_data != file) {
7803 printk(KERN_DEBUG "megasas: fasync_helper was not "
7804 "called first\n");
7805 return -EINVAL;
7806 }
7807
7808 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
7809 return -EFAULT;
7810
7811 instance = megasas_lookup_instance(aen.host_no);
7812
7813 if (!instance)
7814 return -ENODEV;
7815
7816 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
7817 return -ENODEV;
7818 }
7819
7820 if (instance->unload == 1) {
7821 return -ENODEV;
7822 }
7823
7824 if (megasas_wait_for_adapter_operational(instance))
7825 return -ENODEV;
7826
7827 mutex_lock(&instance->reset_mutex);
7828 error = megasas_register_aen(instance, aen.seq_num,
7829 aen.class_locale_word);
7830 mutex_unlock(&instance->reset_mutex);
7831 return error;
7832 }
7833
7834 /**
7835 * megasas_mgmt_ioctl - char node ioctl entry point
7836 */
7837 static long
7838 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
7839 {
7840 switch (cmd) {
7841 case MEGASAS_IOC_FIRMWARE:
7842 return megasas_mgmt_ioctl_fw(file, arg);
7843
7844 case MEGASAS_IOC_GET_AEN:
7845 return megasas_mgmt_ioctl_aen(file, arg);
7846 }
7847
7848 return -ENOTTY;
7849 }
7850
7851 #ifdef CONFIG_COMPAT
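/*
 * 32-bit user space passes a struct compat_megasas_iocpacket whose iovec
 * entries hold 32-bit pointers. The shim below rebuilds a native struct
 * megasas_iocpacket on the compat user stack (compat_alloc_user_space),
 * widening the sense and SGL pointers, and then hands the result to
 * megasas_mgmt_ioctl_fw().
 */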
7852 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
7853 {
7854 struct compat_megasas_iocpacket __user *cioc =
7855 (struct compat_megasas_iocpacket __user *)arg;
7856 struct megasas_iocpacket __user *ioc =
7857 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
7858 int i;
7859 int error = 0;
7860 compat_uptr_t ptr;
7861 u32 local_sense_off;
7862 u32 local_sense_len;
7863 u32 user_sense_off;
7864
7865 if (clear_user(ioc, sizeof(*ioc)))
7866 return -EFAULT;
7867
7868 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
7869 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
7870 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
7871 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
7872 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
7873 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
7874 return -EFAULT;
7875
7876 /*
7877 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
7878 * sense_len is non-zero, so prepare the 64bit value under
7879 * the same condition.
7880 */
7881 if (get_user(local_sense_off, &ioc->sense_off) ||
7882 get_user(local_sense_len, &ioc->sense_len) ||
7883 get_user(user_sense_off, &cioc->sense_off))
7884 return -EFAULT;
7885
7886 if (local_sense_off != user_sense_off)
7887 return -EINVAL;
7888
7889 if (local_sense_len) {
7890 void __user **sense_ioc_ptr =
7891 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
7892 compat_uptr_t *sense_cioc_ptr =
7893 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
7894 if (get_user(ptr, sense_cioc_ptr) ||
7895 put_user(compat_ptr(ptr), sense_ioc_ptr))
7896 return -EFAULT;
7897 }
7898
7899 for (i = 0; i < MAX_IOCTL_SGE; i++) {
7900 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
7901 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
7902 copy_in_user(&ioc->sgl[i].iov_len,
7903 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
7904 return -EFAULT;
7905 }
7906
7907 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
7908
7909 if (copy_in_user(&cioc->frame.hdr.cmd_status,
7910 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
7911 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
7912 return -EFAULT;
7913 }
7914 return error;
7915 }
7916
7917 static long
7918 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
7919 unsigned long arg)
7920 {
7921 switch (cmd) {
7922 case MEGASAS_IOC_FIRMWARE32:
7923 return megasas_mgmt_compat_ioctl_fw(file, arg);
7924 case MEGASAS_IOC_GET_AEN:
7925 return megasas_mgmt_ioctl_aen(file, arg);
7926 }
7927
7928 return -ENOTTY;
7929 }
7930 #endif
7931
7932 /*
7933 * File operations structure for management interface
7934 */
7935 static const struct file_operations megasas_mgmt_fops = {
7936 .owner = THIS_MODULE,
7937 .open = megasas_mgmt_open,
7938 .fasync = megasas_mgmt_fasync,
7939 .unlocked_ioctl = megasas_mgmt_ioctl,
7940 .poll = megasas_mgmt_poll,
7941 #ifdef CONFIG_COMPAT
7942 .compat_ioctl = megasas_mgmt_compat_ioctl,
7943 #endif
7944 .llseek = noop_llseek,
7945 };
7946
7947 /*
7948 * PCI hotplug support registration structure
7949 */
7950 static struct pci_driver megasas_pci_driver = {
7951
7952 .name = "megaraid_sas",
7953 .id_table = megasas_pci_table,
7954 .probe = megasas_probe_one,
7955 .remove = megasas_detach_one,
7956 .suspend = megasas_suspend,
7957 .resume = megasas_resume,
7958 .shutdown = megasas_shutdown,
7959 };
7960
7961 /*
7962 * Sysfs driver attributes
7963 */
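/*
 * These attributes appear under the PCI driver's sysfs directory,
 * normally /sys/bus/pci/drivers/megaraid_sas/, e.g.:
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/version
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */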
7964 static ssize_t version_show(struct device_driver *dd, char *buf)
7965 {
7966 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
7967 MEGASAS_VERSION);
7968 }
7969 static DRIVER_ATTR_RO(version);
7970
7971 static ssize_t release_date_show(struct device_driver *dd, char *buf)
7972 {
7973 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
7974 MEGASAS_RELDATE);
7975 }
7976 static DRIVER_ATTR_RO(release_date);
7977
7978 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
7979 {
7980 return sprintf(buf, "%u\n", support_poll_for_event);
7981 }
7982 static DRIVER_ATTR_RO(support_poll_for_event);
7983
7984 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
7985 {
7986 return sprintf(buf, "%u\n", support_device_change);
7987 }
7988 static DRIVER_ATTR_RO(support_device_change);
7989
7990 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
7991 {
7992 return sprintf(buf, "%u\n", megasas_dbg_lvl);
7993 }
7994
7995 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
7996 size_t count)
7997 {
7998 int retval = count;
7999
8000 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8001 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8002 retval = -EINVAL;
8003 }
8004 return retval;
8005 }
8006 static DRIVER_ATTR_RW(dbg_lvl);
8007
8008 static ssize_t
8009 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8010 {
8011 return sprintf(buf, "%u\n", support_nvme_encapsulation);
8012 }
8013
8014 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8015
8016 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8017 {
8018 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8019 scsi_remove_device(sdev);
8020 scsi_device_put(sdev);
8021 }
8022
8023 /**
8024 * megasas_update_device_list - Update the PD and LD device list from FW
8025 * after an AEN event notification
8026 * @instance: Adapter soft state
8027 * @event_type: Indicates type of event (PD or LD event)
8028 *
8029 * @return: Success or failure
8030 *
8031 * Issue DCMDs to Firmware to update the internal device list in driver.
8032 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8033 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8034 */
8035 static
8036 int megasas_update_device_list(struct megasas_instance *instance,
8037 int event_type)
8038 {
8039 int dcmd_ret = DCMD_SUCCESS;
8040
8041 if (instance->enable_fw_dev_list) {
8042 dcmd_ret = megasas_host_device_list_query(instance, false);
8043 if (dcmd_ret != DCMD_SUCCESS)
8044 goto out;
8045 } else {
8046 if (event_type & SCAN_PD_CHANNEL) {
8047 dcmd_ret = megasas_get_pd_list(instance);
8048
8049 if (dcmd_ret != DCMD_SUCCESS)
8050 goto out;
8051 }
8052
8053 if (event_type & SCAN_VD_CHANNEL) {
8054 if (!instance->requestorId ||
8055 (instance->requestorId &&
8056 megasas_get_ld_vf_affiliation(instance, 0))) {
8057 dcmd_ret = megasas_ld_list_query(instance,
8058 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8059 if (dcmd_ret != DCMD_SUCCESS)
8060 goto out;
8061 }
8062 }
8063 }
8064
8065 out:
8066 return dcmd_ret;
8067 }
8068
8069 /**
8070 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
8071 * after an AEN event notification
8072 * @instance: Adapter soft state
8073 * @scan_type: Indicates type of devices (PD/LD) to add
8074 * @return void
8075 */
8076 static
8077 void megasas_add_remove_devices(struct megasas_instance *instance,
8078 int scan_type)
8079 {
8080 int i, j;
8081 u16 pd_index = 0;
8082 u16 ld_index = 0;
8083 u16 channel = 0, id = 0;
8084 struct Scsi_Host *host;
8085 struct scsi_device *sdev1;
8086 struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8087 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8088
8089 host = instance->host;
8090
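/*
 * Firmware reports a flat target_id per device; it is folded into the
 * (channel, id) pair used by the SCSI midlayer as:
 *	channel = target_id / MEGASAS_MAX_DEV_PER_CHANNEL
 *	          (plus MEGASAS_MAX_PD_CHANNELS for virtual drives)
 *	id      = target_id % MEGASAS_MAX_DEV_PER_CHANNEL
 * Entries not yet known to the midlayer are added; already present
 * devices are simply released again.
 */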
8091 if (instance->enable_fw_dev_list) {
8092 targetid_list = instance->host_device_list_buf;
8093 for (i = 0; i < targetid_list->count; i++) {
8094 targetid_entry = &targetid_list->host_device_list[i];
8095 if (targetid_entry->flags.u.bits.is_sys_pd) {
8096 channel = le16_to_cpu(targetid_entry->target_id) /
8097 MEGASAS_MAX_DEV_PER_CHANNEL;
8098 id = le16_to_cpu(targetid_entry->target_id) %
8099 MEGASAS_MAX_DEV_PER_CHANNEL;
8100 } else {
8101 channel = MEGASAS_MAX_PD_CHANNELS +
8102 (le16_to_cpu(targetid_entry->target_id) /
8103 MEGASAS_MAX_DEV_PER_CHANNEL);
8104 id = le16_to_cpu(targetid_entry->target_id) %
8105 MEGASAS_MAX_DEV_PER_CHANNEL;
8106 }
8107 sdev1 = scsi_device_lookup(host, channel, id, 0);
8108 if (!sdev1) {
8109 scsi_add_device(host, channel, id, 0);
8110 } else {
8111 scsi_device_put(sdev1);
8112 }
8113 }
8114 }
8115
8116 if (scan_type & SCAN_PD_CHANNEL) {
8117 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8118 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8119 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8120 sdev1 = scsi_device_lookup(host, i, j, 0);
8121 if (instance->pd_list[pd_index].driveState ==
8122 MR_PD_STATE_SYSTEM) {
8123 if (!sdev1)
8124 scsi_add_device(host, i, j, 0);
8125 else
8126 scsi_device_put(sdev1);
8127 } else {
8128 if (sdev1)
8129 megasas_remove_scsi_device(sdev1);
8130 }
8131 }
8132 }
8133 }
8134
8135 if (scan_type & SCAN_VD_CHANNEL) {
8136 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8137 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8138 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8139 sdev1 = scsi_device_lookup(host,
8140 MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8141 if (instance->ld_ids[ld_index] != 0xff) {
8142 if (!sdev1)
8143 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8144 else
8145 scsi_device_put(sdev1);
8146 } else {
8147 if (sdev1)
8148 megasas_remove_scsi_device(sdev1);
8149 }
8150 }
8151 }
8152 }
8153
8154 }
8155
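/*
 * megasas_aen_polling - deferred AEN (asynchronous event notification) work
 *
 * Runs from the hotplug_work delayed work scheduled by the AEN completion
 * path. It decodes the latest event, refreshes the PD/LD lists from
 * firmware when the event requires it, adds or removes SCSI devices
 * accordingly, and finally re-registers an AEN with the firmware for the
 * next sequence number.
 */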
8156 static void
8157 megasas_aen_polling(struct work_struct *work)
8158 {
8159 struct megasas_aen_event *ev =
8160 container_of(work, struct megasas_aen_event, hotplug_work.work);
8161 struct megasas_instance *instance = ev->instance;
8162 union megasas_evt_class_locale class_locale;
8163 int event_type = 0;
8164 u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;
8165 int error;
8166 u8 dcmd_ret = DCMD_SUCCESS;
8167
8168 if (!instance) {
8169 printk(KERN_ERR "invalid instance!\n");
8170 kfree(ev);
8171 return;
8172 }
8173
8174 /* Adjust event workqueue thread wait time for VF mode */
8175 if (instance->requestorId)
8176 wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF;
8177
8178 /* Don't run the event workqueue thread if OCR is running */
8179 mutex_lock(&instance->reset_mutex);
8180
8181 instance->ev = NULL;
8182 if (instance->evt_detail) {
8183 megasas_decode_evt(instance);
8184
8185 switch (le32_to_cpu(instance->evt_detail->code)) {
8186
8187 case MR_EVT_PD_INSERTED:
8188 case MR_EVT_PD_REMOVED:
8189 event_type = SCAN_PD_CHANNEL;
8190 break;
8191
8192 case MR_EVT_LD_OFFLINE:
8193 case MR_EVT_CFG_CLEARED:
8194 case MR_EVT_LD_DELETED:
8195 case MR_EVT_LD_CREATED:
8196 event_type = SCAN_VD_CHANNEL;
8197 break;
8198
8199 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8200 case MR_EVT_FOREIGN_CFG_IMPORTED:
8201 case MR_EVT_LD_STATE_CHANGE:
8202 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8203 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8204 instance->host->host_no);
8205 break;
8206
8207 case MR_EVT_CTRL_PROP_CHANGED:
8208 dcmd_ret = megasas_get_ctrl_info(instance);
8209 if (dcmd_ret == DCMD_SUCCESS &&
8210 instance->snapdump_wait_time) {
8211 megasas_get_snapdump_properties(instance);
8212 dev_info(&instance->pdev->dev,
8213 "Snap dump wait time\t: %d\n",
8214 instance->snapdump_wait_time);
8215 }
8216 break;
8217 default:
8218 event_type = 0;
8219 break;
8220 }
8221 } else {
8222 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8223 mutex_unlock(&instance->reset_mutex);
8224 kfree(ev);
8225 return;
8226 }
8227
8228 if (event_type)
8229 dcmd_ret = megasas_update_device_list(instance, event_type);
8230
8231 mutex_unlock(&instance->reset_mutex);
8232
8233 if (event_type && dcmd_ret == DCMD_SUCCESS)
8234 megasas_add_remove_devices(instance, event_type);
8235
8236 if (dcmd_ret == DCMD_SUCCESS)
8237 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8238 else
8239 seq_num = instance->last_seq_num;
8240
8241 /* Register AEN with FW for latest sequence number plus 1 */
8242 class_locale.members.reserved = 0;
8243 class_locale.members.locale = MR_EVT_LOCALE_ALL;
8244 class_locale.members.class = MR_EVT_CLASS_DEBUG;
8245
8246 if (instance->aen_cmd != NULL) {
8247 kfree(ev);
8248 return;
8249 }
8250
8251 mutex_lock(&instance->reset_mutex);
8252 error = megasas_register_aen(instance, seq_num,
8253 class_locale.word);
8254 if (error)
8255 dev_err(&instance->pdev->dev,
8256 "register aen failed error %x\n", error);
8257
8258 mutex_unlock(&instance->reset_mutex);
8259 kfree(ev);
8260 }
8261
8262 /**
8263 * megasas_init - Driver load entry point
8264 */
8265 static int __init megasas_init(void)
8266 {
8267 int rval;
8268
8269 /*
8270 * When booted in a kdump kernel, minimize the memory footprint by
8271 * disabling a few features
8272 */
8273 if (reset_devices) {
8274 msix_vectors = 1;
8275 rdpq_enable = 0;
8276 dual_qdepth_disable = 1;
8277 }
8278
8279 /*
8280 * Announce driver version and other information
8281 */
8282 pr_info("megasas: %s\n", MEGASAS_VERSION);
8283
8284 spin_lock_init(&poll_aen_lock);
8285
8286 support_poll_for_event = 2;
8287 support_device_change = 1;
8288 support_nvme_encapsulation = true;
8289
8290 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
8291
8292 /*
8293 * Register character device node
8294 */
8295 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
8296
8297 if (rval < 0) {
8298 printk(KERN_DEBUG "megasas: failed to open device node\n");
8299 return rval;
8300 }
8301
8302 megasas_mgmt_majorno = rval;
8303
8304 /*
8305 * Register ourselves as PCI hotplug module
8306 */
8307 rval = pci_register_driver(&megasas_pci_driver);
8308
8309 if (rval) {
8310 printk(KERN_DEBUG "megasas: PCI hotplug registration failed\n");
8311 goto err_pcidrv;
8312 }
8313
8314 rval = driver_create_file(&megasas_pci_driver.driver,
8315 &driver_attr_version);
8316 if (rval)
8317 goto err_dcf_attr_ver;
8318
8319 rval = driver_create_file(&megasas_pci_driver.driver,
8320 &driver_attr_release_date);
8321 if (rval)
8322 goto err_dcf_rel_date;
8323
8324 rval = driver_create_file(&megasas_pci_driver.driver,
8325 &driver_attr_support_poll_for_event);
8326 if (rval)
8327 goto err_dcf_support_poll_for_event;
8328
8329 rval = driver_create_file(&megasas_pci_driver.driver,
8330 &driver_attr_dbg_lvl);
8331 if (rval)
8332 goto err_dcf_dbg_lvl;
8333 rval = driver_create_file(&megasas_pci_driver.driver,
8334 &driver_attr_support_device_change);
8335 if (rval)
8336 goto err_dcf_support_device_change;
8337
8338 rval = driver_create_file(&megasas_pci_driver.driver,
8339 &driver_attr_support_nvme_encapsulation);
8340 if (rval)
8341 goto err_dcf_support_nvme_encapsulation;
8342
8343 return rval;
8344
8345 err_dcf_support_nvme_encapsulation:
8346 driver_remove_file(&megasas_pci_driver.driver,
8347 &driver_attr_support_device_change);
8348
8349 err_dcf_support_device_change:
8350 driver_remove_file(&megasas_pci_driver.driver,
8351 &driver_attr_dbg_lvl);
8352 err_dcf_dbg_lvl:
8353 driver_remove_file(&megasas_pci_driver.driver,
8354 &driver_attr_support_poll_for_event);
8355 err_dcf_support_poll_for_event:
8356 driver_remove_file(&megasas_pci_driver.driver,
8357 &driver_attr_release_date);
8358 err_dcf_rel_date:
8359 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8360 err_dcf_attr_ver:
8361 pci_unregister_driver(&megasas_pci_driver);
8362 err_pcidrv:
8363 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8364 return rval;
8365 }
8366
8367 /**
8368 * megasas_exit - Driver unload entry point
8369 */
8370 static void __exit megasas_exit(void)
8371 {
8372 driver_remove_file(&megasas_pci_driver.driver,
8373 &driver_attr_dbg_lvl);
8374 driver_remove_file(&megasas_pci_driver.driver,
8375 &driver_attr_support_poll_for_event);
8376 driver_remove_file(&megasas_pci_driver.driver,
8377 &driver_attr_support_device_change);
8378 driver_remove_file(&megasas_pci_driver.driver,
8379 &driver_attr_release_date);
8380 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8381 driver_remove_file(&megasas_pci_driver.driver,
8382 &driver_attr_support_nvme_encapsulation);
8383
8384 pci_unregister_driver(&megasas_pci_driver);
8385 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8386 }
8387
8388 module_init(megasas_init);
8389 module_exit(megasas_exit);