/*
 *  Linux MegaRAID driver for SAS based RAID controllers
 *
 *  Copyright (c) 2009-2013  LSI Corporation
 *  Copyright (c) 2013-2014  Avago Technologies
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 *  FILE: megaraid_sas_fusion.c
 *
 *  Authors: Avago Technologies
 *           Kashyap Desai <kashyap.desai@avagotech.com>
 *           Sumit Saxena <sumit.saxena@avagotech.com>
 *
 *  Send feedback to: megaraidlinux.pdl@avagotech.com
 *
 *  Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
 *  San Jose, California 95131
 */
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 #include <linux/vmalloc.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_dbg.h>
57 #include <linux/dmi.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
63 extern void megasas_free_cmds(struct megasas_instance
*instance
);
64 extern struct megasas_cmd
*megasas_get_cmd(struct megasas_instance
67 megasas_complete_cmd(struct megasas_instance
*instance
,
68 struct megasas_cmd
*cmd
, u8 alt_status
);
70 wait_and_poll(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
,
74 megasas_return_cmd(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
);
75 int megasas_alloc_cmds(struct megasas_instance
*instance
);
77 megasas_clear_intr_fusion(struct megasas_register_set __iomem
*regs
);
79 megasas_issue_polled(struct megasas_instance
*instance
,
80 struct megasas_cmd
*cmd
);
82 megasas_check_and_restore_queue_depth(struct megasas_instance
*instance
);
84 int megasas_transition_to_ready(struct megasas_instance
*instance
, int ocr
);
85 void megaraid_sas_kill_hba(struct megasas_instance
*instance
);
87 extern u32 megasas_dbg_lvl
;
88 int megasas_sriov_start_heartbeat(struct megasas_instance
*instance
,
90 void megasas_start_timer(struct megasas_instance
*instance
);
91 extern struct megasas_mgmt_info megasas_mgmt_info
;
92 extern unsigned int resetwaittime
;
93 extern unsigned int dual_qdepth_disable
;
94 static void megasas_free_rdpq_fusion(struct megasas_instance
*instance
);
95 static void megasas_free_reply_fusion(struct megasas_instance
*instance
);
100 * megasas_enable_intr_fusion - Enables interrupts
101 * @regs: MFI register set
104 megasas_enable_intr_fusion(struct megasas_instance
*instance
)
106 struct megasas_register_set __iomem
*regs
;
107 regs
= instance
->reg_set
;
109 instance
->mask_interrupts
= 0;
110 /* For Thunderbolt/Invader also clear intr on enable */
111 writel(~0, ®s
->outbound_intr_status
);
112 readl(®s
->outbound_intr_status
);
114 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK
, &(regs
)->outbound_intr_mask
);
116 /* Dummy readl to force pci flush */
117 readl(®s
->outbound_intr_mask
);
121 * megasas_disable_intr_fusion - Disables interrupt
122 * @regs: MFI register set
125 megasas_disable_intr_fusion(struct megasas_instance
*instance
)
127 u32 mask
= 0xFFFFFFFF;
129 struct megasas_register_set __iomem
*regs
;
130 regs
= instance
->reg_set
;
131 instance
->mask_interrupts
= 1;
133 writel(mask
, ®s
->outbound_intr_mask
);
134 /* Dummy readl to force pci flush */
135 status
= readl(®s
->outbound_intr_mask
);
139 megasas_clear_intr_fusion(struct megasas_register_set __iomem
*regs
)
143 * Check if it is our interrupt
145 status
= readl(®s
->outbound_intr_status
);
148 writel(status
, ®s
->outbound_intr_status
);
149 readl(®s
->outbound_intr_status
);
152 if (!(status
& MFI_FUSION_ENABLE_INTERRUPT_MASK
))
159 * megasas_get_cmd_fusion - Get a command from the free pool
160 * @instance: Adapter soft state
162 * Returns a blk_tag indexed mpt frame
164 inline struct megasas_cmd_fusion
*megasas_get_cmd_fusion(struct megasas_instance
165 *instance
, u32 blk_tag
)
167 struct fusion_context
*fusion
;
169 fusion
= instance
->ctrl_context
;
170 return fusion
->cmd_list
[blk_tag
];
174 * megasas_return_cmd_fusion - Return a cmd to free command pool
175 * @instance: Adapter soft state
176 * @cmd: Command packet to be returned to free command pool
178 inline void megasas_return_cmd_fusion(struct megasas_instance
*instance
,
179 struct megasas_cmd_fusion
*cmd
)
182 memset(cmd
->io_request
, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
);
183 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
184 cmd
->cmd_completed
= false;
188 * megasas_fire_cmd_fusion - Sends command to the FW
189 * @instance: Adapter soft state
190 * @req_desc: 32bit or 64bit Request descriptor
192 * Perform PCI Write. Ventura supports 32 bit Descriptor.
193 * Prior to Ventura (12G) MR controller supports 64 bit Descriptor.
197 megasas_fire_cmd_fusion(struct megasas_instance
*instance
,
198 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
)
200 if (instance
->is_ventura
)
201 writel(le32_to_cpu(req_desc
->u
.low
),
202 &instance
->reg_set
->inbound_single_queue_port
);
204 #if defined(writeq) && defined(CONFIG_64BIT)
205 u64 req_data
= (((u64
)le32_to_cpu(req_desc
->u
.high
) << 32) |
206 le32_to_cpu(req_desc
->u
.low
));
208 writeq(req_data
, &instance
->reg_set
->inbound_low_queue_port
);
211 spin_lock_irqsave(&instance
->hba_lock
, flags
);
212 writel(le32_to_cpu(req_desc
->u
.low
),
213 &instance
->reg_set
->inbound_low_queue_port
);
214 writel(le32_to_cpu(req_desc
->u
.high
),
215 &instance
->reg_set
->inbound_high_queue_port
);
217 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
223 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
224 * @instance: Adapter soft state
225 * fw_boot_context: Whether this function called during probe or after OCR
227 * This function is only for fusion controllers.
228 * Update host can queue, if firmware downgrade max supported firmware commands.
229 * Firmware upgrade case will be skiped because underlying firmware has
230 * more resource than exposed to the OS.
234 megasas_fusion_update_can_queue(struct megasas_instance
*instance
, int fw_boot_context
)
236 u16 cur_max_fw_cmds
= 0;
237 u16 ldio_threshold
= 0;
238 struct megasas_register_set __iomem
*reg_set
;
240 reg_set
= instance
->reg_set
;
242 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
243 if (!instance
->is_ventura
)
245 readl(&instance
->reg_set
->outbound_scratch_pad_3
) & 0x00FFFF;
247 if (dual_qdepth_disable
|| !cur_max_fw_cmds
)
248 cur_max_fw_cmds
= instance
->instancet
->read_fw_status_reg(reg_set
) & 0x00FFFF;
251 (instance
->instancet
->read_fw_status_reg(reg_set
) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS
;
253 dev_info(&instance
->pdev
->dev
,
254 "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
255 cur_max_fw_cmds
, ldio_threshold
);
257 if (fw_boot_context
== OCR_CONTEXT
) {
258 cur_max_fw_cmds
= cur_max_fw_cmds
- 1;
259 if (cur_max_fw_cmds
< instance
->max_fw_cmds
) {
260 instance
->cur_can_queue
=
261 cur_max_fw_cmds
- (MEGASAS_FUSION_INTERNAL_CMDS
+
262 MEGASAS_FUSION_IOCTL_CMDS
);
263 instance
->host
->can_queue
= instance
->cur_can_queue
;
264 instance
->ldio_threshold
= ldio_threshold
;
267 instance
->max_fw_cmds
= cur_max_fw_cmds
;
268 instance
->ldio_threshold
= ldio_threshold
;
270 if (!instance
->is_rdpq
)
271 instance
->max_fw_cmds
=
272 min_t(u16
, instance
->max_fw_cmds
, 1024);
275 instance
->max_fw_cmds
= min(instance
->max_fw_cmds
,
276 (u16
)MEGASAS_KDUMP_QUEUE_DEPTH
);
278 * Reduce the max supported cmds by 1. This is to ensure that the
279 * reply_q_sz (1 more than the max cmd that driver may send)
280 * does not exceed max cmds that the FW can support
282 instance
->max_fw_cmds
= instance
->max_fw_cmds
-1;
284 instance
->max_scsi_cmds
= instance
->max_fw_cmds
-
285 (MEGASAS_FUSION_INTERNAL_CMDS
+
286 MEGASAS_FUSION_IOCTL_CMDS
);
287 instance
->cur_can_queue
= instance
->max_scsi_cmds
;
288 instance
->host
->can_queue
= instance
->cur_can_queue
;
291 if (instance
->is_ventura
)
292 instance
->max_mpt_cmds
=
293 instance
->max_fw_cmds
* RAID_1_PEER_CMDS
;
295 instance
->max_mpt_cmds
= instance
->max_fw_cmds
;
298 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
299 * @instance: Adapter soft state
302 megasas_free_cmds_fusion(struct megasas_instance
*instance
)
305 struct fusion_context
*fusion
= instance
->ctrl_context
;
306 struct megasas_cmd_fusion
*cmd
;
309 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++) {
310 cmd
= fusion
->cmd_list
[i
];
313 dma_pool_free(fusion
->sg_dma_pool
, cmd
->sg_frame
,
314 cmd
->sg_frame_phys_addr
);
316 dma_pool_free(fusion
->sense_dma_pool
, cmd
->sense
,
317 cmd
->sense_phys_addr
);
321 if (fusion
->sg_dma_pool
) {
322 dma_pool_destroy(fusion
->sg_dma_pool
);
323 fusion
->sg_dma_pool
= NULL
;
325 if (fusion
->sense_dma_pool
) {
326 dma_pool_destroy(fusion
->sense_dma_pool
);
327 fusion
->sense_dma_pool
= NULL
;
331 /* Reply Frame, Desc*/
332 if (instance
->is_rdpq
)
333 megasas_free_rdpq_fusion(instance
);
335 megasas_free_reply_fusion(instance
);
337 /* Request Frame, Desc*/
338 if (fusion
->req_frames_desc
)
339 dma_free_coherent(&instance
->pdev
->dev
,
340 fusion
->request_alloc_sz
, fusion
->req_frames_desc
,
341 fusion
->req_frames_desc_phys
);
342 if (fusion
->io_request_frames
)
343 dma_pool_free(fusion
->io_request_frames_pool
,
344 fusion
->io_request_frames
,
345 fusion
->io_request_frames_phys
);
346 if (fusion
->io_request_frames_pool
) {
347 dma_pool_destroy(fusion
->io_request_frames_pool
);
348 fusion
->io_request_frames_pool
= NULL
;
353 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++)
354 kfree(fusion
->cmd_list
[i
]);
356 kfree(fusion
->cmd_list
);
360 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
361 * @instance: Adapter soft state
364 static int megasas_create_sg_sense_fusion(struct megasas_instance
*instance
)
368 struct fusion_context
*fusion
;
369 struct megasas_cmd_fusion
*cmd
;
371 fusion
= instance
->ctrl_context
;
372 max_cmd
= instance
->max_fw_cmds
;
375 fusion
->sg_dma_pool
=
376 dma_pool_create("mr_sg", &instance
->pdev
->dev
,
377 instance
->max_chain_frame_sz
,
378 MR_DEFAULT_NVME_PAGE_SIZE
, 0);
379 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
380 fusion
->sense_dma_pool
=
381 dma_pool_create("mr_sense", &instance
->pdev
->dev
,
382 SCSI_SENSE_BUFFERSIZE
, 64, 0);
384 if (!fusion
->sense_dma_pool
|| !fusion
->sg_dma_pool
) {
385 dev_err(&instance
->pdev
->dev
,
386 "Failed from %s %d\n", __func__
, __LINE__
);
391 * Allocate and attach a frame to each of the commands in cmd_list
393 for (i
= 0; i
< max_cmd
; i
++) {
394 cmd
= fusion
->cmd_list
[i
];
395 cmd
->sg_frame
= dma_pool_alloc(fusion
->sg_dma_pool
,
396 GFP_KERNEL
, &cmd
->sg_frame_phys_addr
);
398 cmd
->sense
= dma_pool_alloc(fusion
->sense_dma_pool
,
399 GFP_KERNEL
, &cmd
->sense_phys_addr
);
400 if (!cmd
->sg_frame
|| !cmd
->sense
) {
401 dev_err(&instance
->pdev
->dev
,
402 "Failed from %s %d\n", __func__
, __LINE__
);
407 /* create sense buffer for the raid 1/10 fp */
408 for (i
= max_cmd
; i
< instance
->max_mpt_cmds
; i
++) {
409 cmd
= fusion
->cmd_list
[i
];
410 cmd
->sense
= dma_pool_alloc(fusion
->sense_dma_pool
,
411 GFP_KERNEL
, &cmd
->sense_phys_addr
);
413 dev_err(&instance
->pdev
->dev
,
414 "Failed from %s %d\n", __func__
, __LINE__
);
423 megasas_alloc_cmdlist_fusion(struct megasas_instance
*instance
)
425 u32 max_mpt_cmd
, i
, j
;
426 struct fusion_context
*fusion
;
428 fusion
= instance
->ctrl_context
;
430 max_mpt_cmd
= instance
->max_mpt_cmds
;
433 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
434 * Allocate the dynamic array first and then allocate individual
438 kzalloc(sizeof(struct megasas_cmd_fusion
*) * max_mpt_cmd
,
440 if (!fusion
->cmd_list
) {
441 dev_err(&instance
->pdev
->dev
,
442 "Failed from %s %d\n", __func__
, __LINE__
);
446 for (i
= 0; i
< max_mpt_cmd
; i
++) {
447 fusion
->cmd_list
[i
] = kzalloc(sizeof(struct megasas_cmd_fusion
),
449 if (!fusion
->cmd_list
[i
]) {
450 for (j
= 0; j
< i
; j
++)
451 kfree(fusion
->cmd_list
[j
]);
452 kfree(fusion
->cmd_list
);
453 dev_err(&instance
->pdev
->dev
,
454 "Failed from %s %d\n", __func__
, __LINE__
);
462 megasas_alloc_request_fusion(struct megasas_instance
*instance
)
464 struct fusion_context
*fusion
;
466 fusion
= instance
->ctrl_context
;
468 fusion
->req_frames_desc
=
469 dma_alloc_coherent(&instance
->pdev
->dev
,
470 fusion
->request_alloc_sz
,
471 &fusion
->req_frames_desc_phys
, GFP_KERNEL
);
472 if (!fusion
->req_frames_desc
) {
473 dev_err(&instance
->pdev
->dev
,
474 "Failed from %s %d\n", __func__
, __LINE__
);
478 fusion
->io_request_frames_pool
=
479 dma_pool_create("mr_ioreq", &instance
->pdev
->dev
,
480 fusion
->io_frames_alloc_sz
, 16, 0);
482 if (!fusion
->io_request_frames_pool
) {
483 dev_err(&instance
->pdev
->dev
,
484 "Failed from %s %d\n", __func__
, __LINE__
);
488 fusion
->io_request_frames
=
489 dma_pool_alloc(fusion
->io_request_frames_pool
,
490 GFP_KERNEL
, &fusion
->io_request_frames_phys
);
491 if (!fusion
->io_request_frames
) {
492 dev_err(&instance
->pdev
->dev
,
493 "Failed from %s %d\n", __func__
, __LINE__
);
500 megasas_alloc_reply_fusion(struct megasas_instance
*instance
)
503 struct fusion_context
*fusion
;
504 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
505 fusion
= instance
->ctrl_context
;
507 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
508 fusion
->reply_frames_desc_pool
=
509 dma_pool_create("mr_reply", &instance
->pdev
->dev
,
510 fusion
->reply_alloc_sz
* count
, 16, 0);
512 if (!fusion
->reply_frames_desc_pool
) {
513 dev_err(&instance
->pdev
->dev
,
514 "Failed from %s %d\n", __func__
, __LINE__
);
518 fusion
->reply_frames_desc
[0] =
519 dma_pool_alloc(fusion
->reply_frames_desc_pool
,
520 GFP_KERNEL
, &fusion
->reply_frames_desc_phys
[0]);
521 if (!fusion
->reply_frames_desc
[0]) {
522 dev_err(&instance
->pdev
->dev
,
523 "Failed from %s %d\n", __func__
, __LINE__
);
526 reply_desc
= fusion
->reply_frames_desc
[0];
527 for (i
= 0; i
< fusion
->reply_q_depth
* count
; i
++, reply_desc
++)
528 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
530 /* This is not a rdpq mode, but driver still populate
531 * reply_frame_desc array to use same msix index in ISR path.
533 for (i
= 0; i
< (count
- 1); i
++)
534 fusion
->reply_frames_desc
[i
+ 1] =
535 fusion
->reply_frames_desc
[i
] +
536 (fusion
->reply_alloc_sz
)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION
);
542 megasas_alloc_rdpq_fusion(struct megasas_instance
*instance
)
545 struct fusion_context
*fusion
;
546 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
548 fusion
= instance
->ctrl_context
;
550 fusion
->rdpq_virt
= pci_alloc_consistent(instance
->pdev
,
551 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
,
553 if (!fusion
->rdpq_virt
) {
554 dev_err(&instance
->pdev
->dev
,
555 "Failed from %s %d\n", __func__
, __LINE__
);
559 memset(fusion
->rdpq_virt
, 0,
560 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
);
561 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
562 fusion
->reply_frames_desc_pool
= dma_pool_create("mr_rdpq",
563 &instance
->pdev
->dev
,
564 fusion
->reply_alloc_sz
,
567 if (!fusion
->reply_frames_desc_pool
) {
568 dev_err(&instance
->pdev
->dev
,
569 "Failed from %s %d\n", __func__
, __LINE__
);
573 for (i
= 0; i
< count
; i
++) {
574 fusion
->reply_frames_desc
[i
] =
575 dma_pool_alloc(fusion
->reply_frames_desc_pool
,
576 GFP_KERNEL
, &fusion
->reply_frames_desc_phys
[i
]);
577 if (!fusion
->reply_frames_desc
[i
]) {
578 dev_err(&instance
->pdev
->dev
,
579 "Failed from %s %d\n", __func__
, __LINE__
);
583 fusion
->rdpq_virt
[i
].RDPQBaseAddress
=
584 cpu_to_le64(fusion
->reply_frames_desc_phys
[i
]);
586 reply_desc
= fusion
->reply_frames_desc
[i
];
587 for (j
= 0; j
< fusion
->reply_q_depth
; j
++, reply_desc
++)
588 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
594 megasas_free_rdpq_fusion(struct megasas_instance
*instance
) {
597 struct fusion_context
*fusion
;
599 fusion
= instance
->ctrl_context
;
601 for (i
= 0; i
< MAX_MSIX_QUEUES_FUSION
; i
++) {
602 if (fusion
->reply_frames_desc
[i
])
603 dma_pool_free(fusion
->reply_frames_desc_pool
,
604 fusion
->reply_frames_desc
[i
],
605 fusion
->reply_frames_desc_phys
[i
]);
608 if (fusion
->reply_frames_desc_pool
)
609 dma_pool_destroy(fusion
->reply_frames_desc_pool
);
611 if (fusion
->rdpq_virt
)
612 pci_free_consistent(instance
->pdev
,
613 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
,
614 fusion
->rdpq_virt
, fusion
->rdpq_phys
);
618 megasas_free_reply_fusion(struct megasas_instance
*instance
) {
620 struct fusion_context
*fusion
;
622 fusion
= instance
->ctrl_context
;
624 if (fusion
->reply_frames_desc
[0])
625 dma_pool_free(fusion
->reply_frames_desc_pool
,
626 fusion
->reply_frames_desc
[0],
627 fusion
->reply_frames_desc_phys
[0]);
629 if (fusion
->reply_frames_desc_pool
)
630 dma_pool_destroy(fusion
->reply_frames_desc_pool
);
636 * megasas_alloc_cmds_fusion - Allocates the command packets
637 * @instance: Adapter soft state
640 * Each frame has a 32-bit field called context. This context is used to get
641 * back the megasas_cmd_fusion from the frame when a frame gets completed
642 * In this driver, the 32 bit values are the indices into an array cmd_list.
643 * This array is used only to look up the megasas_cmd_fusion given the context.
644 * The free commands themselves are maintained in a linked list called cmd_pool.
646 * cmds are formed in the io_request and sg_frame members of the
647 * megasas_cmd_fusion. The context field is used to get a request descriptor
648 * and is used as SMID of the cmd.
649 * SMID value range is from 1 to max_fw_cmds.
652 megasas_alloc_cmds_fusion(struct megasas_instance
*instance
)
655 struct fusion_context
*fusion
;
656 struct megasas_cmd_fusion
*cmd
;
658 dma_addr_t io_req_base_phys
;
662 fusion
= instance
->ctrl_context
;
664 if (megasas_alloc_cmdlist_fusion(instance
))
667 if (megasas_alloc_request_fusion(instance
))
670 if (instance
->is_rdpq
) {
671 if (megasas_alloc_rdpq_fusion(instance
))
674 if (megasas_alloc_reply_fusion(instance
))
678 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
679 io_req_base
= fusion
->io_request_frames
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
;
680 io_req_base_phys
= fusion
->io_request_frames_phys
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
;
683 * Add all the commands to command pool (fusion->cmd_pool)
686 /* SMID 0 is reserved. Set SMID/index from 1 */
687 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++) {
688 cmd
= fusion
->cmd_list
[i
];
689 offset
= MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
* i
;
690 memset(cmd
, 0, sizeof(struct megasas_cmd_fusion
));
694 (i
>= instance
->max_scsi_cmds
&& i
< instance
->max_fw_cmds
) ?
695 (i
- instance
->max_scsi_cmds
) :
696 (u32
)ULONG_MAX
; /* Set to Invalid */
697 cmd
->instance
= instance
;
699 (struct MPI2_RAID_SCSI_IO_REQUEST
*)
700 (io_req_base
+ offset
);
701 memset(cmd
->io_request
, 0,
702 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST
));
703 cmd
->io_request_phys_addr
= io_req_base_phys
+ offset
;
704 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
707 if (megasas_create_sg_sense_fusion(instance
))
713 megasas_free_cmds_fusion(instance
);
718 * wait_and_poll - Issues a polling command
719 * @instance: Adapter soft state
720 * @cmd: Command packet to be issued
722 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
725 wait_and_poll(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
,
729 struct megasas_header
*frame_hdr
= &cmd
->frame
->hdr
;
730 struct fusion_context
*fusion
;
732 u32 msecs
= seconds
* 1000;
734 fusion
= instance
->ctrl_context
;
736 * Wait for cmd_status to change
738 for (i
= 0; (i
< msecs
) && (frame_hdr
->cmd_status
== 0xff); i
+= 20) {
743 if (frame_hdr
->cmd_status
== MFI_STAT_INVALID_STATUS
)
745 else if (frame_hdr
->cmd_status
== MFI_STAT_OK
)
752 * megasas_ioc_init_fusion - Initializes the FW
753 * @instance: Adapter soft state
755 * Issues the IOC Init cmd
758 megasas_ioc_init_fusion(struct megasas_instance
*instance
)
760 struct megasas_init_frame
*init_frame
;
761 struct MPI2_IOC_INIT_REQUEST
*IOCInitMessage
= NULL
;
762 dma_addr_t ioc_init_handle
;
763 struct megasas_cmd
*cmd
;
764 u8 ret
, cur_rdpq_mode
;
765 struct fusion_context
*fusion
;
766 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc
;
768 struct megasas_header
*frame_hdr
;
769 const char *sys_info
;
770 MFI_CAPABILITIES
*drv_ops
;
774 fusion
= instance
->ctrl_context
;
776 cmd
= megasas_get_cmd(instance
);
779 dev_err(&instance
->pdev
->dev
, "Could not allocate cmd for INIT Frame\n");
784 scratch_pad_2
= readl
785 (&instance
->reg_set
->outbound_scratch_pad_2
);
787 cur_rdpq_mode
= (scratch_pad_2
& MR_RDPQ_MODE_OFFSET
) ? 1 : 0;
789 if (instance
->is_rdpq
&& !cur_rdpq_mode
) {
790 dev_err(&instance
->pdev
->dev
, "Firmware downgrade *NOT SUPPORTED*"
791 " from RDPQ mode to non RDPQ mode\n");
796 instance
->fw_sync_cache_support
= (scratch_pad_2
&
797 MR_CAN_HANDLE_SYNC_CACHE_OFFSET
) ? 1 : 0;
798 dev_info(&instance
->pdev
->dev
, "FW supports sync cache\t: %s\n",
799 instance
->fw_sync_cache_support
? "Yes" : "No");
802 dma_alloc_coherent(&instance
->pdev
->dev
,
803 sizeof(struct MPI2_IOC_INIT_REQUEST
),
804 &ioc_init_handle
, GFP_KERNEL
);
806 if (!IOCInitMessage
) {
807 dev_err(&instance
->pdev
->dev
, "Could not allocate memory for "
813 memset(IOCInitMessage
, 0, sizeof(struct MPI2_IOC_INIT_REQUEST
));
815 IOCInitMessage
->Function
= MPI2_FUNCTION_IOC_INIT
;
816 IOCInitMessage
->WhoInit
= MPI2_WHOINIT_HOST_DRIVER
;
817 IOCInitMessage
->MsgVersion
= cpu_to_le16(MPI2_VERSION
);
818 IOCInitMessage
->HeaderVersion
= cpu_to_le16(MPI2_HEADER_VERSION
);
819 IOCInitMessage
->SystemRequestFrameSize
= cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
/ 4);
821 IOCInitMessage
->ReplyDescriptorPostQueueDepth
= cpu_to_le16(fusion
->reply_q_depth
);
822 IOCInitMessage
->ReplyDescriptorPostQueueAddress
= instance
->is_rdpq
?
823 cpu_to_le64(fusion
->rdpq_phys
) :
824 cpu_to_le64(fusion
->reply_frames_desc_phys
[0]);
825 IOCInitMessage
->MsgFlags
= instance
->is_rdpq
?
826 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
: 0;
827 IOCInitMessage
->SystemRequestFrameBaseAddress
= cpu_to_le64(fusion
->io_request_frames_phys
);
828 IOCInitMessage
->HostMSIxVectors
= instance
->msix_vectors
;
829 IOCInitMessage
->HostPageSize
= MR_DEFAULT_NVME_PAGE_SHIFT
;
830 init_frame
= (struct megasas_init_frame
*)cmd
->frame
;
831 memset(init_frame
, 0, MEGAMFI_FRAME_SIZE
);
833 frame_hdr
= &cmd
->frame
->hdr
;
834 frame_hdr
->cmd_status
= 0xFF;
835 frame_hdr
->flags
= cpu_to_le16(
836 le16_to_cpu(frame_hdr
->flags
) |
837 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
);
839 init_frame
->cmd
= MFI_CMD_INIT
;
840 init_frame
->cmd_status
= 0xFF;
842 drv_ops
= (MFI_CAPABILITIES
*) &(init_frame
->driver_operations
);
844 /* driver support Extended MSIX */
845 if (fusion
->adapter_type
>= INVADER_SERIES
)
846 drv_ops
->mfi_capabilities
.support_additional_msix
= 1;
847 /* driver supports HA / Remote LUN over Fast Path interface */
848 drv_ops
->mfi_capabilities
.support_fp_remote_lun
= 1;
850 drv_ops
->mfi_capabilities
.support_max_255lds
= 1;
851 drv_ops
->mfi_capabilities
.support_ndrive_r1_lb
= 1;
852 drv_ops
->mfi_capabilities
.security_protocol_cmds_fw
= 1;
854 if (instance
->max_chain_frame_sz
> MEGASAS_CHAIN_FRAME_SZ_MIN
)
855 drv_ops
->mfi_capabilities
.support_ext_io_size
= 1;
857 drv_ops
->mfi_capabilities
.support_fp_rlbypass
= 1;
858 if (!dual_qdepth_disable
)
859 drv_ops
->mfi_capabilities
.support_ext_queue_depth
= 1;
861 drv_ops
->mfi_capabilities
.support_qd_throttling
= 1;
862 drv_ops
->mfi_capabilities
.support_pd_map_target_id
= 1;
863 /* Convert capability to LE32 */
864 cpu_to_le32s((u32
*)&init_frame
->driver_operations
.mfi_capabilities
);
866 sys_info
= dmi_get_system_info(DMI_PRODUCT_UUID
);
867 if (instance
->system_info_buf
&& sys_info
) {
868 memcpy(instance
->system_info_buf
->systemId
, sys_info
,
869 strlen(sys_info
) > 64 ? 64 : strlen(sys_info
));
870 instance
->system_info_buf
->systemIdLength
=
871 strlen(sys_info
) > 64 ? 64 : strlen(sys_info
);
872 init_frame
->system_info_lo
= instance
->system_info_h
;
873 init_frame
->system_info_hi
= 0;
876 init_frame
->queue_info_new_phys_addr_hi
=
877 cpu_to_le32(upper_32_bits(ioc_init_handle
));
878 init_frame
->queue_info_new_phys_addr_lo
=
879 cpu_to_le32(lower_32_bits(ioc_init_handle
));
880 init_frame
->data_xfer_len
= cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST
));
882 req_desc
.u
.low
= cpu_to_le32(lower_32_bits(cmd
->frame_phys_addr
));
883 req_desc
.u
.high
= cpu_to_le32(upper_32_bits(cmd
->frame_phys_addr
));
884 req_desc
.MFAIo
.RequestFlags
=
885 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA
<<
886 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
889 * disable the intr before firing the init frame
891 instance
->instancet
->disable_intr(instance
);
893 for (i
= 0; i
< (10 * 1000); i
+= 20) {
894 if (readl(&instance
->reg_set
->doorbell
) & 1)
900 /* For Ventura also IOC INIT required 64 bit Descriptor write. */
901 spin_lock_irqsave(&instance
->hba_lock
, flags
);
902 writel(le32_to_cpu(req_desc
.u
.low
),
903 &instance
->reg_set
->inbound_low_queue_port
);
904 writel(le32_to_cpu(req_desc
.u
.high
),
905 &instance
->reg_set
->inbound_high_queue_port
);
907 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
909 wait_and_poll(instance
, cmd
, MFI_POLL_TIMEOUT_SECS
);
911 frame_hdr
= &cmd
->frame
->hdr
;
912 if (frame_hdr
->cmd_status
!= 0) {
920 megasas_return_cmd(instance
, cmd
);
922 dma_free_coherent(&instance
->pdev
->dev
,
923 sizeof(struct MPI2_IOC_INIT_REQUEST
),
924 IOCInitMessage
, ioc_init_handle
);
926 dev_err(&instance
->pdev
->dev
,
927 "Init cmd return status %s for SCSI host %d\n",
928 ret
? "FAILED" : "SUCCESS", instance
->host
->host_no
);
934 * megasas_sync_pd_seq_num - JBOD SEQ MAP
935 * @instance: Adapter soft state
936 * @pend: set to 1, if it is pended jbod map.
938 * Issue Jbod map to the firmware. If it is pended command,
939 * issue command and return. If it is first instance of jbod map
940 * issue and receive command.
943 megasas_sync_pd_seq_num(struct megasas_instance
*instance
, bool pend
) {
946 struct megasas_cmd
*cmd
;
947 struct megasas_dcmd_frame
*dcmd
;
948 struct fusion_context
*fusion
= instance
->ctrl_context
;
949 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
952 pd_sync
= (void *)fusion
->pd_seq_sync
[(instance
->pd_seq_map_id
& 1)];
953 pd_seq_h
= fusion
->pd_seq_phys
[(instance
->pd_seq_map_id
& 1)];
954 pd_seq_map_sz
= sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC
) +
955 (sizeof(struct MR_PD_CFG_SEQ
) *
956 (MAX_PHYSICAL_DEVICES
- 1));
958 cmd
= megasas_get_cmd(instance
);
960 dev_err(&instance
->pdev
->dev
,
961 "Could not get mfi cmd. Fail from %s %d\n",
966 dcmd
= &cmd
->frame
->dcmd
;
968 memset(pd_sync
, 0, pd_seq_map_sz
);
969 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
970 dcmd
->cmd
= MFI_CMD_DCMD
;
971 dcmd
->cmd_status
= 0xFF;
975 dcmd
->data_xfer_len
= cpu_to_le32(pd_seq_map_sz
);
976 dcmd
->opcode
= cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO
);
977 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(pd_seq_h
);
978 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(pd_seq_map_sz
);
981 dcmd
->mbox
.b
[0] = MEGASAS_DCMD_MBOX_PEND_FLAG
;
982 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_WRITE
);
983 instance
->jbod_seq_cmd
= cmd
;
984 instance
->instancet
->issue_dcmd(instance
, cmd
);
988 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_READ
);
990 /* Below code is only for non pended DCMD */
991 if (instance
->ctrl_context
&& !instance
->mask_interrupts
)
992 ret
= megasas_issue_blocked_cmd(instance
, cmd
,
993 MFI_IO_TIMEOUT_SECS
);
995 ret
= megasas_issue_polled(instance
, cmd
);
997 if (le32_to_cpu(pd_sync
->count
) > MAX_PHYSICAL_DEVICES
) {
998 dev_warn(&instance
->pdev
->dev
,
999 "driver supports max %d JBOD, but FW reports %d\n",
1000 MAX_PHYSICAL_DEVICES
, le32_to_cpu(pd_sync
->count
));
1004 if (ret
== DCMD_TIMEOUT
&& instance
->ctrl_context
)
1005 megaraid_sas_kill_hba(instance
);
1007 if (ret
== DCMD_SUCCESS
)
1008 instance
->pd_seq_map_id
++;
1010 megasas_return_cmd(instance
, cmd
);
1015 * megasas_get_ld_map_info - Returns FW's ld_map structure
1016 * @instance: Adapter soft state
1017 * @pend: Pend the command or not
1018 * Issues an internal command (DCMD) to get the FW's controller PD
1019 * list structure. This information is mainly used to find out SYSTEM
1020 * supported by the FW.
1021 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
1022 * dcmd.mbox.b[0] - number of LDs being sync'd
1023 * dcmd.mbox.b[1] - 0 - complete command immediately.
1024 * - 1 - pend till config change
1025 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
1026 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
1027 * uses extended struct MR_FW_RAID_MAP_EXT
1030 megasas_get_ld_map_info(struct megasas_instance
*instance
)
1033 struct megasas_cmd
*cmd
;
1034 struct megasas_dcmd_frame
*dcmd
;
1036 dma_addr_t ci_h
= 0;
1038 struct fusion_context
*fusion
;
1040 cmd
= megasas_get_cmd(instance
);
1043 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to get cmd for map info\n");
1047 fusion
= instance
->ctrl_context
;
1050 megasas_return_cmd(instance
, cmd
);
1054 dcmd
= &cmd
->frame
->dcmd
;
1056 size_map_info
= fusion
->current_map_sz
;
1058 ci
= (void *) fusion
->ld_map
[(instance
->map_id
& 1)];
1059 ci_h
= fusion
->ld_map_phys
[(instance
->map_id
& 1)];
1062 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to alloc mem for ld_map_info\n");
1063 megasas_return_cmd(instance
, cmd
);
1067 memset(ci
, 0, fusion
->max_map_sz
);
1068 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
1069 dcmd
->cmd
= MFI_CMD_DCMD
;
1070 dcmd
->cmd_status
= 0xFF;
1071 dcmd
->sge_count
= 1;
1072 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_READ
);
1075 dcmd
->data_xfer_len
= cpu_to_le32(size_map_info
);
1076 dcmd
->opcode
= cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
);
1077 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(ci_h
);
1078 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(size_map_info
);
1080 if (instance
->ctrl_context
&& !instance
->mask_interrupts
)
1081 ret
= megasas_issue_blocked_cmd(instance
, cmd
,
1082 MFI_IO_TIMEOUT_SECS
);
1084 ret
= megasas_issue_polled(instance
, cmd
);
1086 if (ret
== DCMD_TIMEOUT
&& instance
->ctrl_context
)
1087 megaraid_sas_kill_hba(instance
);
1089 megasas_return_cmd(instance
, cmd
);
1095 megasas_get_map_info(struct megasas_instance
*instance
)
1097 struct fusion_context
*fusion
= instance
->ctrl_context
;
1099 fusion
->fast_path_io
= 0;
1100 if (!megasas_get_ld_map_info(instance
)) {
1101 if (MR_ValidateMapInfo(instance
)) {
1102 fusion
->fast_path_io
= 1;
1110 * megasas_sync_map_info - Returns FW's ld_map structure
1111 * @instance: Adapter soft state
1113 * Issues an internal command (DCMD) to get the FW's controller PD
1114 * list structure. This information is mainly used to find out SYSTEM
1115 * supported by the FW.
1118 megasas_sync_map_info(struct megasas_instance
*instance
)
1121 struct megasas_cmd
*cmd
;
1122 struct megasas_dcmd_frame
*dcmd
;
1125 struct fusion_context
*fusion
;
1126 struct MR_LD_TARGET_SYNC
*ci
= NULL
;
1127 struct MR_DRV_RAID_MAP_ALL
*map
;
1128 struct MR_LD_RAID
*raid
;
1129 struct MR_LD_TARGET_SYNC
*ld_sync
;
1130 dma_addr_t ci_h
= 0;
1133 cmd
= megasas_get_cmd(instance
);
1136 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to get cmd for sync info\n");
1140 fusion
= instance
->ctrl_context
;
1143 megasas_return_cmd(instance
, cmd
);
1147 map
= fusion
->ld_drv_map
[instance
->map_id
& 1];
1149 num_lds
= le16_to_cpu(map
->raidMap
.ldCount
);
1151 dcmd
= &cmd
->frame
->dcmd
;
1153 size_sync_info
= sizeof(struct MR_LD_TARGET_SYNC
) *num_lds
;
1155 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
1157 ci
= (struct MR_LD_TARGET_SYNC
*)
1158 fusion
->ld_map
[(instance
->map_id
- 1) & 1];
1159 memset(ci
, 0, fusion
->max_map_sz
);
1161 ci_h
= fusion
->ld_map_phys
[(instance
->map_id
- 1) & 1];
1163 ld_sync
= (struct MR_LD_TARGET_SYNC
*)ci
;
1165 for (i
= 0; i
< num_lds
; i
++, ld_sync
++) {
1166 raid
= MR_LdRaidGet(i
, map
);
1167 ld_sync
->targetId
= MR_GetLDTgtId(i
, map
);
1168 ld_sync
->seqNum
= raid
->seqNum
;
1171 size_map_info
= fusion
->current_map_sz
;
1173 dcmd
->cmd
= MFI_CMD_DCMD
;
1174 dcmd
->cmd_status
= 0xFF;
1175 dcmd
->sge_count
= 1;
1176 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_WRITE
);
1179 dcmd
->data_xfer_len
= cpu_to_le32(size_map_info
);
1180 dcmd
->mbox
.b
[0] = num_lds
;
1181 dcmd
->mbox
.b
[1] = MEGASAS_DCMD_MBOX_PEND_FLAG
;
1182 dcmd
->opcode
= cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
);
1183 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(ci_h
);
1184 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(size_map_info
);
1186 instance
->map_update_cmd
= cmd
;
1188 instance
->instancet
->issue_dcmd(instance
, cmd
);
1194 * meagasas_display_intel_branding - Display branding string
1195 * @instance: per adapter object
1200 megasas_display_intel_branding(struct megasas_instance
*instance
)
1202 if (instance
->pdev
->subsystem_vendor
!= PCI_VENDOR_ID_INTEL
)
1205 switch (instance
->pdev
->device
) {
1206 case PCI_DEVICE_ID_LSI_INVADER
:
1207 switch (instance
->pdev
->subsystem_device
) {
1208 case MEGARAID_INTEL_RS3DC080_SSDID
:
1209 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1210 instance
->host
->host_no
,
1211 MEGARAID_INTEL_RS3DC080_BRANDING
);
1213 case MEGARAID_INTEL_RS3DC040_SSDID
:
1214 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1215 instance
->host
->host_no
,
1216 MEGARAID_INTEL_RS3DC040_BRANDING
);
1218 case MEGARAID_INTEL_RS3SC008_SSDID
:
1219 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1220 instance
->host
->host_no
,
1221 MEGARAID_INTEL_RS3SC008_BRANDING
);
1223 case MEGARAID_INTEL_RS3MC044_SSDID
:
1224 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1225 instance
->host
->host_no
,
1226 MEGARAID_INTEL_RS3MC044_BRANDING
);
1232 case PCI_DEVICE_ID_LSI_FURY
:
1233 switch (instance
->pdev
->subsystem_device
) {
1234 case MEGARAID_INTEL_RS3WC080_SSDID
:
1235 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1236 instance
->host
->host_no
,
1237 MEGARAID_INTEL_RS3WC080_BRANDING
);
1239 case MEGARAID_INTEL_RS3WC040_SSDID
:
1240 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1241 instance
->host
->host_no
,
1242 MEGARAID_INTEL_RS3WC040_BRANDING
);
1248 case PCI_DEVICE_ID_LSI_CUTLASS_52
:
1249 case PCI_DEVICE_ID_LSI_CUTLASS_53
:
1250 switch (instance
->pdev
->subsystem_device
) {
1251 case MEGARAID_INTEL_RMS3BC160_SSDID
:
1252 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1253 instance
->host
->host_no
,
1254 MEGARAID_INTEL_RMS3BC160_BRANDING
);
1266 * megasas_allocate_raid_maps - Allocate memory for RAID maps
1267 * @instance: Adapter soft state
1269 * return: if success: return 0
1270 * failed: return -ENOMEM
1272 static inline int megasas_allocate_raid_maps(struct megasas_instance
*instance
)
1274 struct fusion_context
*fusion
;
1277 fusion
= instance
->ctrl_context
;
1279 fusion
->drv_map_pages
= get_order(fusion
->drv_map_sz
);
1281 for (i
= 0; i
< 2; i
++) {
1282 fusion
->ld_map
[i
] = NULL
;
1284 fusion
->ld_drv_map
[i
] = (void *)
1285 __get_free_pages(__GFP_ZERO
| GFP_KERNEL
,
1286 fusion
->drv_map_pages
);
1288 if (!fusion
->ld_drv_map
[i
]) {
1289 fusion
->ld_drv_map
[i
] = vzalloc(fusion
->drv_map_sz
);
1291 if (!fusion
->ld_drv_map
[i
]) {
1292 dev_err(&instance
->pdev
->dev
,
1293 "Could not allocate memory for local map"
1294 " size requested: %d\n",
1295 fusion
->drv_map_sz
);
1296 goto ld_drv_map_alloc_fail
;
1301 for (i
= 0; i
< 2; i
++) {
1302 fusion
->ld_map
[i
] = dma_alloc_coherent(&instance
->pdev
->dev
,
1304 &fusion
->ld_map_phys
[i
],
1306 if (!fusion
->ld_map
[i
]) {
1307 dev_err(&instance
->pdev
->dev
,
1308 "Could not allocate memory for map info %s:%d\n",
1309 __func__
, __LINE__
);
1310 goto ld_map_alloc_fail
;
1317 for (i
= 0; i
< 2; i
++) {
1318 if (fusion
->ld_map
[i
])
1319 dma_free_coherent(&instance
->pdev
->dev
,
1322 fusion
->ld_map_phys
[i
]);
1325 ld_drv_map_alloc_fail
:
1326 for (i
= 0; i
< 2; i
++) {
1327 if (fusion
->ld_drv_map
[i
]) {
1328 if (is_vmalloc_addr(fusion
->ld_drv_map
[i
]))
1329 vfree(fusion
->ld_drv_map
[i
]);
1331 free_pages((ulong
)fusion
->ld_drv_map
[i
],
1332 fusion
->drv_map_pages
);
1340 * megasas_init_adapter_fusion - Initializes the FW
1341 * @instance: Adapter soft state
1343 * This is the main function for initializing firmware.
1346 megasas_init_adapter_fusion(struct megasas_instance
*instance
)
1348 struct megasas_register_set __iomem
*reg_set
;
1349 struct fusion_context
*fusion
;
1354 fusion
= instance
->ctrl_context
;
1356 reg_set
= instance
->reg_set
;
1358 megasas_fusion_update_can_queue(instance
, PROBE_CONTEXT
);
1361 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1363 instance
->max_mfi_cmds
=
1364 MEGASAS_FUSION_INTERNAL_CMDS
+ MEGASAS_FUSION_IOCTL_CMDS
;
1366 max_cmd
= instance
->max_fw_cmds
;
1368 fusion
->reply_q_depth
= 2 * (((max_cmd
+ 1 + 15)/16)*16);
1370 fusion
->request_alloc_sz
=
1371 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION
) * instance
->max_mpt_cmds
;
1372 fusion
->reply_alloc_sz
= sizeof(union MPI2_REPLY_DESCRIPTORS_UNION
)
1373 *(fusion
->reply_q_depth
);
1374 fusion
->io_frames_alloc_sz
= MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+
1375 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1376 * (instance
->max_mpt_cmds
+ 1)); /* Extra 1 for SMID 0 */
1378 scratch_pad_2
= readl(&instance
->reg_set
->outbound_scratch_pad_2
);
1379 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1380 * Firmware support extended IO chain frame which is 4 times more than
1382 * Legacy Firmware - Frame size is (8 * 128) = 1K
1383 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1385 if (scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK
)
1386 instance
->max_chain_frame_sz
=
1387 ((scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_MASK
) >>
1388 MEGASAS_MAX_CHAIN_SHIFT
) * MEGASAS_1MB_IO
;
1390 instance
->max_chain_frame_sz
=
1391 ((scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_MASK
) >>
1392 MEGASAS_MAX_CHAIN_SHIFT
) * MEGASAS_256K_IO
;
1394 if (instance
->max_chain_frame_sz
< MEGASAS_CHAIN_FRAME_SZ_MIN
) {
1395 dev_warn(&instance
->pdev
->dev
, "frame size %d invalid, fall back to legacy max frame size %d\n",
1396 instance
->max_chain_frame_sz
,
1397 MEGASAS_CHAIN_FRAME_SZ_MIN
);
1398 instance
->max_chain_frame_sz
= MEGASAS_CHAIN_FRAME_SZ_MIN
;
1401 fusion
->max_sge_in_main_msg
=
1402 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1403 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
))/16;
1405 fusion
->max_sge_in_chain
=
1406 instance
->max_chain_frame_sz
1407 / sizeof(union MPI2_SGE_IO_UNION
);
1409 instance
->max_num_sge
=
1410 rounddown_pow_of_two(fusion
->max_sge_in_main_msg
1411 + fusion
->max_sge_in_chain
- 2);
1413 /* Used for pass thru MFI frame (DCMD) */
1414 fusion
->chain_offset_mfi_pthru
=
1415 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
)/16;
1417 fusion
->chain_offset_io_request
=
1418 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
-
1419 sizeof(union MPI2_SGE_IO_UNION
))/16;
1421 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
1422 for (i
= 0 ; i
< count
; i
++)
1423 fusion
->last_reply_idx
[i
] = 0;
1426 * For fusion adapters, 3 commands for IOCTL and 8 commands
1427 * for driver's internal DCMDs.
1429 instance
->max_scsi_cmds
= instance
->max_fw_cmds
-
1430 (MEGASAS_FUSION_INTERNAL_CMDS
+
1431 MEGASAS_FUSION_IOCTL_CMDS
);
1432 sema_init(&instance
->ioctl_sem
, MEGASAS_FUSION_IOCTL_CMDS
);
1435 * Allocate memory for descriptors
1436 * Create a pool of commands
1438 if (megasas_alloc_cmds(instance
))
1439 goto fail_alloc_mfi_cmds
;
1440 if (megasas_alloc_cmds_fusion(instance
))
1441 goto fail_alloc_cmds
;
1443 if (megasas_ioc_init_fusion(instance
))
1446 megasas_display_intel_branding(instance
);
1447 if (megasas_get_ctrl_info(instance
)) {
1448 dev_err(&instance
->pdev
->dev
,
1449 "Could not get controller info. Fail from %s %d\n",
1450 __func__
, __LINE__
);
1454 instance
->flag_ieee
= 1;
1455 instance
->r1_ldio_hint_default
= MR_R1_LDIO_PIGGYBACK_DEFAULT
;
1456 fusion
->fast_path_io
= 0;
1458 if (megasas_allocate_raid_maps(instance
))
1461 if (!megasas_get_map_info(instance
))
1462 megasas_sync_map_info(instance
);
1467 megasas_free_cmds_fusion(instance
);
1469 megasas_free_cmds(instance
);
1470 fail_alloc_mfi_cmds
:
1475 * map_cmd_status - Maps FW cmd status to OS cmd status
1476 * @cmd : Pointer to cmd
1477 * @status : status of cmd returned by FW
1478 * @ext_status : ext status of cmd returned by FW
1482 map_cmd_status(struct fusion_context
*fusion
,
1483 struct scsi_cmnd
*scmd
, u8 status
, u8 ext_status
,
1484 u32 data_length
, u8
*sense
)
1489 cmd_type
= megasas_cmd_type(scmd
);
1493 scmd
->result
= DID_OK
<< 16;
1496 case MFI_STAT_SCSI_IO_FAILED
:
1497 case MFI_STAT_LD_INIT_IN_PROGRESS
:
1498 scmd
->result
= (DID_ERROR
<< 16) | ext_status
;
1501 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
1503 scmd
->result
= (DID_OK
<< 16) | ext_status
;
1504 if (ext_status
== SAM_STAT_CHECK_CONDITION
) {
1505 memset(scmd
->sense_buffer
, 0,
1506 SCSI_SENSE_BUFFERSIZE
);
1507 memcpy(scmd
->sense_buffer
, sense
,
1508 SCSI_SENSE_BUFFERSIZE
);
1509 scmd
->result
|= DRIVER_SENSE
<< 24;
1513 * If the IO request is partially completed, then MR FW will
1514 * update "io_request->DataLength" field with actual number of
1515 * bytes transferred.Driver will set residual bytes count in
1516 * SCSI command structure.
1518 resid
= (scsi_bufflen(scmd
) - data_length
);
1519 scsi_set_resid(scmd
, resid
);
1522 ((cmd_type
== READ_WRITE_LDIO
) ||
1523 (cmd_type
== READ_WRITE_SYSPDIO
)))
1524 scmd_printk(KERN_INFO
, scmd
, "BRCM Debug mfi stat 0x%x, data len"
1525 " requested/completed 0x%x/0x%x\n",
1526 status
, scsi_bufflen(scmd
), data_length
);
1529 case MFI_STAT_LD_OFFLINE
:
1530 case MFI_STAT_DEVICE_NOT_FOUND
:
1531 scmd
->result
= DID_BAD_TARGET
<< 16;
1533 case MFI_STAT_CONFIG_SEQ_MISMATCH
:
1534 scmd
->result
= DID_IMM_RETRY
<< 16;
1537 scmd
->result
= DID_ERROR
<< 16;
1543 * megasas_is_prp_possible -
1544 * Checks if native NVMe PRPs can be built for the IO
1546 * @instance: Adapter soft state
1547 * @scmd: SCSI command from the mid-layer
1548 * @sge_count: scatter gather element count.
1550 * Returns: true: PRPs can be built
1551 * false: IEEE SGLs needs to be built
1554 megasas_is_prp_possible(struct megasas_instance
*instance
,
1555 struct scsi_cmnd
*scmd
, int sge_count
)
1557 struct fusion_context
*fusion
;
1559 u32 data_length
= 0;
1560 struct scatterlist
*sg_scmd
;
1561 bool build_prp
= false;
1562 u32 mr_nvme_pg_size
;
1564 mr_nvme_pg_size
= max_t(u32
, instance
->nvme_page_size
,
1565 MR_DEFAULT_NVME_PAGE_SIZE
);
1566 fusion
= instance
->ctrl_context
;
1567 data_length
= scsi_bufflen(scmd
);
1568 sg_scmd
= scsi_sglist(scmd
);
1571 * NVMe uses one PRP for each page (or part of a page)
1572 * look at the data length - if 4 pages or less then IEEE is OK
1573 * if > 5 pages then we need to build a native SGL
1574 * if > 4 and <= 5 pages, then check physical address of 1st SG entry
1575 * if this first size in the page is >= the residual beyond 4 pages
1576 * then use IEEE, otherwise use native SGL
1579 if (data_length
> (mr_nvme_pg_size
* 5)) {
1581 } else if ((data_length
> (mr_nvme_pg_size
* 4)) &&
1582 (data_length
<= (mr_nvme_pg_size
* 5))) {
1583 /* check if 1st SG entry size is < residual beyond 4 pages */
1584 if (sg_dma_len(sg_scmd
) < (data_length
- (mr_nvme_pg_size
* 4)))
1589 * Below code detects gaps/holes in IO data buffers.
1590 * What does holes/gaps mean?
1591 * Any SGE except first one in a SGL starts at non NVME page size
1592 * aligned address OR Any SGE except last one in a SGL ends at
1593 * non NVME page size boundary.
1595 * Driver has already informed block layer by setting boundary rules for
1596 * bio merging done at NVME page size boundary calling kernel API
1597 * blk_queue_virt_boundary inside slave_config.
1598 * Still there is possibility of IO coming with holes to driver because of
1599 * IO merging done by IO scheduler.
1601 * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
1602 * IO scheduling so no IO merging.
1604 * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
1605 * then sending IOs with holes.
1607 * Though driver can request block layer to disable IO merging by calling-
1608 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
1609 * user may tune sysfs parameter- nomerges again to 0 or 1.
1611 * If in future IO scheduling is enabled with SCSI BLK MQ,
1612 * this algorithm to detect holes will be required in driver
1613 * for SCSI BLK MQ enabled case as well.
1617 scsi_for_each_sg(scmd
, sg_scmd
, sge_count
, i
) {
1618 if ((i
!= 0) && (i
!= (sge_count
- 1))) {
1619 if (mega_mod64(sg_dma_len(sg_scmd
), mr_nvme_pg_size
) ||
1620 mega_mod64(sg_dma_address(sg_scmd
),
1623 atomic_inc(&instance
->sge_holes_type1
);
1628 if ((sge_count
> 1) && (i
== 0)) {
1629 if ((mega_mod64((sg_dma_address(sg_scmd
) +
1630 sg_dma_len(sg_scmd
)),
1631 mr_nvme_pg_size
))) {
1633 atomic_inc(&instance
->sge_holes_type2
);
1638 if ((sge_count
> 1) && (i
== (sge_count
- 1))) {
1639 if (mega_mod64(sg_dma_address(sg_scmd
),
1642 atomic_inc(&instance
->sge_holes_type3
);
1652 * megasas_make_prp_nvme -
1653 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1655 * @instance: Adapter soft state
1656 * @scmd: SCSI command from the mid-layer
1657 * @sgl_ptr: SGL to be filled in
1658 * @cmd: Fusion command frame
1659 * @sge_count: scatter gather element count.
1661 * Returns: true: PRPs are built
1662 * false: IEEE SGLs needs to be built
1665 megasas_make_prp_nvme(struct megasas_instance
*instance
, struct scsi_cmnd
*scmd
,
1666 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr
,
1667 struct megasas_cmd_fusion
*cmd
, int sge_count
)
1669 int sge_len
, offset
, num_prp_in_chain
= 0;
1670 struct MPI25_IEEE_SGE_CHAIN64
*main_chain_element
, *ptr_first_sgl
;
1672 dma_addr_t ptr_sgl_phys
;
1674 u32 page_mask
, page_mask_result
;
1675 struct scatterlist
*sg_scmd
;
1677 bool build_prp
= false;
1678 int data_len
= scsi_bufflen(scmd
);
1679 struct fusion_context
*fusion
;
1680 u32 mr_nvme_pg_size
= max_t(u32
, instance
->nvme_page_size
,
1681 MR_DEFAULT_NVME_PAGE_SIZE
);
1683 fusion
= instance
->ctrl_context
;
1685 build_prp
= megasas_is_prp_possible(instance
, scmd
, sge_count
);
1691 * Nvme has a very convoluted prp format. One prp is required
1692 * for each page or partial page. Driver need to split up OS sg_list
1693 * entries if it is longer than one page or cross a page
1694 * boundary. Driver also have to insert a PRP list pointer entry as
1695 * the last entry in each physical page of the PRP list.
1697 * NOTE: The first PRP "entry" is actually placed in the first
1698 * SGL entry in the main message as IEEE 64 format. The 2nd
1699 * entry in the main message is the chain element, and the rest
1700 * of the PRP entries are built in the contiguous pcie buffer.
1702 page_mask
= mr_nvme_pg_size
- 1;
1703 ptr_sgl
= (u64
*)cmd
->sg_frame
;
1704 ptr_sgl_phys
= cmd
->sg_frame_phys_addr
;
1705 memset(ptr_sgl
, 0, instance
->max_chain_frame_sz
);
1707 /* Build chain frame element which holds all prps except first*/
1708 main_chain_element
= (struct MPI25_IEEE_SGE_CHAIN64
*)
1709 ((u8
*)sgl_ptr
+ sizeof(struct MPI25_IEEE_SGE_CHAIN64
));
1711 main_chain_element
->Address
= cpu_to_le64(ptr_sgl_phys
);
1712 main_chain_element
->NextChainOffset
= 0;
1713 main_chain_element
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
1714 IEEE_SGE_FLAGS_SYSTEM_ADDR
|
1715 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP
;
1717 /* Build first prp, sge need not to be page aligned*/
1718 ptr_first_sgl
= sgl_ptr
;
1719 sg_scmd
= scsi_sglist(scmd
);
1720 sge_addr
= sg_dma_address(sg_scmd
);
1721 sge_len
= sg_dma_len(sg_scmd
);
1723 offset
= (u32
)(sge_addr
& page_mask
);
1724 first_prp_len
= mr_nvme_pg_size
- offset
;
1726 ptr_first_sgl
->Address
= cpu_to_le64(sge_addr
);
1727 ptr_first_sgl
->Length
= cpu_to_le32(first_prp_len
);
1729 data_len
-= first_prp_len
;
1731 if (sge_len
> first_prp_len
) {
1732 sge_addr
+= first_prp_len
;
1733 sge_len
-= first_prp_len
;
1734 } else if (sge_len
== first_prp_len
) {
1735 sg_scmd
= sg_next(sg_scmd
);
1736 sge_addr
= sg_dma_address(sg_scmd
);
1737 sge_len
= sg_dma_len(sg_scmd
);
1741 offset
= (u32
)(sge_addr
& page_mask
);
1743 /* Put PRP pointer due to page boundary*/
1744 page_mask_result
= (uintptr_t)(ptr_sgl
+ 1) & page_mask
;
1745 if (unlikely(!page_mask_result
)) {
1746 scmd_printk(KERN_NOTICE
,
1747 scmd
, "page boundary ptr_sgl: 0x%p\n",
1750 *ptr_sgl
= cpu_to_le64(ptr_sgl_phys
);
1755 *ptr_sgl
= cpu_to_le64(sge_addr
);
1760 sge_addr
+= mr_nvme_pg_size
;
1761 sge_len
-= mr_nvme_pg_size
;
1762 data_len
-= mr_nvme_pg_size
;
1770 sg_scmd
= sg_next(sg_scmd
);
1771 sge_addr
= sg_dma_address(sg_scmd
);
1772 sge_len
= sg_dma_len(sg_scmd
);
1775 main_chain_element
->Length
=
1776 cpu_to_le32(num_prp_in_chain
* sizeof(u64
));
1778 atomic_inc(&instance
->prp_sgl
);
1783 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1784 * @instance: Adapter soft state
1785 * @scp: SCSI command from the mid-layer
1786 * @sgl_ptr: SGL to be filled in
1787 * @cmd: cmd we are working on
1788 * @sge_count sge count
1792 megasas_make_sgl_fusion(struct megasas_instance
*instance
,
1793 struct scsi_cmnd
*scp
,
1794 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr
,
1795 struct megasas_cmd_fusion
*cmd
, int sge_count
)
1797 int i
, sg_processed
;
1798 struct scatterlist
*os_sgl
;
1799 struct fusion_context
*fusion
;
1801 fusion
= instance
->ctrl_context
;
1803 if (fusion
->adapter_type
>= INVADER_SERIES
) {
1804 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr_end
= sgl_ptr
;
1805 sgl_ptr_end
+= fusion
->max_sge_in_main_msg
- 1;
1806 sgl_ptr_end
->Flags
= 0;
1809 scsi_for_each_sg(scp
, os_sgl
, sge_count
, i
) {
1810 sgl_ptr
->Length
= cpu_to_le32(sg_dma_len(os_sgl
));
1811 sgl_ptr
->Address
= cpu_to_le64(sg_dma_address(os_sgl
));
1813 if (fusion
->adapter_type
>= INVADER_SERIES
)
1814 if (i
== sge_count
- 1)
1815 sgl_ptr
->Flags
= IEEE_SGE_FLAGS_END_OF_LIST
;
1817 sg_processed
= i
+ 1;
1819 if ((sg_processed
== (fusion
->max_sge_in_main_msg
- 1)) &&
1820 (sge_count
> fusion
->max_sge_in_main_msg
)) {
1822 struct MPI25_IEEE_SGE_CHAIN64
*sg_chain
;
1823 if (fusion
->adapter_type
>= INVADER_SERIES
) {
1824 if ((le16_to_cpu(cmd
->io_request
->IoFlags
) &
1825 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
) !=
1826 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
)
1827 cmd
->io_request
->ChainOffset
=
1829 chain_offset_io_request
;
1831 cmd
->io_request
->ChainOffset
= 0;
1833 cmd
->io_request
->ChainOffset
=
1834 fusion
->chain_offset_io_request
;
1837 /* Prepare chain element */
1838 sg_chain
->NextChainOffset
= 0;
1839 if (fusion
->adapter_type
>= INVADER_SERIES
)
1840 sg_chain
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
;
1843 (IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
1844 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR
);
1845 sg_chain
->Length
= cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION
) * (sge_count
- sg_processed
)));
1846 sg_chain
->Address
= cpu_to_le64(cmd
->sg_frame_phys_addr
);
1849 (struct MPI25_IEEE_SGE_CHAIN64
*)cmd
->sg_frame
;
1850 memset(sgl_ptr
, 0, instance
->max_chain_frame_sz
);
1853 atomic_inc(&instance
->ieee_sgl
);
1857 * megasas_make_sgl - Build Scatter Gather List(SGLs)
1858 * @scp: SCSI command pointer
1859 * @instance: Soft instance of controller
1860 * @cmd: Fusion command pointer
1862 * This function will build sgls based on device type.
1863 * For nvme drives, there is different way of building sgls in nvme native
1864 * format- PRPs(Physical Region Page).
1866 * Returns the number of sg lists actually used, zero if the sg lists
1867 * is NULL, or -ENOMEM if the mapping failed
1870 int megasas_make_sgl(struct megasas_instance
*instance
, struct scsi_cmnd
*scp
,
1871 struct megasas_cmd_fusion
*cmd
)
1874 bool build_prp
= false;
1875 struct MPI25_IEEE_SGE_CHAIN64
*sgl_chain64
;
1877 sge_count
= scsi_dma_map(scp
);
1879 if ((sge_count
> instance
->max_num_sge
) || (sge_count
<= 0))
1882 sgl_chain64
= (struct MPI25_IEEE_SGE_CHAIN64
*)&cmd
->io_request
->SGL
;
1883 if ((le16_to_cpu(cmd
->io_request
->IoFlags
) &
1884 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
) &&
1885 (cmd
->pd_interface
== NVME_PD
))
1886 build_prp
= megasas_make_prp_nvme(instance
, scp
, sgl_chain64
,
1890 megasas_make_sgl_fusion(instance
, scp
, sgl_chain64
,
1897 * megasas_set_pd_lba - Sets PD LBA
1899 * @cdb_len: cdb length
1900 * @start_blk: Start block of IO
1902 * Used to set the PD LBA in CDB for FP IOs
1905 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
, u8 cdb_len
,
1906 struct IO_REQUEST_INFO
*io_info
, struct scsi_cmnd
*scp
,
1907 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
, u32 ref_tag
)
1909 struct MR_LD_RAID
*raid
;
1911 u64 start_blk
= io_info
->pdBlock
;
1912 u8
*cdb
= io_request
->CDB
.CDB32
;
1913 u32 num_blocks
= io_info
->numBlocks
;
1914 u8 opcode
= 0, flagvals
= 0, groupnum
= 0, control
= 0;
1916 /* Check if T10 PI (DIF) is enabled for this LD */
1917 ld
= MR_TargetIdToLdGet(io_info
->ldTgtId
, local_map_ptr
);
1918 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
1919 if (raid
->capability
.ldPiMode
== MR_PROT_INFO_TYPE_CONTROLLER
) {
1920 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
1921 cdb
[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD
;
1922 cdb
[7] = MEGASAS_SCSI_ADDL_CDB_LEN
;
1924 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
1925 cdb
[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32
;
1927 cdb
[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32
;
1928 cdb
[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL
;
1931 cdb
[12] = (u8
)((start_blk
>> 56) & 0xff);
1932 cdb
[13] = (u8
)((start_blk
>> 48) & 0xff);
1933 cdb
[14] = (u8
)((start_blk
>> 40) & 0xff);
1934 cdb
[15] = (u8
)((start_blk
>> 32) & 0xff);
1935 cdb
[16] = (u8
)((start_blk
>> 24) & 0xff);
1936 cdb
[17] = (u8
)((start_blk
>> 16) & 0xff);
1937 cdb
[18] = (u8
)((start_blk
>> 8) & 0xff);
1938 cdb
[19] = (u8
)(start_blk
& 0xff);
1940 /* Logical block reference tag */
1941 io_request
->CDB
.EEDP32
.PrimaryReferenceTag
=
1942 cpu_to_be32(ref_tag
);
1943 io_request
->CDB
.EEDP32
.PrimaryApplicationTagMask
= cpu_to_be16(0xffff);
1944 io_request
->IoFlags
= cpu_to_le16(32); /* Specify 32-byte cdb */
1946 /* Transfer length */
1947 cdb
[28] = (u8
)((num_blocks
>> 24) & 0xff);
1948 cdb
[29] = (u8
)((num_blocks
>> 16) & 0xff);
1949 cdb
[30] = (u8
)((num_blocks
>> 8) & 0xff);
1950 cdb
[31] = (u8
)(num_blocks
& 0xff);
1952 /* set SCSI IO EEDPFlags */
1953 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
) {
1954 io_request
->EEDPFlags
= cpu_to_le16(
1955 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG
|
1956 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG
|
1957 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
|
1958 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG
|
1959 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE
|
1960 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD
);
1962 io_request
->EEDPFlags
= cpu_to_le16(
1963 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG
|
1964 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
);
1966 io_request
->Control
|= cpu_to_le32((0x4 << 26));
1967 io_request
->EEDPBlockSize
= cpu_to_le32(scp
->device
->sector_size
);
1969 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1970 if (((cdb_len
== 12) || (cdb_len
== 16)) &&
1971 (start_blk
<= 0xffffffff)) {
1972 if (cdb_len
== 16) {
1973 opcode
= cdb
[0] == READ_16
? READ_10
: WRITE_10
;
1978 opcode
= cdb
[0] == READ_12
? READ_10
: WRITE_10
;
1984 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
1991 /* Transfer length */
1992 cdb
[8] = (u8
)(num_blocks
& 0xff);
1993 cdb
[7] = (u8
)((num_blocks
>> 8) & 0xff);
1995 io_request
->IoFlags
= cpu_to_le16(10); /* Specify 10-byte cdb */
1997 } else if ((cdb_len
< 16) && (start_blk
> 0xffffffff)) {
1998 /* Convert to 16 byte CDB for large LBA's */
2001 opcode
= cdb
[0] == READ_6
? READ_16
: WRITE_16
;
2006 cdb
[0] == READ_10
? READ_16
: WRITE_16
;
2013 cdb
[0] == READ_12
? READ_16
: WRITE_16
;
2020 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
2027 /* Transfer length */
2028 cdb
[13] = (u8
)(num_blocks
& 0xff);
2029 cdb
[12] = (u8
)((num_blocks
>> 8) & 0xff);
2030 cdb
[11] = (u8
)((num_blocks
>> 16) & 0xff);
2031 cdb
[10] = (u8
)((num_blocks
>> 24) & 0xff);
2033 io_request
->IoFlags
= cpu_to_le16(16); /* Specify 16-byte cdb */
2037 /* Normal case, just load LBA here */
2041 u8 val
= cdb
[1] & 0xE0;
2042 cdb
[3] = (u8
)(start_blk
& 0xff);
2043 cdb
[2] = (u8
)((start_blk
>> 8) & 0xff);
2044 cdb
[1] = val
| ((u8
)(start_blk
>> 16) & 0x1f);
2048 cdb
[5] = (u8
)(start_blk
& 0xff);
2049 cdb
[4] = (u8
)((start_blk
>> 8) & 0xff);
2050 cdb
[3] = (u8
)((start_blk
>> 16) & 0xff);
2051 cdb
[2] = (u8
)((start_blk
>> 24) & 0xff);
2054 cdb
[5] = (u8
)(start_blk
& 0xff);
2055 cdb
[4] = (u8
)((start_blk
>> 8) & 0xff);
2056 cdb
[3] = (u8
)((start_blk
>> 16) & 0xff);
2057 cdb
[2] = (u8
)((start_blk
>> 24) & 0xff);
2060 cdb
[9] = (u8
)(start_blk
& 0xff);
2061 cdb
[8] = (u8
)((start_blk
>> 8) & 0xff);
2062 cdb
[7] = (u8
)((start_blk
>> 16) & 0xff);
2063 cdb
[6] = (u8
)((start_blk
>> 24) & 0xff);
2064 cdb
[5] = (u8
)((start_blk
>> 32) & 0xff);
2065 cdb
[4] = (u8
)((start_blk
>> 40) & 0xff);
2066 cdb
[3] = (u8
)((start_blk
>> 48) & 0xff);
2067 cdb
[2] = (u8
)((start_blk
>> 56) & 0xff);
2074 * megasas_stream_detect - stream detection on read and and write IOs
2075 * @instance: Adapter soft state
2076 * @cmd: Command to be prepared
2077 * @io_info: IO Request info
2081 /** stream detection on read and and write IOs */
2082 static void megasas_stream_detect(struct megasas_instance
*instance
,
2083 struct megasas_cmd_fusion
*cmd
,
2084 struct IO_REQUEST_INFO
*io_info
)
2086 struct fusion_context
*fusion
= instance
->ctrl_context
;
2087 u32 device_id
= io_info
->ldTgtId
;
2088 struct LD_STREAM_DETECT
*current_ld_sd
2089 = fusion
->stream_detect_by_ld
[device_id
];
2090 u32
*track_stream
= ¤t_ld_sd
->mru_bit_map
, stream_num
;
2091 u32 shifted_values
, unshifted_values
;
2092 u32 index_value_mask
, shifted_values_mask
;
2094 bool is_read_ahead
= false;
2095 struct STREAM_DETECT
*current_sd
;
2096 /* find possible stream */
2097 for (i
= 0; i
< MAX_STREAMS_TRACKED
; ++i
) {
2098 stream_num
= (*track_stream
>>
2099 (i
* BITS_PER_INDEX_STREAM
)) &
2101 current_sd
= ¤t_ld_sd
->stream_track
[stream_num
];
2102 /* if we found a stream, update the raid
2103 * context and also update the mruBitMap
2105 /* boundary condition */
2106 if ((current_sd
->next_seq_lba
) &&
2107 (io_info
->ldStartBlock
>= current_sd
->next_seq_lba
) &&
2108 (io_info
->ldStartBlock
<= (current_sd
->next_seq_lba
+ 32)) &&
2109 (current_sd
->is_read
== io_info
->isRead
)) {
2111 if ((io_info
->ldStartBlock
!= current_sd
->next_seq_lba
) &&
2112 ((!io_info
->isRead
) || (!is_read_ahead
)))
2114 * Once the API availible we need to change this.
2115 * At this point we are not allowing any gap
2119 SET_STREAM_DETECTED(cmd
->io_request
->RaidContext
.raid_context_g35
);
2120 current_sd
->next_seq_lba
=
2121 io_info
->ldStartBlock
+ io_info
->numBlocks
;
2123 * update the mruBitMap LRU
2125 shifted_values_mask
=
2126 (1 << i
* BITS_PER_INDEX_STREAM
) - 1;
2127 shifted_values
= ((*track_stream
& shifted_values_mask
)
2128 << BITS_PER_INDEX_STREAM
);
2130 STREAM_MASK
<< i
* BITS_PER_INDEX_STREAM
;
2132 *track_stream
& ~(shifted_values_mask
|
2135 unshifted_values
| shifted_values
| stream_num
;
2140 * if we did not find any stream, create a new one
2141 * from the least recently used
2143 stream_num
= (*track_stream
>>
2144 ((MAX_STREAMS_TRACKED
- 1) * BITS_PER_INDEX_STREAM
)) &
2146 current_sd
= ¤t_ld_sd
->stream_track
[stream_num
];
2147 current_sd
->is_read
= io_info
->isRead
;
2148 current_sd
->next_seq_lba
= io_info
->ldStartBlock
+ io_info
->numBlocks
;
2149 *track_stream
= (((*track_stream
& ZERO_LAST_STREAM
) << 4) | stream_num
);
2154 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2155 * affinity (cpu of the controller) and raid_flags in the raid context
2158 * @praid_context: IO RAID context
2159 * @raid: LD raid map
2160 * @fp_possible: Is fast path possible?
2161 * @is_read: Is read IO?
2165 megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION
*praid_context
,
2166 struct MR_LD_RAID
*raid
, bool fp_possible
,
2167 u8 is_read
, u32 scsi_buff_len
)
2169 u8 cpu_sel
= MR_RAID_CTX_CPUSEL_0
;
2170 struct RAID_CONTEXT_G35
*rctx_g35
;
2172 rctx_g35
= &praid_context
->raid_context_g35
;
2175 if ((raid
->cpuAffinity
.pdRead
.cpu0
) &&
2176 (raid
->cpuAffinity
.pdRead
.cpu1
))
2177 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2178 else if (raid
->cpuAffinity
.pdRead
.cpu1
)
2179 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2181 if ((raid
->cpuAffinity
.pdWrite
.cpu0
) &&
2182 (raid
->cpuAffinity
.pdWrite
.cpu1
))
2183 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2184 else if (raid
->cpuAffinity
.pdWrite
.cpu1
)
2185 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2186 /* Fast path cache by pass capable R0/R1 VD */
2187 if ((raid
->level
<= 1) &&
2188 (raid
->capability
.fp_cache_bypass_capable
)) {
2189 rctx_g35
->routing_flags
|=
2190 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT
);
2191 rctx_g35
->raid_flags
=
2192 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2193 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
);
2198 if ((raid
->cpuAffinity
.ldRead
.cpu0
) &&
2199 (raid
->cpuAffinity
.ldRead
.cpu1
))
2200 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2201 else if (raid
->cpuAffinity
.ldRead
.cpu1
)
2202 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2204 if ((raid
->cpuAffinity
.ldWrite
.cpu0
) &&
2205 (raid
->cpuAffinity
.ldWrite
.cpu1
))
2206 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2207 else if (raid
->cpuAffinity
.ldWrite
.cpu1
)
2208 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2210 if (is_stream_detected(rctx_g35
) &&
2211 ((raid
->level
== 5) || (raid
->level
== 6)) &&
2212 (raid
->writeMode
== MR_RL_WRITE_THROUGH_MODE
) &&
2213 (cpu_sel
== MR_RAID_CTX_CPUSEL_FCFS
))
2214 cpu_sel
= MR_RAID_CTX_CPUSEL_0
;
2218 rctx_g35
->routing_flags
|=
2219 (cpu_sel
<< MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT
);
2221 /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2222 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
2223 * IO Subtype is not bitmap.
2225 if ((raid
->level
== 1) && (!is_read
)) {
2226 if (scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
)
2227 praid_context
->raid_context_g35
.raid_flags
=
2228 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2229 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
);
2234 * megasas_build_ldio_fusion - Prepares IOs to devices
2235 * @instance: Adapter soft state
2236 * @scp: SCSI command
2237 * @cmd: Command to be prepared
2239 * Prepares the io_request and chain elements (sg_frame) for IO
2240 * The IO can be for PD (Fast Path) or LD
2243 megasas_build_ldio_fusion(struct megasas_instance
*instance
,
2244 struct scsi_cmnd
*scp
,
2245 struct megasas_cmd_fusion
*cmd
)
2249 u32 start_lba_lo
, start_lba_hi
, device_id
, datalength
= 0;
2251 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2252 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
2253 struct IO_REQUEST_INFO io_info
;
2254 struct fusion_context
*fusion
;
2255 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2257 unsigned long spinlock_flags
;
2258 union RAID_CONTEXT_UNION
*praid_context
;
2259 struct MR_LD_RAID
*raid
= NULL
;
2260 struct MR_PRIV_DEVICE
*mrdev_priv
;
2262 device_id
= MEGASAS_DEV_INDEX(scp
);
2264 fusion
= instance
->ctrl_context
;
2266 io_request
= cmd
->io_request
;
2267 io_request
->RaidContext
.raid_context
.virtual_disk_tgt_id
=
2268 cpu_to_le16(device_id
);
2269 io_request
->RaidContext
.raid_context
.status
= 0;
2270 io_request
->RaidContext
.raid_context
.ex_status
= 0;
2272 req_desc
= (union MEGASAS_REQUEST_DESCRIPTOR_UNION
*)cmd
->request_desc
;
2276 fp_possible
= false;
2279 * 6-byte READ(0x08) or WRITE(0x0A) cdb
2281 if (scp
->cmd_len
== 6) {
2282 datalength
= (u32
) scp
->cmnd
[4];
2283 start_lba_lo
= ((u32
) scp
->cmnd
[1] << 16) |
2284 ((u32
) scp
->cmnd
[2] << 8) | (u32
) scp
->cmnd
[3];
2286 start_lba_lo
&= 0x1FFFFF;
2290 * 10-byte READ(0x28) or WRITE(0x2A) cdb
2292 else if (scp
->cmd_len
== 10) {
2293 datalength
= (u32
) scp
->cmnd
[8] |
2294 ((u32
) scp
->cmnd
[7] << 8);
2295 start_lba_lo
= ((u32
) scp
->cmnd
[2] << 24) |
2296 ((u32
) scp
->cmnd
[3] << 16) |
2297 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2301 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
2303 else if (scp
->cmd_len
== 12) {
2304 datalength
= ((u32
) scp
->cmnd
[6] << 24) |
2305 ((u32
) scp
->cmnd
[7] << 16) |
2306 ((u32
) scp
->cmnd
[8] << 8) | (u32
) scp
->cmnd
[9];
2307 start_lba_lo
= ((u32
) scp
->cmnd
[2] << 24) |
2308 ((u32
) scp
->cmnd
[3] << 16) |
2309 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2313 * 16-byte READ(0x88) or WRITE(0x8A) cdb
2315 else if (scp
->cmd_len
== 16) {
2316 datalength
= ((u32
) scp
->cmnd
[10] << 24) |
2317 ((u32
) scp
->cmnd
[11] << 16) |
2318 ((u32
) scp
->cmnd
[12] << 8) | (u32
) scp
->cmnd
[13];
2319 start_lba_lo
= ((u32
) scp
->cmnd
[6] << 24) |
2320 ((u32
) scp
->cmnd
[7] << 16) |
2321 ((u32
) scp
->cmnd
[8] << 8) | (u32
) scp
->cmnd
[9];
2323 start_lba_hi
= ((u32
) scp
->cmnd
[2] << 24) |
2324 ((u32
) scp
->cmnd
[3] << 16) |
2325 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2328 memset(&io_info
, 0, sizeof(struct IO_REQUEST_INFO
));
2329 io_info
.ldStartBlock
= ((u64
)start_lba_hi
<< 32) | start_lba_lo
;
2330 io_info
.numBlocks
= datalength
;
2331 io_info
.ldTgtId
= device_id
;
2332 io_info
.r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
2333 scsi_buff_len
= scsi_bufflen(scp
);
2334 io_request
->DataLength
= cpu_to_le32(scsi_buff_len
);
2336 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
2339 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2340 ld
= MR_TargetIdToLdGet(device_id
, local_map_ptr
);
2342 if (ld
< instance
->fw_supported_vd_count
)
2343 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
2345 if (!raid
|| (!fusion
->fast_path_io
)) {
2346 io_request
->RaidContext
.raid_context
.reg_lock_flags
= 0;
2347 fp_possible
= false;
2349 if (MR_BuildRaidContext(instance
, &io_info
,
2350 &io_request
->RaidContext
.raid_context
,
2351 local_map_ptr
, &raidLUN
))
2352 fp_possible
= (io_info
.fpOkForIo
> 0) ? true : false;
2355 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
2356 id by default, not CPU group id, otherwise all MSI-X queues won't
2358 cmd
->request_desc
->SCSIIO
.MSIxIndex
= instance
->msix_vectors
?
2359 raw_smp_processor_id() % instance
->msix_vectors
: 0;
2361 praid_context
= &io_request
->RaidContext
;
2363 if (instance
->is_ventura
) {
2364 spin_lock_irqsave(&instance
->stream_lock
, spinlock_flags
);
2365 megasas_stream_detect(instance
, cmd
, &io_info
);
2366 spin_unlock_irqrestore(&instance
->stream_lock
, spinlock_flags
);
2367 /* In ventura if stream detected for a read and it is read ahead
2368 * capable make this IO as LDIO
2370 if (is_stream_detected(&io_request
->RaidContext
.raid_context_g35
) &&
2371 io_info
.isRead
&& io_info
.ra_capable
)
2372 fp_possible
= false;
2374 /* FP for Optimal raid level 1.
2375 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2376 * are built by the driver as LD I/Os.
2377 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2378 * (there is never a reason to process these as buffered writes)
2379 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2380 * with the SLD bit asserted.
2382 if (io_info
.r1_alt_dev_handle
!= MR_DEVHANDLE_INVALID
) {
2383 mrdev_priv
= scp
->device
->hostdata
;
2385 if (atomic_inc_return(&instance
->fw_outstanding
) >
2386 (instance
->host
->can_queue
)) {
2387 fp_possible
= false;
2388 atomic_dec(&instance
->fw_outstanding
);
2389 } else if ((scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
) ||
2390 (atomic_dec_if_positive(&mrdev_priv
->r1_ldio_hint
) > 0)) {
2391 fp_possible
= false;
2392 atomic_dec(&instance
->fw_outstanding
);
2393 if (scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
)
2394 atomic_set(&mrdev_priv
->r1_ldio_hint
,
2395 instance
->r1_ldio_hint_default
);
2399 /* If raid is NULL, set CPU affinity to default CPU0 */
2401 megasas_set_raidflag_cpu_affinity(praid_context
,
2402 raid
, fp_possible
, io_info
.isRead
,
2405 praid_context
->raid_context_g35
.routing_flags
|=
2406 (MR_RAID_CTX_CPUSEL_0
<< MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT
);
2410 megasas_set_pd_lba(io_request
, scp
->cmd_len
, &io_info
, scp
,
2411 local_map_ptr
, start_lba_lo
);
2412 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2413 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2414 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2415 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2416 if (fusion
->adapter_type
== INVADER_SERIES
) {
2417 if (io_request
->RaidContext
.raid_context
.reg_lock_flags
==
2419 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2420 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK
<<
2421 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2422 io_request
->RaidContext
.raid_context
.type
2424 io_request
->RaidContext
.raid_context
.nseg
= 0x1;
2425 io_request
->IoFlags
|= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2426 io_request
->RaidContext
.raid_context
.reg_lock_flags
|=
2427 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA
|
2428 MR_RL_FLAGS_SEQ_NUM_ENABLE
);
2429 } else if (instance
->is_ventura
) {
2430 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2431 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2432 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2433 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2434 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2435 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2436 io_request
->IoFlags
|=
2437 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2439 if (fusion
->load_balance_info
&&
2440 (fusion
->load_balance_info
[device_id
].loadBalanceFlag
) &&
2443 get_updated_dev_handle(instance
,
2444 &fusion
->load_balance_info
[device_id
],
2445 &io_info
, local_map_ptr
);
2446 scp
->SCp
.Status
|= MEGASAS_LOAD_BALANCE_FLAG
;
2447 cmd
->pd_r1_lb
= io_info
.pd_after_lb
;
2448 if (instance
->is_ventura
)
2449 io_request
->RaidContext
.raid_context_g35
.span_arm
2452 io_request
->RaidContext
.raid_context
.span_arm
2456 scp
->SCp
.Status
&= ~MEGASAS_LOAD_BALANCE_FLAG
;
2458 if (instance
->is_ventura
)
2459 cmd
->r1_alt_dev_handle
= io_info
.r1_alt_dev_handle
;
2461 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
2463 if ((raidLUN
[0] == 1) &&
2464 (local_map_ptr
->raidMap
.devHndlInfo
[io_info
.pd_after_lb
].validHandles
> 1)) {
2465 instance
->dev_handle
= !(instance
->dev_handle
);
2467 local_map_ptr
->raidMap
.devHndlInfo
[io_info
.pd_after_lb
].devHandle
[instance
->dev_handle
];
2470 cmd
->request_desc
->SCSIIO
.DevHandle
= io_info
.devHandle
;
2471 io_request
->DevHandle
= io_info
.devHandle
;
2472 cmd
->pd_interface
= io_info
.pd_interface
;
2473 /* populate the LUN field */
2474 memcpy(io_request
->LUN
, raidLUN
, 8);
2476 io_request
->RaidContext
.raid_context
.timeout_value
=
2477 cpu_to_le16(local_map_ptr
->raidMap
.fpPdIoTimeoutSec
);
2478 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2479 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2480 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2481 if (fusion
->adapter_type
== INVADER_SERIES
) {
2482 if (io_info
.do_fp_rlbypass
||
2483 (io_request
->RaidContext
.raid_context
.reg_lock_flags
2484 == REGION_TYPE_UNUSED
))
2485 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2486 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK
<<
2487 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2488 io_request
->RaidContext
.raid_context
.type
2490 io_request
->RaidContext
.raid_context
.reg_lock_flags
|=
2491 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0
|
2492 MR_RL_FLAGS_SEQ_NUM_ENABLE
);
2493 io_request
->RaidContext
.raid_context
.nseg
= 0x1;
2494 } else if (instance
->is_ventura
) {
2495 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2496 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2497 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2498 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2499 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2500 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2502 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2503 io_request
->DevHandle
= cpu_to_le16(device_id
);
2509 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
2510 * @instance: Adapter soft state
2511 * @scp: SCSI command
2512 * @cmd: Command to be prepared
2514 * Prepares the io_request frame for non-rw io cmds for vd.
2516 static void megasas_build_ld_nonrw_fusion(struct megasas_instance
*instance
,
2517 struct scsi_cmnd
*scmd
, struct megasas_cmd_fusion
*cmd
)
2520 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2522 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2523 struct fusion_context
*fusion
= instance
->ctrl_context
;
2527 struct MR_LD_RAID
*raid
;
2528 struct RAID_CONTEXT
*pRAID_Context
;
2531 io_request
= cmd
->io_request
;
2532 device_id
= MEGASAS_DEV_INDEX(scmd
);
2533 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2534 io_request
->DataLength
= cpu_to_le32(scsi_bufflen(scmd
));
2535 /* get RAID_Context pointer */
2536 pRAID_Context
= &io_request
->RaidContext
.raid_context
;
2537 /* Check with FW team */
2538 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2539 pRAID_Context
->reg_lock_row_lba
= 0;
2540 pRAID_Context
->reg_lock_length
= 0;
2542 if (fusion
->fast_path_io
&& (
2543 device_id
< instance
->fw_supported_vd_count
)) {
2545 ld
= MR_TargetIdToLdGet(device_id
, local_map_ptr
);
2546 if (ld
>= instance
->fw_supported_vd_count
)
2549 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
2550 if (!(raid
->capability
.fpNonRWCapable
))
2557 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2558 io_request
->DevHandle
= cpu_to_le16(device_id
);
2559 io_request
->LUN
[1] = scmd
->device
->lun
;
2560 pRAID_Context
->timeout_value
=
2561 cpu_to_le16 (scmd
->request
->timeout
/ HZ
);
2562 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2563 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
2564 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2567 /* set RAID context values */
2568 pRAID_Context
->config_seq_num
= raid
->seqNum
;
2569 if (!instance
->is_ventura
)
2570 pRAID_Context
->reg_lock_flags
= REGION_TYPE_SHARED_READ
;
2571 pRAID_Context
->timeout_value
=
2572 cpu_to_le16(raid
->fpIoTimeoutForLd
);
2574 /* get the DevHandle for the PD (since this is
2575 fpNonRWCapable, this is a single disk RAID0) */
2577 arRef
= MR_LdSpanArrayGet(ld
, span
, local_map_ptr
);
2578 pd
= MR_ArPdGet(arRef
, physArm
, local_map_ptr
);
2579 devHandle
= MR_PdDevHandleGet(pd
, local_map_ptr
);
2581 /* build request descriptor */
2582 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2583 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<<
2584 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2585 cmd
->request_desc
->SCSIIO
.DevHandle
= devHandle
;
2587 /* populate the LUN field */
2588 memcpy(io_request
->LUN
, raid
->LUN
, 8);
2590 /* build the raidScsiIO structure */
2591 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2592 io_request
->DevHandle
= devHandle
;
2597 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
2598 * @instance: Adapter soft state
2599 * @scp: SCSI command
2600 * @cmd: Command to be prepared
2601 * @fp_possible: parameter to detect fast path or firmware path io.
2603 * Prepares the io_request frame for rw/non-rw io cmds for syspds
2606 megasas_build_syspd_fusion(struct megasas_instance
*instance
,
2607 struct scsi_cmnd
*scmd
, struct megasas_cmd_fusion
*cmd
,
2611 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2613 u16 os_timeout_value
;
2615 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2616 struct RAID_CONTEXT
*pRAID_Context
;
2617 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
2618 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
2619 struct fusion_context
*fusion
= instance
->ctrl_context
;
2620 pd_sync
= (void *)fusion
->pd_seq_sync
[(instance
->pd_seq_map_id
- 1) & 1];
2622 device_id
= MEGASAS_DEV_INDEX(scmd
);
2623 pd_index
= MEGASAS_PD_INDEX(scmd
);
2624 os_timeout_value
= scmd
->request
->timeout
/ HZ
;
2625 mr_device_priv_data
= scmd
->device
->hostdata
;
2626 cmd
->pd_interface
= mr_device_priv_data
->interface_type
;
2628 io_request
= cmd
->io_request
;
2629 /* get RAID_Context pointer */
2630 pRAID_Context
= &io_request
->RaidContext
.raid_context
;
2631 pRAID_Context
->reg_lock_flags
= 0;
2632 pRAID_Context
->reg_lock_row_lba
= 0;
2633 pRAID_Context
->reg_lock_length
= 0;
2634 io_request
->DataLength
= cpu_to_le32(scsi_bufflen(scmd
));
2635 io_request
->LUN
[1] = scmd
->device
->lun
;
2636 pRAID_Context
->raid_flags
= MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
2637 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
;
2639 /* If FW supports PD sequence number */
2640 if (instance
->use_seqnum_jbod_fp
&&
2641 instance
->pd_list
[pd_index
].driveType
== TYPE_DISK
) {
2642 /* TgtId must be incremented by 255 as jbod seq number is index
2645 /* More than 256 PD/JBOD support for Ventura */
2646 if (instance
->support_morethan256jbod
)
2647 pRAID_Context
->virtual_disk_tgt_id
=
2648 pd_sync
->seq
[pd_index
].pd_target_id
;
2650 pRAID_Context
->virtual_disk_tgt_id
=
2651 cpu_to_le16(device_id
+ (MAX_PHYSICAL_DEVICES
- 1));
2652 pRAID_Context
->config_seq_num
= pd_sync
->seq
[pd_index
].seqNum
;
2653 io_request
->DevHandle
= pd_sync
->seq
[pd_index
].devHandle
;
2654 if (instance
->is_ventura
) {
2655 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2656 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2657 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2658 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2659 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2660 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2662 pRAID_Context
->type
= MPI2_TYPE_CUDA
;
2663 pRAID_Context
->nseg
= 0x1;
2664 pRAID_Context
->reg_lock_flags
|=
2665 (MR_RL_FLAGS_SEQ_NUM_ENABLE
|MR_RL_FLAGS_GRANT_DESTINATION_CUDA
);
2667 } else if (fusion
->fast_path_io
) {
2668 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2669 pRAID_Context
->config_seq_num
= 0;
2670 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2671 io_request
->DevHandle
=
2672 local_map_ptr
->raidMap
.devHndlInfo
[device_id
].curDevHdl
;
2674 /* Want to send all IO via FW path */
2675 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2676 pRAID_Context
->config_seq_num
= 0;
2677 io_request
->DevHandle
= cpu_to_le16(0xFFFF);
2680 cmd
->request_desc
->SCSIIO
.DevHandle
= io_request
->DevHandle
;
2681 cmd
->request_desc
->SCSIIO
.MSIxIndex
=
2682 instance
->msix_vectors
?
2683 (raw_smp_processor_id() % instance
->msix_vectors
) : 0;
2687 /* system pd firmware path */
2688 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2689 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2690 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
2691 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2692 pRAID_Context
->timeout_value
= cpu_to_le16(os_timeout_value
);
2693 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2695 /* system pd Fast Path */
2696 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2697 timeout_limit
= (scmd
->device
->type
== TYPE_DISK
) ?
2699 pRAID_Context
->timeout_value
=
2700 cpu_to_le16((os_timeout_value
> timeout_limit
) ?
2701 timeout_limit
: os_timeout_value
);
2702 if (fusion
->adapter_type
>= INVADER_SERIES
)
2703 io_request
->IoFlags
|=
2704 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2706 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2707 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<<
2708 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2713 * megasas_build_io_fusion - Prepares IOs to devices
2714 * @instance: Adapter soft state
2715 * @scp: SCSI command
2716 * @cmd: Command to be prepared
2718 * Invokes helper functions to prepare request frames
2719 * and sets flags appropriate for IO/Non-IO cmd
2722 megasas_build_io_fusion(struct megasas_instance
*instance
,
2723 struct scsi_cmnd
*scp
,
2724 struct megasas_cmd_fusion
*cmd
)
2728 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
= cmd
->io_request
;
2729 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
2730 mr_device_priv_data
= scp
->device
->hostdata
;
2732 /* Zero out some fields so they don't get reused */
2733 memset(io_request
->LUN
, 0x0, 8);
2734 io_request
->CDB
.EEDP32
.PrimaryReferenceTag
= 0;
2735 io_request
->CDB
.EEDP32
.PrimaryApplicationTagMask
= 0;
2736 io_request
->EEDPFlags
= 0;
2737 io_request
->Control
= 0;
2738 io_request
->EEDPBlockSize
= 0;
2739 io_request
->ChainOffset
= 0;
2740 io_request
->RaidContext
.raid_context
.raid_flags
= 0;
2741 io_request
->RaidContext
.raid_context
.type
= 0;
2742 io_request
->RaidContext
.raid_context
.nseg
= 0;
2744 memcpy(io_request
->CDB
.CDB32
, scp
->cmnd
, scp
->cmd_len
);
2746 * Just the CDB length,rest of the Flags are zero
2747 * This will be modified for FP in build_ldio_fusion
2749 io_request
->IoFlags
= cpu_to_le16(scp
->cmd_len
);
2751 switch (cmd_type
= megasas_cmd_type(scp
)) {
2752 case READ_WRITE_LDIO
:
2753 megasas_build_ldio_fusion(instance
, scp
, cmd
);
2755 case NON_READ_WRITE_LDIO
:
2756 megasas_build_ld_nonrw_fusion(instance
, scp
, cmd
);
2758 case READ_WRITE_SYSPDIO
:
2759 megasas_build_syspd_fusion(instance
, scp
, cmd
, true);
2761 case NON_READ_WRITE_SYSPDIO
:
2762 if (instance
->secure_jbod_support
||
2763 mr_device_priv_data
->is_tm_capable
)
2764 megasas_build_syspd_fusion(instance
, scp
, cmd
, false);
2766 megasas_build_syspd_fusion(instance
, scp
, cmd
, true);
2776 sge_count
= megasas_make_sgl(instance
, scp
, cmd
);
2778 if (sge_count
> instance
->max_num_sge
|| (sge_count
< 0)) {
2779 dev_err(&instance
->pdev
->dev
,
2780 "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
2781 __func__
, __LINE__
, sge_count
, instance
->max_num_sge
);
2785 if (instance
->is_ventura
) {
2786 set_num_sge(&io_request
->RaidContext
.raid_context_g35
, sge_count
);
2787 cpu_to_le16s(&io_request
->RaidContext
.raid_context_g35
.routing_flags
);
2788 cpu_to_le16s(&io_request
->RaidContext
.raid_context_g35
.nseg_type
);
2790 /* numSGE store lower 8 bit of sge_count.
2791 * numSGEExt store higher 8 bit of sge_count
2793 io_request
->RaidContext
.raid_context
.num_sge
= sge_count
;
2794 io_request
->RaidContext
.raid_context
.num_sge_ext
=
2795 (u8
)(sge_count
>> 8);
2798 io_request
->SGLFlags
= cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING
);
2800 if (scp
->sc_data_direction
== PCI_DMA_TODEVICE
)
2801 io_request
->Control
|= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE
);
2802 else if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
2803 io_request
->Control
|= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ
);
2805 io_request
->SGLOffset0
=
2806 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
) / 4;
2808 io_request
->SenseBufferLowAddress
= cpu_to_le32(cmd
->sense_phys_addr
);
2809 io_request
->SenseBufferLength
= SCSI_SENSE_BUFFERSIZE
;
2812 scp
->SCp
.ptr
= (char *)cmd
;
2817 static union MEGASAS_REQUEST_DESCRIPTOR_UNION
*
2818 megasas_get_request_descriptor(struct megasas_instance
*instance
, u16 index
)
2821 struct fusion_context
*fusion
;
2823 fusion
= instance
->ctrl_context
;
2824 p
= fusion
->req_frames_desc
+
2825 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION
) * index
;
2827 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION
*)p
;
2831 /* megasas_prepate_secondRaid1_IO
2832 * It prepares the raid 1 second IO
2834 void megasas_prepare_secondRaid1_IO(struct megasas_instance
*instance
,
2835 struct megasas_cmd_fusion
*cmd
,
2836 struct megasas_cmd_fusion
*r1_cmd
)
2838 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
, *req_desc2
= NULL
;
2839 struct fusion_context
*fusion
;
2840 fusion
= instance
->ctrl_context
;
2841 req_desc
= cmd
->request_desc
;
2842 /* copy the io request frame as well as 8 SGEs data for r1 command*/
2843 memcpy(r1_cmd
->io_request
, cmd
->io_request
,
2844 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST
)));
2845 memcpy(&r1_cmd
->io_request
->SGL
, &cmd
->io_request
->SGL
,
2846 (fusion
->max_sge_in_main_msg
* sizeof(union MPI2_SGE_IO_UNION
)));
2847 /*sense buffer is different for r1 command*/
2848 r1_cmd
->io_request
->SenseBufferLowAddress
=
2849 cpu_to_le32(r1_cmd
->sense_phys_addr
);
2850 r1_cmd
->scmd
= cmd
->scmd
;
2851 req_desc2
= megasas_get_request_descriptor(instance
,
2852 (r1_cmd
->index
- 1));
2853 req_desc2
->Words
= 0;
2854 r1_cmd
->request_desc
= req_desc2
;
2855 req_desc2
->SCSIIO
.SMID
= cpu_to_le16(r1_cmd
->index
);
2856 req_desc2
->SCSIIO
.RequestFlags
= req_desc
->SCSIIO
.RequestFlags
;
2857 r1_cmd
->request_desc
->SCSIIO
.DevHandle
= cmd
->r1_alt_dev_handle
;
2858 r1_cmd
->io_request
->DevHandle
= cmd
->r1_alt_dev_handle
;
2859 r1_cmd
->r1_alt_dev_handle
= cmd
->io_request
->DevHandle
;
2860 cmd
->io_request
->RaidContext
.raid_context_g35
.smid
.peer_smid
=
2861 cpu_to_le16(r1_cmd
->index
);
2862 r1_cmd
->io_request
->RaidContext
.raid_context_g35
.smid
.peer_smid
=
2863 cpu_to_le16(cmd
->index
);
2864 /*MSIxIndex of both commands request descriptors should be same*/
2865 r1_cmd
->request_desc
->SCSIIO
.MSIxIndex
=
2866 cmd
->request_desc
->SCSIIO
.MSIxIndex
;
2867 /*span arm is different for r1 cmd*/
2868 r1_cmd
->io_request
->RaidContext
.raid_context_g35
.span_arm
=
2869 cmd
->io_request
->RaidContext
.raid_context_g35
.span_arm
+ 1;
2873 * megasas_build_and_issue_cmd_fusion -Main routine for building and
2874 * issuing non IOCTL cmd
2875 * @instance: Adapter soft state
2876 * @scmd: pointer to scsi cmd from OS
2879 megasas_build_and_issue_cmd_fusion(struct megasas_instance
*instance
,
2880 struct scsi_cmnd
*scmd
)
2882 struct megasas_cmd_fusion
*cmd
, *r1_cmd
= NULL
;
2883 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
2885 struct fusion_context
*fusion
;
2887 fusion
= instance
->ctrl_context
;
2889 if ((megasas_cmd_type(scmd
) == READ_WRITE_LDIO
) &&
2890 instance
->ldio_threshold
&&
2891 (atomic_inc_return(&instance
->ldio_outstanding
) >
2892 instance
->ldio_threshold
)) {
2893 atomic_dec(&instance
->ldio_outstanding
);
2894 return SCSI_MLQUEUE_DEVICE_BUSY
;
2897 if (atomic_inc_return(&instance
->fw_outstanding
) >
2898 instance
->host
->can_queue
) {
2899 atomic_dec(&instance
->fw_outstanding
);
2900 return SCSI_MLQUEUE_HOST_BUSY
;
2903 cmd
= megasas_get_cmd_fusion(instance
, scmd
->request
->tag
);
2906 atomic_dec(&instance
->fw_outstanding
);
2907 return SCSI_MLQUEUE_HOST_BUSY
;
2912 req_desc
= megasas_get_request_descriptor(instance
, index
-1);
2914 req_desc
->Words
= 0;
2915 cmd
->request_desc
= req_desc
;
2917 if (megasas_build_io_fusion(instance
, scmd
, cmd
)) {
2918 megasas_return_cmd_fusion(instance
, cmd
);
2919 dev_err(&instance
->pdev
->dev
, "Error building command\n");
2920 cmd
->request_desc
= NULL
;
2921 atomic_dec(&instance
->fw_outstanding
);
2922 return SCSI_MLQUEUE_HOST_BUSY
;
2925 req_desc
= cmd
->request_desc
;
2926 req_desc
->SCSIIO
.SMID
= cpu_to_le16(index
);
2928 if (cmd
->io_request
->ChainOffset
!= 0 &&
2929 cmd
->io_request
->ChainOffset
!= 0xF)
2930 dev_err(&instance
->pdev
->dev
, "The chain offset value is not "
2931 "correct : %x\n", cmd
->io_request
->ChainOffset
);
2933 * if it is raid 1/10 fp write capable.
2934 * try to get second command from pool and construct it.
2935 * From FW, it has confirmed that lba values of two PDs
2936 * corresponds to single R1/10 LD are always same
2939 /* driver side count always should be less than max_fw_cmds
2940 * to get new command
2942 if (cmd
->r1_alt_dev_handle
!= MR_DEVHANDLE_INVALID
) {
2943 r1_cmd
= megasas_get_cmd_fusion(instance
,
2944 (scmd
->request
->tag
+ instance
->max_fw_cmds
));
2945 megasas_prepare_secondRaid1_IO(instance
, cmd
, r1_cmd
);
2950 * Issue the command to the FW
2953 megasas_fire_cmd_fusion(instance
, req_desc
);
2956 megasas_fire_cmd_fusion(instance
, r1_cmd
->request_desc
);
2963 * megasas_complete_r1_command -
2964 * completes R1 FP write commands which has valid peer smid
2965 * @instance: Adapter soft state
2966 * @cmd_fusion: MPT command frame
2970 megasas_complete_r1_command(struct megasas_instance
*instance
,
2971 struct megasas_cmd_fusion
*cmd
)
2973 u8
*sense
, status
, ex_status
;
2976 struct fusion_context
*fusion
;
2977 struct megasas_cmd_fusion
*r1_cmd
= NULL
;
2978 struct scsi_cmnd
*scmd_local
= NULL
;
2979 struct RAID_CONTEXT_G35
*rctx_g35
;
2981 rctx_g35
= &cmd
->io_request
->RaidContext
.raid_context_g35
;
2982 fusion
= instance
->ctrl_context
;
2983 peer_smid
= le16_to_cpu(rctx_g35
->smid
.peer_smid
);
2985 r1_cmd
= fusion
->cmd_list
[peer_smid
- 1];
2986 scmd_local
= cmd
->scmd
;
2987 status
= rctx_g35
->status
;
2988 ex_status
= rctx_g35
->ex_status
;
2989 data_length
= cmd
->io_request
->DataLength
;
2992 cmd
->cmd_completed
= true;
2994 /* Check if peer command is completed or not*/
2995 if (r1_cmd
->cmd_completed
) {
2996 rctx_g35
= &r1_cmd
->io_request
->RaidContext
.raid_context_g35
;
2997 if (rctx_g35
->status
!= MFI_STAT_OK
) {
2998 status
= rctx_g35
->status
;
2999 ex_status
= rctx_g35
->ex_status
;
3000 data_length
= r1_cmd
->io_request
->DataLength
;
3001 sense
= r1_cmd
->sense
;
3004 megasas_return_cmd_fusion(instance
, r1_cmd
);
3005 map_cmd_status(fusion
, scmd_local
, status
, ex_status
,
3006 le32_to_cpu(data_length
), sense
);
3007 if (instance
->ldio_threshold
&&
3008 megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
)
3009 atomic_dec(&instance
->ldio_outstanding
);
3010 scmd_local
->SCp
.ptr
= NULL
;
3011 megasas_return_cmd_fusion(instance
, cmd
);
3012 scsi_dma_unmap(scmd_local
);
3013 scmd_local
->scsi_done(scmd_local
);
3018 * complete_cmd_fusion - Completes command
3019 * @instance: Adapter soft state
3020 * Completes all commands that is in reply descriptor queue
3023 complete_cmd_fusion(struct megasas_instance
*instance
, u32 MSIxIndex
)
3025 union MPI2_REPLY_DESCRIPTORS_UNION
*desc
;
3026 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*reply_desc
;
3027 struct MPI2_RAID_SCSI_IO_REQUEST
*scsi_io_req
;
3028 struct fusion_context
*fusion
;
3029 struct megasas_cmd
*cmd_mfi
;
3030 struct megasas_cmd_fusion
*cmd_fusion
;
3031 u16 smid
, num_completed
;
3032 u8 reply_descript_type
, *sense
, status
, extStatus
;
3033 u32 device_id
, data_length
;
3034 union desc_value d_val
;
3035 struct LD_LOAD_BALANCE_INFO
*lbinfo
;
3036 int threshold_reply_count
= 0;
3037 struct scsi_cmnd
*scmd_local
= NULL
;
3038 struct MR_TASK_MANAGE_REQUEST
*mr_tm_req
;
3039 struct MPI2_SCSI_TASK_MANAGE_REQUEST
*mpi_tm_req
;
3041 fusion
= instance
->ctrl_context
;
3043 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
)
3046 desc
= fusion
->reply_frames_desc
[MSIxIndex
] +
3047 fusion
->last_reply_idx
[MSIxIndex
];
3049 reply_desc
= (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*)desc
;
3051 d_val
.word
= desc
->Words
;
3053 reply_descript_type
= reply_desc
->ReplyFlags
&
3054 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
3056 if (reply_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
)
3061 while (d_val
.u
.low
!= cpu_to_le32(UINT_MAX
) &&
3062 d_val
.u
.high
!= cpu_to_le32(UINT_MAX
)) {
3064 smid
= le16_to_cpu(reply_desc
->SMID
);
3065 cmd_fusion
= fusion
->cmd_list
[smid
- 1];
3066 scsi_io_req
= (struct MPI2_RAID_SCSI_IO_REQUEST
*)
3067 cmd_fusion
->io_request
;
3069 scmd_local
= cmd_fusion
->scmd
;
3070 status
= scsi_io_req
->RaidContext
.raid_context
.status
;
3071 extStatus
= scsi_io_req
->RaidContext
.raid_context
.ex_status
;
3072 sense
= cmd_fusion
->sense
;
3073 data_length
= scsi_io_req
->DataLength
;
3075 switch (scsi_io_req
->Function
) {
3076 case MPI2_FUNCTION_SCSI_TASK_MGMT
:
3077 mr_tm_req
= (struct MR_TASK_MANAGE_REQUEST
*)
3078 cmd_fusion
->io_request
;
3079 mpi_tm_req
= (struct MPI2_SCSI_TASK_MANAGE_REQUEST
*)
3080 &mr_tm_req
->TmRequest
;
3081 dev_dbg(&instance
->pdev
->dev
, "TM completion:"
3082 "type: 0x%x TaskMID: 0x%x\n",
3083 mpi_tm_req
->TaskType
, mpi_tm_req
->TaskMID
);
3084 complete(&cmd_fusion
->done
);
3086 case MPI2_FUNCTION_SCSI_IO_REQUEST
: /*Fast Path IO.*/
3087 /* Update load balancing info */
3088 if (fusion
->load_balance_info
&&
3089 (cmd_fusion
->scmd
->SCp
.Status
&
3090 MEGASAS_LOAD_BALANCE_FLAG
)) {
3091 device_id
= MEGASAS_DEV_INDEX(scmd_local
);
3092 lbinfo
= &fusion
->load_balance_info
[device_id
];
3093 atomic_dec(&lbinfo
->scsi_pending_cmds
[cmd_fusion
->pd_r1_lb
]);
3094 cmd_fusion
->scmd
->SCp
.Status
&= ~MEGASAS_LOAD_BALANCE_FLAG
;
3096 //Fall thru and complete IO
3097 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
: /* LD-IO Path */
3098 atomic_dec(&instance
->fw_outstanding
);
3099 if (cmd_fusion
->r1_alt_dev_handle
== MR_DEVHANDLE_INVALID
) {
3100 map_cmd_status(fusion
, scmd_local
, status
,
3101 extStatus
, le32_to_cpu(data_length
),
3103 if (instance
->ldio_threshold
&&
3104 (megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
))
3105 atomic_dec(&instance
->ldio_outstanding
);
3106 scmd_local
->SCp
.ptr
= NULL
;
3107 megasas_return_cmd_fusion(instance
, cmd_fusion
);
3108 scsi_dma_unmap(scmd_local
);
3109 scmd_local
->scsi_done(scmd_local
);
3110 } else /* Optimal VD - R1 FP command completion. */
3111 megasas_complete_r1_command(instance
, cmd_fusion
);
3113 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST
: /*MFI command */
3114 cmd_mfi
= instance
->cmd_list
[cmd_fusion
->sync_cmd_idx
];
3115 /* Poll mode. Dummy free.
3116 * In case of Interrupt mode, caller has reverse check.
3118 if (cmd_mfi
->flags
& DRV_DCMD_POLLED_MODE
) {
3119 cmd_mfi
->flags
&= ~DRV_DCMD_POLLED_MODE
;
3120 megasas_return_cmd(instance
, cmd_mfi
);
3122 megasas_complete_cmd(instance
, cmd_mfi
, DID_OK
);
3126 fusion
->last_reply_idx
[MSIxIndex
]++;
3127 if (fusion
->last_reply_idx
[MSIxIndex
] >=
3128 fusion
->reply_q_depth
)
3129 fusion
->last_reply_idx
[MSIxIndex
] = 0;
3131 desc
->Words
= cpu_to_le64(ULLONG_MAX
);
3133 threshold_reply_count
++;
3135 /* Get the next reply descriptor */
3136 if (!fusion
->last_reply_idx
[MSIxIndex
])
3137 desc
= fusion
->reply_frames_desc
[MSIxIndex
];
3142 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*)desc
;
3144 d_val
.word
= desc
->Words
;
3146 reply_descript_type
= reply_desc
->ReplyFlags
&
3147 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
3149 if (reply_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
)
3152 * Write to reply post host index register after completing threshold
3153 * number of reply counts and still there are more replies in reply queue
3154 * pending to be completed
3156 if (threshold_reply_count
>= THRESHOLD_REPLY_COUNT
) {
3157 if (instance
->msix_combined
)
3158 writel(((MSIxIndex
& 0x7) << 24) |
3159 fusion
->last_reply_idx
[MSIxIndex
],
3160 instance
->reply_post_host_index_addr
[MSIxIndex
/8]);
3162 writel((MSIxIndex
<< 24) |
3163 fusion
->last_reply_idx
[MSIxIndex
],
3164 instance
->reply_post_host_index_addr
[0]);
3165 threshold_reply_count
= 0;
3173 if (instance
->msix_combined
)
3174 writel(((MSIxIndex
& 0x7) << 24) |
3175 fusion
->last_reply_idx
[MSIxIndex
],
3176 instance
->reply_post_host_index_addr
[MSIxIndex
/8]);
3178 writel((MSIxIndex
<< 24) |
3179 fusion
->last_reply_idx
[MSIxIndex
],
3180 instance
->reply_post_host_index_addr
[0]);
3181 megasas_check_and_restore_queue_depth(instance
);
3186 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3187 * @instance: Adapter soft state
3189 void megasas_sync_irqs(unsigned long instance_addr
)
3192 struct megasas_instance
*instance
=
3193 (struct megasas_instance
*)instance_addr
;
3195 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3197 for (i
= 0; i
< count
; i
++)
3198 synchronize_irq(pci_irq_vector(instance
->pdev
, i
));
3202 * megasas_complete_cmd_dpc_fusion - Completes command
3203 * @instance: Adapter soft state
3205 * Tasklet to complete cmds
3208 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr
)
3210 struct megasas_instance
*instance
=
3211 (struct megasas_instance
*)instance_addr
;
3212 unsigned long flags
;
3213 u32 count
, MSIxIndex
;
3215 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3217 /* If we have already declared adapter dead, donot complete cmds */
3218 spin_lock_irqsave(&instance
->hba_lock
, flags
);
3219 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
) {
3220 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
3223 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
3225 for (MSIxIndex
= 0 ; MSIxIndex
< count
; MSIxIndex
++)
3226 complete_cmd_fusion(instance
, MSIxIndex
);
3230 * megasas_isr_fusion - isr entry point
3232 irqreturn_t
megasas_isr_fusion(int irq
, void *devp
)
3234 struct megasas_irq_context
*irq_context
= devp
;
3235 struct megasas_instance
*instance
= irq_context
->instance
;
3236 u32 mfiStatus
, fw_state
, dma_state
;
3238 if (instance
->mask_interrupts
)
3241 if (!instance
->msix_vectors
) {
3242 mfiStatus
= instance
->instancet
->clear_intr(instance
->reg_set
);
3247 /* If we are resetting, bail */
3248 if (test_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
)) {
3249 instance
->instancet
->clear_intr(instance
->reg_set
);
3253 if (!complete_cmd_fusion(instance
, irq_context
->MSIxIndex
)) {
3254 instance
->instancet
->clear_intr(instance
->reg_set
);
3255 /* If we didn't complete any commands, check for FW fault */
3256 fw_state
= instance
->instancet
->read_fw_status_reg(
3257 instance
->reg_set
) & MFI_STATE_MASK
;
3258 dma_state
= instance
->instancet
->read_fw_status_reg
3259 (instance
->reg_set
) & MFI_STATE_DMADONE
;
3260 if (instance
->crash_dump_drv_support
&&
3261 instance
->crash_dump_app_support
) {
3262 /* Start collecting crash, if DMA bit is done */
3263 if ((fw_state
== MFI_STATE_FAULT
) && dma_state
)
3264 schedule_work(&instance
->crash_init
);
3265 else if (fw_state
== MFI_STATE_FAULT
) {
3266 if (instance
->unload
== 0)
3267 schedule_work(&instance
->work_init
);
3269 } else if (fw_state
== MFI_STATE_FAULT
) {
3270 dev_warn(&instance
->pdev
->dev
, "Iop2SysDoorbellInt"
3271 "for scsi%d\n", instance
->host
->host_no
);
3272 if (instance
->unload
== 0)
3273 schedule_work(&instance
->work_init
);
3281 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
3282 * @instance: Adapter soft state
3283 * mfi_cmd: megasas_cmd pointer
3287 build_mpt_mfi_pass_thru(struct megasas_instance
*instance
,
3288 struct megasas_cmd
*mfi_cmd
)
3290 struct MPI25_IEEE_SGE_CHAIN64
*mpi25_ieee_chain
;
3291 struct MPI2_RAID_SCSI_IO_REQUEST
*io_req
;
3292 struct megasas_cmd_fusion
*cmd
;
3293 struct fusion_context
*fusion
;
3294 struct megasas_header
*frame_hdr
= &mfi_cmd
->frame
->hdr
;
3296 fusion
= instance
->ctrl_context
;
3298 cmd
= megasas_get_cmd_fusion(instance
,
3299 instance
->max_scsi_cmds
+ mfi_cmd
->index
);
3301 /* Save the smid. To be used for returning the cmd */
3302 mfi_cmd
->context
.smid
= cmd
->index
;
3305 * For cmds where the flag is set, store the flag and check
3306 * on completion. For cmds with this flag, don't call
3307 * megasas_complete_cmd
3310 if (frame_hdr
->flags
& cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
))
3311 mfi_cmd
->flags
|= DRV_DCMD_POLLED_MODE
;
3313 io_req
= cmd
->io_request
;
3315 if (fusion
->adapter_type
>= INVADER_SERIES
) {
3316 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr_end
=
3317 (struct MPI25_IEEE_SGE_CHAIN64
*)&io_req
->SGL
;
3318 sgl_ptr_end
+= fusion
->max_sge_in_main_msg
- 1;
3319 sgl_ptr_end
->Flags
= 0;
3323 (struct MPI25_IEEE_SGE_CHAIN64
*)&io_req
->SGL
.IeeeChain
;
3325 io_req
->Function
= MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST
;
3326 io_req
->SGLOffset0
= offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
,
3328 io_req
->ChainOffset
= fusion
->chain_offset_mfi_pthru
;
3330 mpi25_ieee_chain
->Address
= cpu_to_le64(mfi_cmd
->frame_phys_addr
);
3332 mpi25_ieee_chain
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
3333 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR
;
3335 mpi25_ieee_chain
->Length
= cpu_to_le32(instance
->mfi_frame_size
);
3339 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
3340 * @instance: Adapter soft state
3341 * @cmd: mfi cmd to build
3344 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*
3345 build_mpt_cmd(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
)
3347 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
= NULL
;
3350 build_mpt_mfi_pass_thru(instance
, cmd
);
3351 index
= cmd
->context
.smid
;
3353 req_desc
= megasas_get_request_descriptor(instance
, index
- 1);
3355 req_desc
->Words
= 0;
3356 req_desc
->SCSIIO
.RequestFlags
= (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
3357 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
3359 req_desc
->SCSIIO
.SMID
= cpu_to_le16(index
);
/**
 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
 * @instance:			Adapter soft state
 * @cmd:			mfi cmd pointer
 *
 * Builds the MPT wrapper for the MFI command and fires it at the firmware.
 */
void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = build_mpt_cmd(instance, cmd);

	megasas_fire_cmd_fusion(instance, req_desc);
}
3383 * megasas_release_fusion - Reverses the FW initialization
3384 * @instance: Adapter soft state
3387 megasas_release_fusion(struct megasas_instance
*instance
)
3389 megasas_free_cmds(instance
);
3390 megasas_free_cmds_fusion(instance
);
3392 iounmap(instance
->reg_set
);
3394 pci_release_selected_regions(instance
->pdev
, 1<<instance
->bar
);
3398 * megasas_read_fw_status_reg_fusion - returns the current FW status value
3399 * @regs: MFI register set
3402 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem
*regs
)
3404 return readl(&(regs
)->outbound_scratch_pad
);
3408 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3409 * @instance: Controller's soft instance
3410 * return: Number of allocated host crash buffers
3413 megasas_alloc_host_crash_buffer(struct megasas_instance
*instance
)
3417 for (i
= 0; i
< MAX_CRASH_DUMP_SIZE
; i
++) {
3418 instance
->crash_buf
[i
] = vzalloc(CRASH_DMA_BUF_SIZE
);
3419 if (!instance
->crash_buf
[i
]) {
3420 dev_info(&instance
->pdev
->dev
, "Firmware crash dump "
3421 "memory allocation failed at index %d\n", i
);
3425 instance
->drv_buf_alloc
= i
;
3429 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3430 * @instance: Controller's soft instance
3433 megasas_free_host_crash_buffer(struct megasas_instance
*instance
)
3436 for (i
= 0; i
< instance
->drv_buf_alloc
; i
++) {
3437 if (instance
->crash_buf
[i
])
3438 vfree(instance
->crash_buf
[i
]);
3440 instance
->drv_buf_index
= 0;
3441 instance
->drv_buf_alloc
= 0;
3442 instance
->fw_crash_state
= UNAVAILABLE
;
3443 instance
->fw_crash_buffer_size
= 0;
3447 * megasas_adp_reset_fusion - For controller reset
3448 * @regs: MFI register set
3451 megasas_adp_reset_fusion(struct megasas_instance
*instance
,
3452 struct megasas_register_set __iomem
*regs
)
3454 u32 host_diag
, abs_state
, retry
;
3456 /* Now try to reset the chip */
3457 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3458 writel(MPI2_WRSEQ_1ST_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3459 writel(MPI2_WRSEQ_2ND_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3460 writel(MPI2_WRSEQ_3RD_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3461 writel(MPI2_WRSEQ_4TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3462 writel(MPI2_WRSEQ_5TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3463 writel(MPI2_WRSEQ_6TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3465 /* Check that the diag write enable (DRWE) bit is on */
3466 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3468 while (!(host_diag
& HOST_DIAG_WRITE_ENABLE
)) {
3470 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3471 if (retry
++ == 100) {
3472 dev_warn(&instance
->pdev
->dev
,
3473 "Host diag unlock failed from %s %d\n",
3474 __func__
, __LINE__
);
3478 if (!(host_diag
& HOST_DIAG_WRITE_ENABLE
))
3481 /* Send chip reset command */
3482 writel(host_diag
| HOST_DIAG_RESET_ADAPTER
,
3483 &instance
->reg_set
->fusion_host_diag
);
3486 /* Make sure reset adapter bit is cleared */
3487 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3489 while (host_diag
& HOST_DIAG_RESET_ADAPTER
) {
3491 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3492 if (retry
++ == 1000) {
3493 dev_warn(&instance
->pdev
->dev
,
3494 "Diag reset adapter never cleared %s %d\n",
3495 __func__
, __LINE__
);
3499 if (host_diag
& HOST_DIAG_RESET_ADAPTER
)
3502 abs_state
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
)
3506 while ((abs_state
<= MFI_STATE_FW_INIT
) && (retry
++ < 1000)) {
3508 abs_state
= instance
->instancet
->
3509 read_fw_status_reg(instance
->reg_set
) & MFI_STATE_MASK
;
3511 if (abs_state
<= MFI_STATE_FW_INIT
) {
3512 dev_warn(&instance
->pdev
->dev
,
3513 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
3514 abs_state
, __func__
, __LINE__
);
3522 * megasas_check_reset_fusion - For controller reset check
3523 * @regs: MFI register set
3526 megasas_check_reset_fusion(struct megasas_instance
*instance
,
3527 struct megasas_register_set __iomem
*regs
)
3532 /* This function waits for outstanding commands on fusion to complete */
3533 int megasas_wait_for_outstanding_fusion(struct megasas_instance
*instance
,
3534 int reason
, int *convert
)
3536 int i
, outstanding
, retval
= 0, hb_seconds_missed
= 0;
3539 for (i
= 0; i
< resetwaittime
; i
++) {
3540 /* Check if firmware is in fault state */
3541 fw_state
= instance
->instancet
->read_fw_status_reg(
3542 instance
->reg_set
) & MFI_STATE_MASK
;
3543 if (fw_state
== MFI_STATE_FAULT
) {
3544 dev_warn(&instance
->pdev
->dev
, "Found FW in FAULT state,"
3545 " will reset adapter scsi%d.\n",
3546 instance
->host
->host_no
);
3547 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3548 if (instance
->requestorId
&& reason
) {
3549 dev_warn(&instance
->pdev
->dev
, "SR-IOV Found FW in FAULT"
3550 " state while polling during"
3551 " I/O timeout handling for %d\n",
3552 instance
->host
->host_no
);
3560 if (reason
== MFI_IO_TIMEOUT_OCR
) {
3561 dev_info(&instance
->pdev
->dev
,
3562 "MFI IO is timed out, initiating OCR\n");
3563 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3568 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
3569 if (instance
->requestorId
&& !reason
) {
3574 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
3575 if (instance
->requestorId
&& (reason
== SCSIIO_TIMEOUT_OCR
)) {
3576 if (instance
->hb_host_mem
->HB
.fwCounter
!=
3577 instance
->hb_host_mem
->HB
.driverCounter
) {
3578 instance
->hb_host_mem
->HB
.driverCounter
=
3579 instance
->hb_host_mem
->HB
.fwCounter
;
3580 hb_seconds_missed
= 0;
3582 hb_seconds_missed
++;
3583 if (hb_seconds_missed
==
3584 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF
/HZ
)) {
3585 dev_warn(&instance
->pdev
->dev
, "SR-IOV:"
3586 " Heartbeat never completed "
3587 " while polling during I/O "
3588 " timeout handling for "
3590 instance
->host
->host_no
);
3598 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3599 outstanding
= atomic_read(&instance
->fw_outstanding
);
3603 if (!(i
% MEGASAS_RESET_NOTICE_INTERVAL
)) {
3604 dev_notice(&instance
->pdev
->dev
, "[%2d]waiting for %d "
3605 "commands to complete for scsi%d\n", i
,
3606 outstanding
, instance
->host
->host_no
);
3611 if (atomic_read(&instance
->fw_outstanding
)) {
3612 dev_err(&instance
->pdev
->dev
, "pending commands remain after waiting, "
3613 "will reset adapter scsi%d.\n",
3614 instance
->host
->host_no
);
3622 void megasas_reset_reply_desc(struct megasas_instance
*instance
)
3625 struct fusion_context
*fusion
;
3626 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
3628 fusion
= instance
->ctrl_context
;
3629 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3630 for (i
= 0 ; i
< count
; i
++) {
3631 fusion
->last_reply_idx
[i
] = 0;
3632 reply_desc
= fusion
->reply_frames_desc
[i
];
3633 for (j
= 0 ; j
< fusion
->reply_q_depth
; j
++, reply_desc
++)
3634 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
3639 * megasas_refire_mgmt_cmd : Re-fire management commands
3640 * @instance: Controller's soft instance
3642 void megasas_refire_mgmt_cmd(struct megasas_instance
*instance
)
3645 struct megasas_cmd_fusion
*cmd_fusion
;
3646 struct fusion_context
*fusion
;
3647 struct megasas_cmd
*cmd_mfi
;
3648 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
3650 bool refire_cmd
= 0;
3652 fusion
= instance
->ctrl_context
;
3654 /* Re-fire management commands.
3655 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
3657 for (j
= instance
->max_scsi_cmds
; j
< instance
->max_fw_cmds
; j
++) {
3658 cmd_fusion
= fusion
->cmd_list
[j
];
3659 cmd_mfi
= instance
->cmd_list
[cmd_fusion
->sync_cmd_idx
];
3660 smid
= le16_to_cpu(cmd_mfi
->context
.smid
);
3665 /* Do not refire shutdown command */
3666 if (le32_to_cpu(cmd_mfi
->frame
->dcmd
.opcode
) ==
3667 MR_DCMD_CTRL_SHUTDOWN
) {
3668 cmd_mfi
->frame
->dcmd
.cmd_status
= MFI_STAT_OK
;
3669 megasas_complete_cmd(instance
, cmd_mfi
, DID_OK
);
3673 req_desc
= megasas_get_request_descriptor
3674 (instance
, smid
- 1);
3675 refire_cmd
= req_desc
&& ((cmd_mfi
->frame
->dcmd
.opcode
!=
3676 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
)) &&
3677 (cmd_mfi
->frame
->dcmd
.opcode
!=
3678 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO
)))
3679 && !(cmd_mfi
->flags
& DRV_DCMD_SKIP_REFIRE
);
3681 megasas_fire_cmd_fusion(instance
, req_desc
);
3683 megasas_return_cmd(instance
, cmd_mfi
);
3688 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
3689 * @instance: per adapter struct
3690 * @channel: the channel assigned by the OS
3691 * @id: the id assigned by the OS
3693 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
3696 static int megasas_track_scsiio(struct megasas_instance
*instance
,
3697 int id
, int channel
)
3700 struct megasas_cmd_fusion
*cmd_fusion
;
3701 struct fusion_context
*fusion
;
3702 fusion
= instance
->ctrl_context
;
3704 for (i
= 0 ; i
< instance
->max_scsi_cmds
; i
++) {
3705 cmd_fusion
= fusion
->cmd_list
[i
];
3706 if (cmd_fusion
->scmd
&&
3707 (cmd_fusion
->scmd
->device
->id
== id
&&
3708 cmd_fusion
->scmd
->device
->channel
== channel
)) {
3709 dev_info(&instance
->pdev
->dev
,
3710 "SCSI commands pending to target"
3711 "channel %d id %d \tSMID: 0x%x\n",
3712 channel
, id
, cmd_fusion
->index
);
3713 scsi_print_command(cmd_fusion
->scmd
);
3719 return found
? FAILED
: SUCCESS
;
3723 * megasas_tm_response_code - translation of device response code
3724 * @ioc: per adapter object
3725 * @mpi_reply: MPI reply returned by firmware
3730 megasas_tm_response_code(struct megasas_instance
*instance
,
3731 struct MPI2_SCSI_TASK_MANAGE_REPLY
*mpi_reply
)
3735 switch (mpi_reply
->ResponseCode
) {
3736 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE
:
3737 desc
= "task management request completed";
3739 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME
:
3740 desc
= "invalid frame";
3742 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED
:
3743 desc
= "task management request not supported";
3745 case MPI2_SCSITASKMGMT_RSP_TM_FAILED
:
3746 desc
= "task management request failed";
3748 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED
:
3749 desc
= "task management request succeeded";
3751 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN
:
3752 desc
= "invalid lun";
3755 desc
= "overlapped tag attempted";
3757 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC
:
3758 desc
= "task queued, however not sent to target";
3764 dev_dbg(&instance
->pdev
->dev
, "response_code(%01x): %s\n",
3765 mpi_reply
->ResponseCode
, desc
);
3766 dev_dbg(&instance
->pdev
->dev
,
3767 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
3768 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
3769 mpi_reply
->TerminationCount
, mpi_reply
->DevHandle
,
3770 mpi_reply
->Function
, mpi_reply
->TaskType
,
3771 mpi_reply
->IOCStatus
, mpi_reply
->IOCLogInfo
);
3775 * megasas_issue_tm - main routine for sending tm requests
3776 * @instance: per adapter struct
3777 * @device_handle: device handle
3778 * @channel: the channel assigned by the OS
3779 * @id: the id assigned by the OS
3780 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
3781 * @smid_task: smid assigned to the task
3782 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
3785 * MegaRaid use MPT interface for Task Magement request.
3786 * A generic API for sending task management requests to firmware.
3788 * Return SUCCESS or FAILED.
3791 megasas_issue_tm(struct megasas_instance
*instance
, u16 device_handle
,
3792 uint channel
, uint id
, u16 smid_task
, u8 type
)
3794 struct MR_TASK_MANAGE_REQUEST
*mr_request
;
3795 struct MPI2_SCSI_TASK_MANAGE_REQUEST
*mpi_request
;
3796 unsigned long timeleft
;
3797 struct megasas_cmd_fusion
*cmd_fusion
;
3798 struct megasas_cmd
*cmd_mfi
;
3799 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
3800 struct fusion_context
*fusion
= NULL
;
3801 struct megasas_cmd_fusion
*scsi_lookup
;
3803 struct MPI2_SCSI_TASK_MANAGE_REPLY
*mpi_reply
;
3805 fusion
= instance
->ctrl_context
;
3807 cmd_mfi
= megasas_get_cmd(instance
);
3810 dev_err(&instance
->pdev
->dev
, "Failed from %s %d\n",
3811 __func__
, __LINE__
);
3815 cmd_fusion
= megasas_get_cmd_fusion(instance
,
3816 instance
->max_scsi_cmds
+ cmd_mfi
->index
);
3818 /* Save the smid. To be used for returning the cmd */
3819 cmd_mfi
->context
.smid
= cmd_fusion
->index
;
3821 req_desc
= megasas_get_request_descriptor(instance
,
3822 (cmd_fusion
->index
- 1));
3824 cmd_fusion
->request_desc
= req_desc
;
3825 req_desc
->Words
= 0;
3827 mr_request
= (struct MR_TASK_MANAGE_REQUEST
*) cmd_fusion
->io_request
;
3828 memset(mr_request
, 0, sizeof(struct MR_TASK_MANAGE_REQUEST
));
3829 mpi_request
= (struct MPI2_SCSI_TASK_MANAGE_REQUEST
*) &mr_request
->TmRequest
;
3830 mpi_request
->Function
= MPI2_FUNCTION_SCSI_TASK_MGMT
;
3831 mpi_request
->DevHandle
= cpu_to_le16(device_handle
);
3832 mpi_request
->TaskType
= type
;
3833 mpi_request
->TaskMID
= cpu_to_le16(smid_task
);
3834 mpi_request
->LUN
[1] = 0;
3837 req_desc
= cmd_fusion
->request_desc
;
3838 req_desc
->HighPriority
.SMID
= cpu_to_le16(cmd_fusion
->index
);
3839 req_desc
->HighPriority
.RequestFlags
=
3840 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
<<
3841 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
3842 req_desc
->HighPriority
.MSIxIndex
= 0;
3843 req_desc
->HighPriority
.LMID
= 0;
3844 req_desc
->HighPriority
.Reserved1
= 0;
3846 if (channel
< MEGASAS_MAX_PD_CHANNELS
)
3847 mr_request
->tmReqFlags
.isTMForPD
= 1;
3849 mr_request
->tmReqFlags
.isTMForLD
= 1;
3851 init_completion(&cmd_fusion
->done
);
3852 megasas_fire_cmd_fusion(instance
, req_desc
);
3854 timeleft
= wait_for_completion_timeout(&cmd_fusion
->done
, 50 * HZ
);
3857 dev_err(&instance
->pdev
->dev
,
3858 "task mgmt type 0x%x timed out\n", type
);
3859 cmd_mfi
->flags
|= DRV_DCMD_SKIP_REFIRE
;
3860 mutex_unlock(&instance
->reset_mutex
);
3861 rc
= megasas_reset_fusion(instance
->host
, MFI_IO_TIMEOUT_OCR
);
3862 mutex_lock(&instance
->reset_mutex
);
3866 mpi_reply
= (struct MPI2_SCSI_TASK_MANAGE_REPLY
*) &mr_request
->TMReply
;
3867 megasas_tm_response_code(instance
, mpi_reply
);
3869 megasas_return_cmd(instance
, cmd_mfi
);
3872 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
:
3873 scsi_lookup
= fusion
->cmd_list
[smid_task
- 1];
3875 if (scsi_lookup
->scmd
== NULL
)
3878 instance
->instancet
->disable_intr(instance
);
3879 megasas_sync_irqs((unsigned long)instance
);
3880 instance
->instancet
->enable_intr(instance
);
3881 if (scsi_lookup
->scmd
== NULL
)
3887 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
:
3888 if ((channel
== 0xFFFFFFFF) && (id
== 0xFFFFFFFF))
3890 instance
->instancet
->disable_intr(instance
);
3891 megasas_sync_irqs((unsigned long)instance
);
3892 rc
= megasas_track_scsiio(instance
, id
, channel
);
3893 instance
->instancet
->enable_intr(instance
);
3896 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
:
3897 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK
:
3909 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI
3910 * @instance: per adapter struct
3912 * Return Non Zero index, if SMID found in outstanding commands
3914 static u16
megasas_fusion_smid_lookup(struct scsi_cmnd
*scmd
)
3917 struct megasas_instance
*instance
;
3918 struct megasas_cmd_fusion
*cmd_fusion
;
3919 struct fusion_context
*fusion
;
3921 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
3923 fusion
= instance
->ctrl_context
;
3925 for (i
= 0; i
< instance
->max_scsi_cmds
; i
++) {
3926 cmd_fusion
= fusion
->cmd_list
[i
];
3927 if (cmd_fusion
->scmd
&& (cmd_fusion
->scmd
== scmd
)) {
3928 scmd_printk(KERN_NOTICE
, scmd
, "Abort request is for"
3929 " SMID: %d\n", cmd_fusion
->index
);
3930 ret
= cmd_fusion
->index
;
3939 * megasas_get_tm_devhandle - Get devhandle for TM request
3940 * @sdev- OS provided scsi device
3942 * Returns- devhandle/targetID of SCSI device
3944 static u16
megasas_get_tm_devhandle(struct scsi_device
*sdev
)
3948 struct megasas_instance
*instance
;
3949 struct fusion_context
*fusion
;
3950 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
3951 u16 devhandle
= (u16
)ULONG_MAX
;
3953 instance
= (struct megasas_instance
*)sdev
->host
->hostdata
;
3954 fusion
= instance
->ctrl_context
;
3956 if (!MEGASAS_IS_LOGICAL(sdev
)) {
3957 if (instance
->use_seqnum_jbod_fp
) {
3958 pd_index
= (sdev
->channel
* MEGASAS_MAX_DEV_PER_CHANNEL
)
3960 pd_sync
= (void *)fusion
->pd_seq_sync
3961 [(instance
->pd_seq_map_id
- 1) & 1];
3962 devhandle
= pd_sync
->seq
[pd_index
].devHandle
;
3964 sdev_printk(KERN_ERR
, sdev
, "Firmware expose tmCapable"
3965 " without JBOD MAP support from %s %d\n", __func__
, __LINE__
);
3967 device_id
= ((sdev
->channel
% 2) * MEGASAS_MAX_DEV_PER_CHANNEL
)
3969 devhandle
= device_id
;
3976 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
3977 * @scmd : pointer to scsi command object
3979 * Return SUCCESS, if command aborted else FAILED
3982 int megasas_task_abort_fusion(struct scsi_cmnd
*scmd
)
3984 struct megasas_instance
*instance
;
3985 u16 smid
, devhandle
;
3986 struct fusion_context
*fusion
;
3988 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
3989 mr_device_priv_data
= scmd
->device
->hostdata
;
3992 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
3993 fusion
= instance
->ctrl_context
;
3995 scmd_printk(KERN_INFO
, scmd
, "task abort called for scmd(%p)\n", scmd
);
3996 scsi_print_command(scmd
);
3998 if (atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) {
3999 dev_err(&instance
->pdev
->dev
, "Controller is not OPERATIONAL,"
4000 "SCSI host:%d\n", instance
->host
->host_no
);
4005 if (!mr_device_priv_data
) {
4006 sdev_printk(KERN_INFO
, scmd
->device
, "device been deleted! "
4007 "scmd(%p)\n", scmd
);
4008 scmd
->result
= DID_NO_CONNECT
<< 16;
4014 if (!mr_device_priv_data
->is_tm_capable
) {
4019 mutex_lock(&instance
->reset_mutex
);
4021 smid
= megasas_fusion_smid_lookup(scmd
);
4025 scmd_printk(KERN_NOTICE
, scmd
, "Command for which abort is"
4026 " issued is not found in oustanding commands\n");
4027 mutex_unlock(&instance
->reset_mutex
);
4031 devhandle
= megasas_get_tm_devhandle(scmd
->device
);
4033 if (devhandle
== (u16
)ULONG_MAX
) {
4035 sdev_printk(KERN_INFO
, scmd
->device
,
4036 "task abort issued for invalid devhandle\n");
4037 mutex_unlock(&instance
->reset_mutex
);
4040 sdev_printk(KERN_INFO
, scmd
->device
,
4041 "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
4044 mr_device_priv_data
->tm_busy
= 1;
4045 ret
= megasas_issue_tm(instance
, devhandle
,
4046 scmd
->device
->channel
, scmd
->device
->id
, smid
,
4047 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
);
4048 mr_device_priv_data
->tm_busy
= 0;
4050 mutex_unlock(&instance
->reset_mutex
);
4052 sdev_printk(KERN_INFO
, scmd
->device
, "task abort: %s scmd(%p)\n",
4053 ((ret
== SUCCESS
) ? "SUCCESS" : "FAILED"), scmd
);
4059 * megasas_reset_target_fusion : target reset function for fusion adapters
4060 * scmd: SCSI command pointer
4062 * Returns SUCCESS if all commands associated with target aborted else FAILED
4065 int megasas_reset_target_fusion(struct scsi_cmnd
*scmd
)
4068 struct megasas_instance
*instance
;
4071 struct fusion_context
*fusion
;
4072 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
4073 mr_device_priv_data
= scmd
->device
->hostdata
;
4075 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
4076 fusion
= instance
->ctrl_context
;
4078 sdev_printk(KERN_INFO
, scmd
->device
,
4079 "target reset called for scmd(%p)\n", scmd
);
4081 if (atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) {
4082 dev_err(&instance
->pdev
->dev
, "Controller is not OPERATIONAL,"
4083 "SCSI host:%d\n", instance
->host
->host_no
);
4088 if (!mr_device_priv_data
) {
4089 sdev_printk(KERN_INFO
, scmd
->device
, "device been deleted! "
4090 "scmd(%p)\n", scmd
);
4091 scmd
->result
= DID_NO_CONNECT
<< 16;
4097 if (!mr_device_priv_data
->is_tm_capable
) {
4102 mutex_lock(&instance
->reset_mutex
);
4103 devhandle
= megasas_get_tm_devhandle(scmd
->device
);
4105 if (devhandle
== (u16
)ULONG_MAX
) {
4107 sdev_printk(KERN_INFO
, scmd
->device
,
4108 "target reset issued for invalid devhandle\n");
4109 mutex_unlock(&instance
->reset_mutex
);
4113 sdev_printk(KERN_INFO
, scmd
->device
,
4114 "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
4116 mr_device_priv_data
->tm_busy
= 1;
4117 ret
= megasas_issue_tm(instance
, devhandle
,
4118 scmd
->device
->channel
, scmd
->device
->id
, 0,
4119 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
);
4120 mr_device_priv_data
->tm_busy
= 0;
4121 mutex_unlock(&instance
->reset_mutex
);
4123 scmd_printk(KERN_NOTICE
, scmd
, "megasas: target reset %s!!\n",
4124 (ret
== SUCCESS
) ? "SUCCESS" : "FAILED");
4129 /*SRIOV get other instance in cluster if any*/
4130 struct megasas_instance
*megasas_get_peer_instance(struct megasas_instance
*instance
)
4134 for (i
= 0; i
< MAX_MGMT_ADAPTERS
; i
++) {
4135 if (megasas_mgmt_info
.instance
[i
] &&
4136 (megasas_mgmt_info
.instance
[i
] != instance
) &&
4137 megasas_mgmt_info
.instance
[i
]->requestorId
&&
4138 megasas_mgmt_info
.instance
[i
]->peerIsPresent
&&
4139 (memcmp((megasas_mgmt_info
.instance
[i
]->clusterId
),
4140 instance
->clusterId
, MEGASAS_CLUSTER_ID_SIZE
) == 0))
4141 return megasas_mgmt_info
.instance
[i
];
4146 /* Check for a second path that is currently UP */
4147 int megasas_check_mpio_paths(struct megasas_instance
*instance
,
4148 struct scsi_cmnd
*scmd
)
4150 struct megasas_instance
*peer_instance
= NULL
;
4151 int retval
= (DID_REQUEUE
<< 16);
4153 if (instance
->peerIsPresent
) {
4154 peer_instance
= megasas_get_peer_instance(instance
);
4155 if ((peer_instance
) &&
4156 (atomic_read(&peer_instance
->adprecovery
) ==
4157 MEGASAS_HBA_OPERATIONAL
))
4158 retval
= (DID_NO_CONNECT
<< 16);
4163 /* Core fusion reset function */
4164 int megasas_reset_fusion(struct Scsi_Host
*shost
, int reason
)
4166 int retval
= SUCCESS
, i
, j
, convert
= 0;
4167 struct megasas_instance
*instance
;
4168 struct megasas_cmd_fusion
*cmd_fusion
, *r1_cmd
;
4169 struct fusion_context
*fusion
;
4170 u32 abs_state
, status_reg
, reset_adapter
;
4171 u32 io_timeout_in_crash_mode
= 0;
4172 struct scsi_cmnd
*scmd_local
= NULL
;
4173 struct scsi_device
*sdev
;
4175 instance
= (struct megasas_instance
*)shost
->hostdata
;
4176 fusion
= instance
->ctrl_context
;
4178 mutex_lock(&instance
->reset_mutex
);
4180 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
) {
4181 dev_warn(&instance
->pdev
->dev
, "Hardware critical error, "
4182 "returning FAILED for scsi%d.\n",
4183 instance
->host
->host_no
);
4184 mutex_unlock(&instance
->reset_mutex
);
4187 status_reg
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
);
4188 abs_state
= status_reg
& MFI_STATE_MASK
;
4190 /* IO timeout detected, forcibly put FW in FAULT state */
4191 if (abs_state
!= MFI_STATE_FAULT
&& instance
->crash_dump_buf
&&
4192 instance
->crash_dump_app_support
&& reason
) {
4193 dev_info(&instance
->pdev
->dev
, "IO/DCMD timeout is detected, "
4194 "forcibly FAULT Firmware\n");
4195 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_INFAULT
);
4196 status_reg
= readl(&instance
->reg_set
->doorbell
);
4197 writel(status_reg
| MFI_STATE_FORCE_OCR
,
4198 &instance
->reg_set
->doorbell
);
4199 readl(&instance
->reg_set
->doorbell
);
4200 mutex_unlock(&instance
->reset_mutex
);
4203 io_timeout_in_crash_mode
++;
4204 dev_dbg(&instance
->pdev
->dev
, "waiting for [%d] "
4205 "seconds for crash dump collection and OCR "
4206 "to be done\n", (io_timeout_in_crash_mode
* 3));
4207 } while ((atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) &&
4208 (io_timeout_in_crash_mode
< 80));
4210 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HBA_OPERATIONAL
) {
4211 dev_info(&instance
->pdev
->dev
, "OCR done for IO "
4215 dev_info(&instance
->pdev
->dev
, "Controller is not "
4216 "operational after 240 seconds wait for IO "
4217 "timeout case in FW crash dump mode\n do "
4218 "OCR/kill adapter\n");
4219 retval
= megasas_reset_fusion(shost
, 0);
4224 if (instance
->requestorId
&& !instance
->skip_heartbeat_timer_del
)
4225 del_timer_sync(&instance
->sriov_heartbeat_timer
);
4226 set_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4227 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_POLLING
);
4228 instance
->instancet
->disable_intr(instance
);
4229 megasas_sync_irqs((unsigned long)instance
);
4231 /* First try waiting for commands to complete */
4232 if (megasas_wait_for_outstanding_fusion(instance
, reason
,
4234 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_INFAULT
);
4235 dev_warn(&instance
->pdev
->dev
, "resetting fusion "
4236 "adapter scsi%d.\n", instance
->host
->host_no
);
4240 if (megasas_dbg_lvl
& OCR_LOGS
)
4241 dev_info(&instance
->pdev
->dev
, "\nPending SCSI commands:\n");
4243 /* Now return commands back to the OS */
4244 for (i
= 0 ; i
< instance
->max_scsi_cmds
; i
++) {
4245 cmd_fusion
= fusion
->cmd_list
[i
];
4246 /*check for extra commands issued by driver*/
4247 if (instance
->is_ventura
) {
4248 r1_cmd
= fusion
->cmd_list
[i
+ instance
->max_fw_cmds
];
4249 megasas_return_cmd_fusion(instance
, r1_cmd
);
4251 scmd_local
= cmd_fusion
->scmd
;
4252 if (cmd_fusion
->scmd
) {
4253 if (megasas_dbg_lvl
& OCR_LOGS
) {
4254 sdev_printk(KERN_INFO
,
4255 cmd_fusion
->scmd
->device
, "SMID: 0x%x\n",
4257 scsi_print_command(cmd_fusion
->scmd
);
4260 scmd_local
->result
=
4261 megasas_check_mpio_paths(instance
,
4263 if (instance
->ldio_threshold
&&
4264 megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
)
4265 atomic_dec(&instance
->ldio_outstanding
);
4266 megasas_return_cmd_fusion(instance
, cmd_fusion
);
4267 scsi_dma_unmap(scmd_local
);
4268 scmd_local
->scsi_done(scmd_local
);
4272 atomic_set(&instance
->fw_outstanding
, 0);
4274 status_reg
= instance
->instancet
->read_fw_status_reg(
4276 abs_state
= status_reg
& MFI_STATE_MASK
;
4277 reset_adapter
= status_reg
& MFI_RESET_ADAPTER
;
4278 if (instance
->disableOnlineCtrlReset
||
4279 (abs_state
== MFI_STATE_FAULT
&& !reset_adapter
)) {
4280 /* Reset not supported, kill adapter */
4281 dev_warn(&instance
->pdev
->dev
, "Reset not supported"
4282 ", killing adapter scsi%d.\n",
4283 instance
->host
->host_no
);
4284 megaraid_sas_kill_hba(instance
);
4285 instance
->skip_heartbeat_timer_del
= 1;
4290 /* Let SR-IOV VF & PF sync up if there was a HB failure */
4291 if (instance
->requestorId
&& !reason
) {
4292 msleep(MEGASAS_OCR_SETTLE_TIME_VF
);
4293 goto transition_to_ready
;
4296 /* Now try to reset the chip */
4297 for (i
= 0; i
< MEGASAS_FUSION_MAX_RESET_TRIES
; i
++) {
4299 if (instance
->instancet
->adp_reset
4300 (instance
, instance
->reg_set
))
4302 transition_to_ready
:
4303 /* Wait for FW to become ready */
4304 if (megasas_transition_to_ready(instance
, 1)) {
4305 dev_warn(&instance
->pdev
->dev
,
4306 "Failed to transition controller to ready for "
4307 "scsi%d.\n", instance
->host
->host_no
);
4308 if (instance
->requestorId
&& !reason
)
4309 goto fail_kill_adapter
;
4313 megasas_reset_reply_desc(instance
);
4314 megasas_fusion_update_can_queue(instance
, OCR_CONTEXT
);
4316 if (megasas_ioc_init_fusion(instance
)) {
4317 if (instance
->requestorId
&& !reason
)
4318 goto fail_kill_adapter
;
4323 megasas_refire_mgmt_cmd(instance
);
4325 if (megasas_get_ctrl_info(instance
)) {
4326 dev_info(&instance
->pdev
->dev
,
4327 "Failed from %s %d\n",
4328 __func__
, __LINE__
);
4329 megaraid_sas_kill_hba(instance
);
4333 /* Reset load balance info */
4334 if (fusion
->load_balance_info
)
4335 memset(fusion
->load_balance_info
, 0,
4336 (sizeof(struct LD_LOAD_BALANCE_INFO
) *
4337 MAX_LOGICAL_DRIVES_EXT
));
4339 if (!megasas_get_map_info(instance
))
4340 megasas_sync_map_info(instance
);
4342 megasas_setup_jbod_map(instance
);
4344 shost_for_each_device(sdev
, shost
)
4345 megasas_set_dynamic_target_properties(sdev
);
4347 /* reset stream detection array */
4348 if (instance
->is_ventura
) {
4349 for (j
= 0; j
< MAX_LOGICAL_DRIVES_EXT
; ++j
) {
4350 memset(fusion
->stream_detect_by_ld
[j
],
4351 0, sizeof(struct LD_STREAM_DETECT
));
4352 fusion
->stream_detect_by_ld
[j
]->mru_bit_map
4357 clear_bit(MEGASAS_FUSION_IN_RESET
,
4358 &instance
->reset_flags
);
4359 instance
->instancet
->enable_intr(instance
);
4360 atomic_set(&instance
->adprecovery
, MEGASAS_HBA_OPERATIONAL
);
4362 dev_info(&instance
->pdev
->dev
, "Interrupts are enabled and"
4363 " controller is OPERATIONAL for scsi:%d\n",
4364 instance
->host
->host_no
);
4366 /* Restart SR-IOV heartbeat */
4367 if (instance
->requestorId
) {
4368 if (!megasas_sriov_start_heartbeat(instance
, 0))
4369 megasas_start_timer(instance
);
4371 instance
->skip_heartbeat_timer_del
= 1;
4374 if (instance
->crash_dump_drv_support
&&
4375 instance
->crash_dump_app_support
)
4376 megasas_set_crash_dump_params(instance
,
4377 MR_CRASH_BUF_TURN_ON
);
4379 megasas_set_crash_dump_params(instance
,
4380 MR_CRASH_BUF_TURN_OFF
);
4384 /* Adapter reset completed successfully */
4385 dev_warn(&instance
->pdev
->dev
,
4386 "Reset successful for scsi%d.\n",
4387 instance
->host
->host_no
);
4392 /* Reset failed, kill the adapter */
4393 dev_warn(&instance
->pdev
->dev
, "Reset failed, killing "
4394 "adapter scsi%d.\n", instance
->host
->host_no
);
4395 megaraid_sas_kill_hba(instance
);
4396 instance
->skip_heartbeat_timer_del
= 1;
4399 /* For VF: Restart HB timer if we didn't OCR */
4400 if (instance
->requestorId
) {
4401 megasas_start_timer(instance
);
4403 clear_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4404 instance
->instancet
->enable_intr(instance
);
4405 atomic_set(&instance
->adprecovery
, MEGASAS_HBA_OPERATIONAL
);
4408 clear_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4409 mutex_unlock(&instance
->reset_mutex
);
4413 /* Fusion Crash dump collection work queue */
4414 void megasas_fusion_crash_dump_wq(struct work_struct
*work
)
4416 struct megasas_instance
*instance
=
4417 container_of(work
, struct megasas_instance
, crash_init
);
4419 u8 partial_copy
= 0;
4422 status_reg
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
);
4425 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
4426 * to host crash buffers
4428 if (instance
->drv_buf_index
== 0) {
4429 /* Buffer is already allocated for old Crash dump.
4430 * Do OCR and do not wait for crash dump collection
4432 if (instance
->drv_buf_alloc
) {
4433 dev_info(&instance
->pdev
->dev
, "earlier crash dump is "
4434 "not yet copied by application, ignoring this "
4435 "crash dump and initiating OCR\n");
4436 status_reg
|= MFI_STATE_CRASH_DUMP_DONE
;
4438 &instance
->reg_set
->outbound_scratch_pad
);
4439 readl(&instance
->reg_set
->outbound_scratch_pad
);
4442 megasas_alloc_host_crash_buffer(instance
);
4443 dev_info(&instance
->pdev
->dev
, "Number of host crash buffers "
4444 "allocated: %d\n", instance
->drv_buf_alloc
);
4448 * Driver has allocated max buffers, which can be allocated
4449 * and FW has more crash dump data, then driver will
4452 if (instance
->drv_buf_index
>= (instance
->drv_buf_alloc
)) {
4453 dev_info(&instance
->pdev
->dev
, "Driver is done copying "
4454 "the buffer: %d\n", instance
->drv_buf_alloc
);
4455 status_reg
|= MFI_STATE_CRASH_DUMP_DONE
;
4458 memcpy(instance
->crash_buf
[instance
->drv_buf_index
],
4459 instance
->crash_dump_buf
, CRASH_DMA_BUF_SIZE
);
4460 instance
->drv_buf_index
++;
4461 status_reg
&= ~MFI_STATE_DMADONE
;
4464 if (status_reg
& MFI_STATE_CRASH_DUMP_DONE
) {
4465 dev_info(&instance
->pdev
->dev
, "Crash Dump is available,number "
4466 "of copied buffers: %d\n", instance
->drv_buf_index
);
4467 instance
->fw_crash_buffer_size
= instance
->drv_buf_index
;
4468 instance
->fw_crash_state
= AVAILABLE
;
4469 instance
->drv_buf_index
= 0;
4470 writel(status_reg
, &instance
->reg_set
->outbound_scratch_pad
);
4471 readl(&instance
->reg_set
->outbound_scratch_pad
);
4473 megasas_reset_fusion(instance
->host
, 0);
4475 writel(status_reg
, &instance
->reg_set
->outbound_scratch_pad
);
4476 readl(&instance
->reg_set
->outbound_scratch_pad
);
4481 /* Fusion OCR work queue */
4482 void megasas_fusion_ocr_wq(struct work_struct
*work
)
4484 struct megasas_instance
*instance
=
4485 container_of(work
, struct megasas_instance
, work_init
);
4487 megasas_reset_fusion(instance
->host
, 0);
4490 /* Allocate fusion context */
4492 megasas_alloc_fusion_context(struct megasas_instance
*instance
)
4494 struct fusion_context
*fusion
;
4496 instance
->ctrl_context_pages
= get_order(sizeof(struct fusion_context
));
4497 instance
->ctrl_context
= (void *)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
,
4498 instance
->ctrl_context_pages
);
4499 if (!instance
->ctrl_context
) {
4500 /* fall back to using vmalloc for fusion_context */
4501 instance
->ctrl_context
= vzalloc(sizeof(struct fusion_context
));
4502 if (!instance
->ctrl_context
) {
4503 dev_err(&instance
->pdev
->dev
, "Failed from %s %d\n", __func__
, __LINE__
);
4508 fusion
= instance
->ctrl_context
;
4510 fusion
->load_balance_info_pages
= get_order(MAX_LOGICAL_DRIVES_EXT
*
4511 sizeof(struct LD_LOAD_BALANCE_INFO
));
4512 fusion
->load_balance_info
=
4513 (struct LD_LOAD_BALANCE_INFO
*)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
,
4514 fusion
->load_balance_info_pages
);
4515 if (!fusion
->load_balance_info
) {
4516 fusion
->load_balance_info
= vzalloc(MAX_LOGICAL_DRIVES_EXT
*
4517 sizeof(struct LD_LOAD_BALANCE_INFO
));
4518 if (!fusion
->load_balance_info
)
4519 dev_err(&instance
->pdev
->dev
, "Failed to allocate load_balance_info, "
4520 "continuing without Load Balance support\n");
4527 megasas_free_fusion_context(struct megasas_instance
*instance
)
4529 struct fusion_context
*fusion
= instance
->ctrl_context
;
4532 if (fusion
->load_balance_info
) {
4533 if (is_vmalloc_addr(fusion
->load_balance_info
))
4534 vfree(fusion
->load_balance_info
);
4536 free_pages((ulong
)fusion
->load_balance_info
,
4537 fusion
->load_balance_info_pages
);
4540 if (is_vmalloc_addr(fusion
))
4543 free_pages((ulong
)fusion
,
4544 instance
->ctrl_context_pages
);
4548 struct megasas_instance_template megasas_instance_template_fusion
= {
4549 .enable_intr
= megasas_enable_intr_fusion
,
4550 .disable_intr
= megasas_disable_intr_fusion
,
4551 .clear_intr
= megasas_clear_intr_fusion
,
4552 .read_fw_status_reg
= megasas_read_fw_status_reg_fusion
,
4553 .adp_reset
= megasas_adp_reset_fusion
,
4554 .check_reset
= megasas_check_reset_fusion
,
4555 .service_isr
= megasas_isr_fusion
,
4556 .tasklet
= megasas_complete_cmd_dpc_fusion
,
4557 .init_adapter
= megasas_init_adapter_fusion
,
4558 .build_and_issue_cmd
= megasas_build_and_issue_cmd_fusion
,
4559 .issue_dcmd
= megasas_issue_dcmd_fusion
,