2 * Linux MegaRAID driver for SAS based RAID controllers
4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 * FILE: megaraid_sas_fusion.c
22 * Authors: Avago Technologies
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 #include <linux/vmalloc.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_dbg.h>
57 #include <linux/dmi.h>
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
63 extern void megasas_free_cmds(struct megasas_instance
*instance
);
64 extern struct megasas_cmd
*megasas_get_cmd(struct megasas_instance
67 megasas_complete_cmd(struct megasas_instance
*instance
,
68 struct megasas_cmd
*cmd
, u8 alt_status
);
70 wait_and_poll(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
,
74 megasas_return_cmd(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
);
75 int megasas_alloc_cmds(struct megasas_instance
*instance
);
77 megasas_clear_intr_fusion(struct megasas_register_set __iomem
*regs
);
79 megasas_issue_polled(struct megasas_instance
*instance
,
80 struct megasas_cmd
*cmd
);
82 megasas_check_and_restore_queue_depth(struct megasas_instance
*instance
);
84 int megasas_transition_to_ready(struct megasas_instance
*instance
, int ocr
);
85 void megaraid_sas_kill_hba(struct megasas_instance
*instance
);
87 extern u32 megasas_dbg_lvl
;
88 void megasas_sriov_heartbeat_handler(unsigned long instance_addr
);
89 int megasas_sriov_start_heartbeat(struct megasas_instance
*instance
,
91 void megasas_start_timer(struct megasas_instance
*instance
,
92 struct timer_list
*timer
,
93 void *fn
, unsigned long interval
);
94 extern struct megasas_mgmt_info megasas_mgmt_info
;
95 extern unsigned int resetwaittime
;
96 extern unsigned int dual_qdepth_disable
;
97 static void megasas_free_rdpq_fusion(struct megasas_instance
*instance
);
98 static void megasas_free_reply_fusion(struct megasas_instance
*instance
);
103 * megasas_enable_intr_fusion - Enables interrupts
104 * @regs: MFI register set
107 megasas_enable_intr_fusion(struct megasas_instance
*instance
)
109 struct megasas_register_set __iomem
*regs
;
110 regs
= instance
->reg_set
;
112 instance
->mask_interrupts
= 0;
113 /* For Thunderbolt/Invader also clear intr on enable */
114 writel(~0, ®s
->outbound_intr_status
);
115 readl(®s
->outbound_intr_status
);
117 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK
, &(regs
)->outbound_intr_mask
);
119 /* Dummy readl to force pci flush */
120 readl(®s
->outbound_intr_mask
);
124 * megasas_disable_intr_fusion - Disables interrupt
125 * @regs: MFI register set
128 megasas_disable_intr_fusion(struct megasas_instance
*instance
)
130 u32 mask
= 0xFFFFFFFF;
132 struct megasas_register_set __iomem
*regs
;
133 regs
= instance
->reg_set
;
134 instance
->mask_interrupts
= 1;
136 writel(mask
, ®s
->outbound_intr_mask
);
137 /* Dummy readl to force pci flush */
138 status
= readl(®s
->outbound_intr_mask
);
142 megasas_clear_intr_fusion(struct megasas_register_set __iomem
*regs
)
146 * Check if it is our interrupt
148 status
= readl(®s
->outbound_intr_status
);
151 writel(status
, ®s
->outbound_intr_status
);
152 readl(®s
->outbound_intr_status
);
155 if (!(status
& MFI_FUSION_ENABLE_INTERRUPT_MASK
))
162 * megasas_get_cmd_fusion - Get a command from the free pool
163 * @instance: Adapter soft state
165 * Returns a blk_tag indexed mpt frame
167 inline struct megasas_cmd_fusion
*megasas_get_cmd_fusion(struct megasas_instance
168 *instance
, u32 blk_tag
)
170 struct fusion_context
*fusion
;
172 fusion
= instance
->ctrl_context
;
173 return fusion
->cmd_list
[blk_tag
];
177 * megasas_return_cmd_fusion - Return a cmd to free command pool
178 * @instance: Adapter soft state
179 * @cmd: Command packet to be returned to free command pool
181 inline void megasas_return_cmd_fusion(struct megasas_instance
*instance
,
182 struct megasas_cmd_fusion
*cmd
)
185 memset(cmd
->io_request
, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
);
186 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
187 cmd
->cmd_completed
= false;
191 * megasas_fire_cmd_fusion - Sends command to the FW
192 * @instance: Adapter soft state
193 * @req_desc: 32bit or 64bit Request descriptor
195 * Perform PCI Write. Ventura supports 32 bit Descriptor.
196 * Prior to Ventura (12G) MR controller supports 64 bit Descriptor.
200 megasas_fire_cmd_fusion(struct megasas_instance
*instance
,
201 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
)
203 if (instance
->is_ventura
)
204 writel(le32_to_cpu(req_desc
->u
.low
),
205 &instance
->reg_set
->inbound_single_queue_port
);
207 #if defined(writeq) && defined(CONFIG_64BIT)
208 u64 req_data
= (((u64
)le32_to_cpu(req_desc
->u
.high
) << 32) |
209 le32_to_cpu(req_desc
->u
.low
));
211 writeq(req_data
, &instance
->reg_set
->inbound_low_queue_port
);
214 spin_lock_irqsave(&instance
->hba_lock
, flags
);
215 writel(le32_to_cpu(req_desc
->u
.low
),
216 &instance
->reg_set
->inbound_low_queue_port
);
217 writel(le32_to_cpu(req_desc
->u
.high
),
218 &instance
->reg_set
->inbound_high_queue_port
);
220 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
226 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
227 * @instance: Adapter soft state
228 * fw_boot_context: Whether this function called during probe or after OCR
230 * This function is only for fusion controllers.
231 * Update host can queue, if firmware downgrade max supported firmware commands.
232 * Firmware upgrade case will be skiped because underlying firmware has
233 * more resource than exposed to the OS.
237 megasas_fusion_update_can_queue(struct megasas_instance
*instance
, int fw_boot_context
)
239 u16 cur_max_fw_cmds
= 0;
240 u16 ldio_threshold
= 0;
241 struct megasas_register_set __iomem
*reg_set
;
243 reg_set
= instance
->reg_set
;
245 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
246 if (!instance
->is_ventura
)
248 readl(&instance
->reg_set
->outbound_scratch_pad_3
) & 0x00FFFF;
250 if (dual_qdepth_disable
|| !cur_max_fw_cmds
)
251 cur_max_fw_cmds
= instance
->instancet
->read_fw_status_reg(reg_set
) & 0x00FFFF;
254 (instance
->instancet
->read_fw_status_reg(reg_set
) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS
;
256 dev_info(&instance
->pdev
->dev
,
257 "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
258 cur_max_fw_cmds
, ldio_threshold
);
260 if (fw_boot_context
== OCR_CONTEXT
) {
261 cur_max_fw_cmds
= cur_max_fw_cmds
- 1;
262 if (cur_max_fw_cmds
< instance
->max_fw_cmds
) {
263 instance
->cur_can_queue
=
264 cur_max_fw_cmds
- (MEGASAS_FUSION_INTERNAL_CMDS
+
265 MEGASAS_FUSION_IOCTL_CMDS
);
266 instance
->host
->can_queue
= instance
->cur_can_queue
;
267 instance
->ldio_threshold
= ldio_threshold
;
270 instance
->max_fw_cmds
= cur_max_fw_cmds
;
271 instance
->ldio_threshold
= ldio_threshold
;
273 if (!instance
->is_rdpq
)
274 instance
->max_fw_cmds
=
275 min_t(u16
, instance
->max_fw_cmds
, 1024);
278 instance
->max_fw_cmds
= min(instance
->max_fw_cmds
,
279 (u16
)MEGASAS_KDUMP_QUEUE_DEPTH
);
281 * Reduce the max supported cmds by 1. This is to ensure that the
282 * reply_q_sz (1 more than the max cmd that driver may send)
283 * does not exceed max cmds that the FW can support
285 instance
->max_fw_cmds
= instance
->max_fw_cmds
-1;
287 instance
->max_scsi_cmds
= instance
->max_fw_cmds
-
288 (MEGASAS_FUSION_INTERNAL_CMDS
+
289 MEGASAS_FUSION_IOCTL_CMDS
);
290 instance
->cur_can_queue
= instance
->max_scsi_cmds
;
291 instance
->host
->can_queue
= instance
->cur_can_queue
;
294 if (instance
->is_ventura
)
295 instance
->max_mpt_cmds
=
296 instance
->max_fw_cmds
* RAID_1_PEER_CMDS
;
298 instance
->max_mpt_cmds
= instance
->max_fw_cmds
;
301 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
302 * @instance: Adapter soft state
305 megasas_free_cmds_fusion(struct megasas_instance
*instance
)
308 struct fusion_context
*fusion
= instance
->ctrl_context
;
309 struct megasas_cmd_fusion
*cmd
;
312 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++) {
313 cmd
= fusion
->cmd_list
[i
];
316 dma_pool_free(fusion
->sg_dma_pool
, cmd
->sg_frame
,
317 cmd
->sg_frame_phys_addr
);
319 dma_pool_free(fusion
->sense_dma_pool
, cmd
->sense
,
320 cmd
->sense_phys_addr
);
324 if (fusion
->sg_dma_pool
) {
325 dma_pool_destroy(fusion
->sg_dma_pool
);
326 fusion
->sg_dma_pool
= NULL
;
328 if (fusion
->sense_dma_pool
) {
329 dma_pool_destroy(fusion
->sense_dma_pool
);
330 fusion
->sense_dma_pool
= NULL
;
334 /* Reply Frame, Desc*/
335 if (instance
->is_rdpq
)
336 megasas_free_rdpq_fusion(instance
);
338 megasas_free_reply_fusion(instance
);
340 /* Request Frame, Desc*/
341 if (fusion
->req_frames_desc
)
342 dma_free_coherent(&instance
->pdev
->dev
,
343 fusion
->request_alloc_sz
, fusion
->req_frames_desc
,
344 fusion
->req_frames_desc_phys
);
345 if (fusion
->io_request_frames
)
346 dma_pool_free(fusion
->io_request_frames_pool
,
347 fusion
->io_request_frames
,
348 fusion
->io_request_frames_phys
);
349 if (fusion
->io_request_frames_pool
) {
350 dma_pool_destroy(fusion
->io_request_frames_pool
);
351 fusion
->io_request_frames_pool
= NULL
;
356 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++)
357 kfree(fusion
->cmd_list
[i
]);
359 kfree(fusion
->cmd_list
);
363 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
364 * @instance: Adapter soft state
367 static int megasas_create_sg_sense_fusion(struct megasas_instance
*instance
)
371 struct fusion_context
*fusion
;
372 struct megasas_cmd_fusion
*cmd
;
374 fusion
= instance
->ctrl_context
;
375 max_cmd
= instance
->max_fw_cmds
;
378 fusion
->sg_dma_pool
=
379 dma_pool_create("mr_sg", &instance
->pdev
->dev
,
380 instance
->max_chain_frame_sz
,
381 MR_DEFAULT_NVME_PAGE_SIZE
, 0);
382 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
383 fusion
->sense_dma_pool
=
384 dma_pool_create("mr_sense", &instance
->pdev
->dev
,
385 SCSI_SENSE_BUFFERSIZE
, 64, 0);
387 if (!fusion
->sense_dma_pool
|| !fusion
->sg_dma_pool
) {
388 dev_err(&instance
->pdev
->dev
,
389 "Failed from %s %d\n", __func__
, __LINE__
);
394 * Allocate and attach a frame to each of the commands in cmd_list
396 for (i
= 0; i
< max_cmd
; i
++) {
397 cmd
= fusion
->cmd_list
[i
];
398 cmd
->sg_frame
= dma_pool_alloc(fusion
->sg_dma_pool
,
399 GFP_KERNEL
, &cmd
->sg_frame_phys_addr
);
401 cmd
->sense
= dma_pool_alloc(fusion
->sense_dma_pool
,
402 GFP_KERNEL
, &cmd
->sense_phys_addr
);
403 if (!cmd
->sg_frame
|| !cmd
->sense
) {
404 dev_err(&instance
->pdev
->dev
,
405 "Failed from %s %d\n", __func__
, __LINE__
);
410 /* create sense buffer for the raid 1/10 fp */
411 for (i
= max_cmd
; i
< instance
->max_mpt_cmds
; i
++) {
412 cmd
= fusion
->cmd_list
[i
];
413 cmd
->sense
= dma_pool_alloc(fusion
->sense_dma_pool
,
414 GFP_KERNEL
, &cmd
->sense_phys_addr
);
416 dev_err(&instance
->pdev
->dev
,
417 "Failed from %s %d\n", __func__
, __LINE__
);
426 megasas_alloc_cmdlist_fusion(struct megasas_instance
*instance
)
428 u32 max_mpt_cmd
, i
, j
;
429 struct fusion_context
*fusion
;
431 fusion
= instance
->ctrl_context
;
433 max_mpt_cmd
= instance
->max_mpt_cmds
;
436 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
437 * Allocate the dynamic array first and then allocate individual
441 kzalloc(sizeof(struct megasas_cmd_fusion
*) * max_mpt_cmd
,
443 if (!fusion
->cmd_list
) {
444 dev_err(&instance
->pdev
->dev
,
445 "Failed from %s %d\n", __func__
, __LINE__
);
449 for (i
= 0; i
< max_mpt_cmd
; i
++) {
450 fusion
->cmd_list
[i
] = kzalloc(sizeof(struct megasas_cmd_fusion
),
452 if (!fusion
->cmd_list
[i
]) {
453 for (j
= 0; j
< i
; j
++)
454 kfree(fusion
->cmd_list
[j
]);
455 kfree(fusion
->cmd_list
);
456 dev_err(&instance
->pdev
->dev
,
457 "Failed from %s %d\n", __func__
, __LINE__
);
465 megasas_alloc_request_fusion(struct megasas_instance
*instance
)
467 struct fusion_context
*fusion
;
469 fusion
= instance
->ctrl_context
;
471 fusion
->req_frames_desc
=
472 dma_alloc_coherent(&instance
->pdev
->dev
,
473 fusion
->request_alloc_sz
,
474 &fusion
->req_frames_desc_phys
, GFP_KERNEL
);
475 if (!fusion
->req_frames_desc
) {
476 dev_err(&instance
->pdev
->dev
,
477 "Failed from %s %d\n", __func__
, __LINE__
);
481 fusion
->io_request_frames_pool
=
482 dma_pool_create("mr_ioreq", &instance
->pdev
->dev
,
483 fusion
->io_frames_alloc_sz
, 16, 0);
485 if (!fusion
->io_request_frames_pool
) {
486 dev_err(&instance
->pdev
->dev
,
487 "Failed from %s %d\n", __func__
, __LINE__
);
491 fusion
->io_request_frames
=
492 dma_pool_alloc(fusion
->io_request_frames_pool
,
493 GFP_KERNEL
, &fusion
->io_request_frames_phys
);
494 if (!fusion
->io_request_frames
) {
495 dev_err(&instance
->pdev
->dev
,
496 "Failed from %s %d\n", __func__
, __LINE__
);
503 megasas_alloc_reply_fusion(struct megasas_instance
*instance
)
506 struct fusion_context
*fusion
;
507 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
508 fusion
= instance
->ctrl_context
;
510 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
511 fusion
->reply_frames_desc_pool
=
512 dma_pool_create("mr_reply", &instance
->pdev
->dev
,
513 fusion
->reply_alloc_sz
* count
, 16, 0);
515 if (!fusion
->reply_frames_desc_pool
) {
516 dev_err(&instance
->pdev
->dev
,
517 "Failed from %s %d\n", __func__
, __LINE__
);
521 fusion
->reply_frames_desc
[0] =
522 dma_pool_alloc(fusion
->reply_frames_desc_pool
,
523 GFP_KERNEL
, &fusion
->reply_frames_desc_phys
[0]);
524 if (!fusion
->reply_frames_desc
[0]) {
525 dev_err(&instance
->pdev
->dev
,
526 "Failed from %s %d\n", __func__
, __LINE__
);
529 reply_desc
= fusion
->reply_frames_desc
[0];
530 for (i
= 0; i
< fusion
->reply_q_depth
* count
; i
++, reply_desc
++)
531 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
533 /* This is not a rdpq mode, but driver still populate
534 * reply_frame_desc array to use same msix index in ISR path.
536 for (i
= 0; i
< (count
- 1); i
++)
537 fusion
->reply_frames_desc
[i
+ 1] =
538 fusion
->reply_frames_desc
[i
] +
539 (fusion
->reply_alloc_sz
)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION
);
545 megasas_alloc_rdpq_fusion(struct megasas_instance
*instance
)
548 struct fusion_context
*fusion
;
549 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
551 fusion
= instance
->ctrl_context
;
553 fusion
->rdpq_virt
= pci_alloc_consistent(instance
->pdev
,
554 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
,
556 if (!fusion
->rdpq_virt
) {
557 dev_err(&instance
->pdev
->dev
,
558 "Failed from %s %d\n", __func__
, __LINE__
);
562 memset(fusion
->rdpq_virt
, 0,
563 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
);
564 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
565 fusion
->reply_frames_desc_pool
= dma_pool_create("mr_rdpq",
566 &instance
->pdev
->dev
,
567 fusion
->reply_alloc_sz
,
570 if (!fusion
->reply_frames_desc_pool
) {
571 dev_err(&instance
->pdev
->dev
,
572 "Failed from %s %d\n", __func__
, __LINE__
);
576 for (i
= 0; i
< count
; i
++) {
577 fusion
->reply_frames_desc
[i
] =
578 dma_pool_alloc(fusion
->reply_frames_desc_pool
,
579 GFP_KERNEL
, &fusion
->reply_frames_desc_phys
[i
]);
580 if (!fusion
->reply_frames_desc
[i
]) {
581 dev_err(&instance
->pdev
->dev
,
582 "Failed from %s %d\n", __func__
, __LINE__
);
586 fusion
->rdpq_virt
[i
].RDPQBaseAddress
=
587 cpu_to_le64(fusion
->reply_frames_desc_phys
[i
]);
589 reply_desc
= fusion
->reply_frames_desc
[i
];
590 for (j
= 0; j
< fusion
->reply_q_depth
; j
++, reply_desc
++)
591 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
597 megasas_free_rdpq_fusion(struct megasas_instance
*instance
) {
600 struct fusion_context
*fusion
;
602 fusion
= instance
->ctrl_context
;
604 for (i
= 0; i
< MAX_MSIX_QUEUES_FUSION
; i
++) {
605 if (fusion
->reply_frames_desc
[i
])
606 dma_pool_free(fusion
->reply_frames_desc_pool
,
607 fusion
->reply_frames_desc
[i
],
608 fusion
->reply_frames_desc_phys
[i
]);
611 if (fusion
->reply_frames_desc_pool
)
612 dma_pool_destroy(fusion
->reply_frames_desc_pool
);
614 if (fusion
->rdpq_virt
)
615 pci_free_consistent(instance
->pdev
,
616 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY
) * MAX_MSIX_QUEUES_FUSION
,
617 fusion
->rdpq_virt
, fusion
->rdpq_phys
);
621 megasas_free_reply_fusion(struct megasas_instance
*instance
) {
623 struct fusion_context
*fusion
;
625 fusion
= instance
->ctrl_context
;
627 if (fusion
->reply_frames_desc
[0])
628 dma_pool_free(fusion
->reply_frames_desc_pool
,
629 fusion
->reply_frames_desc
[0],
630 fusion
->reply_frames_desc_phys
[0]);
632 if (fusion
->reply_frames_desc_pool
)
633 dma_pool_destroy(fusion
->reply_frames_desc_pool
);
639 * megasas_alloc_cmds_fusion - Allocates the command packets
640 * @instance: Adapter soft state
643 * Each frame has a 32-bit field called context. This context is used to get
644 * back the megasas_cmd_fusion from the frame when a frame gets completed
645 * In this driver, the 32 bit values are the indices into an array cmd_list.
646 * This array is used only to look up the megasas_cmd_fusion given the context.
647 * The free commands themselves are maintained in a linked list called cmd_pool.
649 * cmds are formed in the io_request and sg_frame members of the
650 * megasas_cmd_fusion. The context field is used to get a request descriptor
651 * and is used as SMID of the cmd.
652 * SMID value range is from 1 to max_fw_cmds.
655 megasas_alloc_cmds_fusion(struct megasas_instance
*instance
)
658 struct fusion_context
*fusion
;
659 struct megasas_cmd_fusion
*cmd
;
661 dma_addr_t io_req_base_phys
;
665 fusion
= instance
->ctrl_context
;
667 if (megasas_alloc_cmdlist_fusion(instance
))
670 if (megasas_alloc_request_fusion(instance
))
673 if (instance
->is_rdpq
) {
674 if (megasas_alloc_rdpq_fusion(instance
))
677 if (megasas_alloc_reply_fusion(instance
))
681 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
682 io_req_base
= fusion
->io_request_frames
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
;
683 io_req_base_phys
= fusion
->io_request_frames_phys
+ MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
;
686 * Add all the commands to command pool (fusion->cmd_pool)
689 /* SMID 0 is reserved. Set SMID/index from 1 */
690 for (i
= 0; i
< instance
->max_mpt_cmds
; i
++) {
691 cmd
= fusion
->cmd_list
[i
];
692 offset
= MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
* i
;
693 memset(cmd
, 0, sizeof(struct megasas_cmd_fusion
));
697 (i
>= instance
->max_scsi_cmds
&& i
< instance
->max_fw_cmds
) ?
698 (i
- instance
->max_scsi_cmds
) :
699 (u32
)ULONG_MAX
; /* Set to Invalid */
700 cmd
->instance
= instance
;
702 (struct MPI2_RAID_SCSI_IO_REQUEST
*)
703 (io_req_base
+ offset
);
704 memset(cmd
->io_request
, 0,
705 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST
));
706 cmd
->io_request_phys_addr
= io_req_base_phys
+ offset
;
707 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
710 if (megasas_create_sg_sense_fusion(instance
))
716 megasas_free_cmds_fusion(instance
);
721 * wait_and_poll - Issues a polling command
722 * @instance: Adapter soft state
723 * @cmd: Command packet to be issued
725 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
728 wait_and_poll(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
,
732 struct megasas_header
*frame_hdr
= &cmd
->frame
->hdr
;
733 struct fusion_context
*fusion
;
735 u32 msecs
= seconds
* 1000;
737 fusion
= instance
->ctrl_context
;
739 * Wait for cmd_status to change
741 for (i
= 0; (i
< msecs
) && (frame_hdr
->cmd_status
== 0xff); i
+= 20) {
746 if (frame_hdr
->cmd_status
== MFI_STAT_INVALID_STATUS
)
748 else if (frame_hdr
->cmd_status
== MFI_STAT_OK
)
755 * megasas_ioc_init_fusion - Initializes the FW
756 * @instance: Adapter soft state
758 * Issues the IOC Init cmd
761 megasas_ioc_init_fusion(struct megasas_instance
*instance
)
763 struct megasas_init_frame
*init_frame
;
764 struct MPI2_IOC_INIT_REQUEST
*IOCInitMessage
= NULL
;
765 dma_addr_t ioc_init_handle
;
766 struct megasas_cmd
*cmd
;
767 u8 ret
, cur_rdpq_mode
;
768 struct fusion_context
*fusion
;
769 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc
;
771 struct megasas_header
*frame_hdr
;
772 const char *sys_info
;
773 MFI_CAPABILITIES
*drv_ops
;
777 fusion
= instance
->ctrl_context
;
779 cmd
= megasas_get_cmd(instance
);
782 dev_err(&instance
->pdev
->dev
, "Could not allocate cmd for INIT Frame\n");
787 scratch_pad_2
= readl
788 (&instance
->reg_set
->outbound_scratch_pad_2
);
790 cur_rdpq_mode
= (scratch_pad_2
& MR_RDPQ_MODE_OFFSET
) ? 1 : 0;
792 if (instance
->is_rdpq
&& !cur_rdpq_mode
) {
793 dev_err(&instance
->pdev
->dev
, "Firmware downgrade *NOT SUPPORTED*"
794 " from RDPQ mode to non RDPQ mode\n");
799 instance
->fw_sync_cache_support
= (scratch_pad_2
&
800 MR_CAN_HANDLE_SYNC_CACHE_OFFSET
) ? 1 : 0;
801 dev_info(&instance
->pdev
->dev
, "FW supports sync cache\t: %s\n",
802 instance
->fw_sync_cache_support
? "Yes" : "No");
805 dma_alloc_coherent(&instance
->pdev
->dev
,
806 sizeof(struct MPI2_IOC_INIT_REQUEST
),
807 &ioc_init_handle
, GFP_KERNEL
);
809 if (!IOCInitMessage
) {
810 dev_err(&instance
->pdev
->dev
, "Could not allocate memory for "
816 memset(IOCInitMessage
, 0, sizeof(struct MPI2_IOC_INIT_REQUEST
));
818 IOCInitMessage
->Function
= MPI2_FUNCTION_IOC_INIT
;
819 IOCInitMessage
->WhoInit
= MPI2_WHOINIT_HOST_DRIVER
;
820 IOCInitMessage
->MsgVersion
= cpu_to_le16(MPI2_VERSION
);
821 IOCInitMessage
->HeaderVersion
= cpu_to_le16(MPI2_HEADER_VERSION
);
822 IOCInitMessage
->SystemRequestFrameSize
= cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
/ 4);
824 IOCInitMessage
->ReplyDescriptorPostQueueDepth
= cpu_to_le16(fusion
->reply_q_depth
);
825 IOCInitMessage
->ReplyDescriptorPostQueueAddress
= instance
->is_rdpq
?
826 cpu_to_le64(fusion
->rdpq_phys
) :
827 cpu_to_le64(fusion
->reply_frames_desc_phys
[0]);
828 IOCInitMessage
->MsgFlags
= instance
->is_rdpq
?
829 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
: 0;
830 IOCInitMessage
->SystemRequestFrameBaseAddress
= cpu_to_le64(fusion
->io_request_frames_phys
);
831 IOCInitMessage
->HostMSIxVectors
= instance
->msix_vectors
;
832 IOCInitMessage
->HostPageSize
= MR_DEFAULT_NVME_PAGE_SHIFT
;
833 init_frame
= (struct megasas_init_frame
*)cmd
->frame
;
834 memset(init_frame
, 0, MEGAMFI_FRAME_SIZE
);
836 frame_hdr
= &cmd
->frame
->hdr
;
837 frame_hdr
->cmd_status
= 0xFF;
838 frame_hdr
->flags
= cpu_to_le16(
839 le16_to_cpu(frame_hdr
->flags
) |
840 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
);
842 init_frame
->cmd
= MFI_CMD_INIT
;
843 init_frame
->cmd_status
= 0xFF;
845 drv_ops
= (MFI_CAPABILITIES
*) &(init_frame
->driver_operations
);
847 /* driver support Extended MSIX */
848 if (instance
->adapter_type
>= INVADER_SERIES
)
849 drv_ops
->mfi_capabilities
.support_additional_msix
= 1;
850 /* driver supports HA / Remote LUN over Fast Path interface */
851 drv_ops
->mfi_capabilities
.support_fp_remote_lun
= 1;
853 drv_ops
->mfi_capabilities
.support_max_255lds
= 1;
854 drv_ops
->mfi_capabilities
.support_ndrive_r1_lb
= 1;
855 drv_ops
->mfi_capabilities
.security_protocol_cmds_fw
= 1;
857 if (instance
->max_chain_frame_sz
> MEGASAS_CHAIN_FRAME_SZ_MIN
)
858 drv_ops
->mfi_capabilities
.support_ext_io_size
= 1;
860 drv_ops
->mfi_capabilities
.support_fp_rlbypass
= 1;
861 if (!dual_qdepth_disable
)
862 drv_ops
->mfi_capabilities
.support_ext_queue_depth
= 1;
864 drv_ops
->mfi_capabilities
.support_qd_throttling
= 1;
865 drv_ops
->mfi_capabilities
.support_pd_map_target_id
= 1;
866 /* Convert capability to LE32 */
867 cpu_to_le32s((u32
*)&init_frame
->driver_operations
.mfi_capabilities
);
869 sys_info
= dmi_get_system_info(DMI_PRODUCT_UUID
);
870 if (instance
->system_info_buf
&& sys_info
) {
871 memcpy(instance
->system_info_buf
->systemId
, sys_info
,
872 strlen(sys_info
) > 64 ? 64 : strlen(sys_info
));
873 instance
->system_info_buf
->systemIdLength
=
874 strlen(sys_info
) > 64 ? 64 : strlen(sys_info
);
875 init_frame
->system_info_lo
= instance
->system_info_h
;
876 init_frame
->system_info_hi
= 0;
879 init_frame
->queue_info_new_phys_addr_hi
=
880 cpu_to_le32(upper_32_bits(ioc_init_handle
));
881 init_frame
->queue_info_new_phys_addr_lo
=
882 cpu_to_le32(lower_32_bits(ioc_init_handle
));
883 init_frame
->data_xfer_len
= cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST
));
885 req_desc
.u
.low
= cpu_to_le32(lower_32_bits(cmd
->frame_phys_addr
));
886 req_desc
.u
.high
= cpu_to_le32(upper_32_bits(cmd
->frame_phys_addr
));
887 req_desc
.MFAIo
.RequestFlags
=
888 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA
<<
889 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
892 * disable the intr before firing the init frame
894 instance
->instancet
->disable_intr(instance
);
896 for (i
= 0; i
< (10 * 1000); i
+= 20) {
897 if (readl(&instance
->reg_set
->doorbell
) & 1)
903 /* For Ventura also IOC INIT required 64 bit Descriptor write. */
904 spin_lock_irqsave(&instance
->hba_lock
, flags
);
905 writel(le32_to_cpu(req_desc
.u
.low
),
906 &instance
->reg_set
->inbound_low_queue_port
);
907 writel(le32_to_cpu(req_desc
.u
.high
),
908 &instance
->reg_set
->inbound_high_queue_port
);
910 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
912 wait_and_poll(instance
, cmd
, MFI_POLL_TIMEOUT_SECS
);
914 frame_hdr
= &cmd
->frame
->hdr
;
915 if (frame_hdr
->cmd_status
!= 0) {
923 megasas_return_cmd(instance
, cmd
);
925 dma_free_coherent(&instance
->pdev
->dev
,
926 sizeof(struct MPI2_IOC_INIT_REQUEST
),
927 IOCInitMessage
, ioc_init_handle
);
929 dev_err(&instance
->pdev
->dev
,
930 "Init cmd return status %s for SCSI host %d\n",
931 ret
? "FAILED" : "SUCCESS", instance
->host
->host_no
);
937 * megasas_sync_pd_seq_num - JBOD SEQ MAP
938 * @instance: Adapter soft state
939 * @pend: set to 1, if it is pended jbod map.
941 * Issue Jbod map to the firmware. If it is pended command,
942 * issue command and return. If it is first instance of jbod map
943 * issue and receive command.
946 megasas_sync_pd_seq_num(struct megasas_instance
*instance
, bool pend
) {
949 struct megasas_cmd
*cmd
;
950 struct megasas_dcmd_frame
*dcmd
;
951 struct fusion_context
*fusion
= instance
->ctrl_context
;
952 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
955 pd_sync
= (void *)fusion
->pd_seq_sync
[(instance
->pd_seq_map_id
& 1)];
956 pd_seq_h
= fusion
->pd_seq_phys
[(instance
->pd_seq_map_id
& 1)];
957 pd_seq_map_sz
= sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC
) +
958 (sizeof(struct MR_PD_CFG_SEQ
) *
959 (MAX_PHYSICAL_DEVICES
- 1));
961 cmd
= megasas_get_cmd(instance
);
963 dev_err(&instance
->pdev
->dev
,
964 "Could not get mfi cmd. Fail from %s %d\n",
969 dcmd
= &cmd
->frame
->dcmd
;
971 memset(pd_sync
, 0, pd_seq_map_sz
);
972 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
973 dcmd
->cmd
= MFI_CMD_DCMD
;
974 dcmd
->cmd_status
= 0xFF;
978 dcmd
->data_xfer_len
= cpu_to_le32(pd_seq_map_sz
);
979 dcmd
->opcode
= cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO
);
980 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(pd_seq_h
);
981 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(pd_seq_map_sz
);
984 dcmd
->mbox
.b
[0] = MEGASAS_DCMD_MBOX_PEND_FLAG
;
985 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_WRITE
);
986 instance
->jbod_seq_cmd
= cmd
;
987 instance
->instancet
->issue_dcmd(instance
, cmd
);
991 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_READ
);
993 /* Below code is only for non pended DCMD */
994 if (!instance
->mask_interrupts
)
995 ret
= megasas_issue_blocked_cmd(instance
, cmd
,
996 MFI_IO_TIMEOUT_SECS
);
998 ret
= megasas_issue_polled(instance
, cmd
);
1000 if (le32_to_cpu(pd_sync
->count
) > MAX_PHYSICAL_DEVICES
) {
1001 dev_warn(&instance
->pdev
->dev
,
1002 "driver supports max %d JBOD, but FW reports %d\n",
1003 MAX_PHYSICAL_DEVICES
, le32_to_cpu(pd_sync
->count
));
1007 if (ret
== DCMD_TIMEOUT
)
1008 megaraid_sas_kill_hba(instance
);
1010 if (ret
== DCMD_SUCCESS
)
1011 instance
->pd_seq_map_id
++;
1013 megasas_return_cmd(instance
, cmd
);
1018 * megasas_get_ld_map_info - Returns FW's ld_map structure
1019 * @instance: Adapter soft state
1020 * @pend: Pend the command or not
1021 * Issues an internal command (DCMD) to get the FW's controller PD
1022 * list structure. This information is mainly used to find out SYSTEM
1023 * supported by the FW.
1024 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
1025 * dcmd.mbox.b[0] - number of LDs being sync'd
1026 * dcmd.mbox.b[1] - 0 - complete command immediately.
1027 * - 1 - pend till config change
1028 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
1029 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
1030 * uses extended struct MR_FW_RAID_MAP_EXT
1033 megasas_get_ld_map_info(struct megasas_instance
*instance
)
1036 struct megasas_cmd
*cmd
;
1037 struct megasas_dcmd_frame
*dcmd
;
1039 dma_addr_t ci_h
= 0;
1041 struct fusion_context
*fusion
;
1043 cmd
= megasas_get_cmd(instance
);
1046 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to get cmd for map info\n");
1050 fusion
= instance
->ctrl_context
;
1053 megasas_return_cmd(instance
, cmd
);
1057 dcmd
= &cmd
->frame
->dcmd
;
1059 size_map_info
= fusion
->current_map_sz
;
1061 ci
= (void *) fusion
->ld_map
[(instance
->map_id
& 1)];
1062 ci_h
= fusion
->ld_map_phys
[(instance
->map_id
& 1)];
1065 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to alloc mem for ld_map_info\n");
1066 megasas_return_cmd(instance
, cmd
);
1070 memset(ci
, 0, fusion
->max_map_sz
);
1071 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
1072 dcmd
->cmd
= MFI_CMD_DCMD
;
1073 dcmd
->cmd_status
= 0xFF;
1074 dcmd
->sge_count
= 1;
1075 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_READ
);
1078 dcmd
->data_xfer_len
= cpu_to_le32(size_map_info
);
1079 dcmd
->opcode
= cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
);
1080 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(ci_h
);
1081 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(size_map_info
);
1083 if (!instance
->mask_interrupts
)
1084 ret
= megasas_issue_blocked_cmd(instance
, cmd
,
1085 MFI_IO_TIMEOUT_SECS
);
1087 ret
= megasas_issue_polled(instance
, cmd
);
1089 if (ret
== DCMD_TIMEOUT
)
1090 megaraid_sas_kill_hba(instance
);
1092 megasas_return_cmd(instance
, cmd
);
1098 megasas_get_map_info(struct megasas_instance
*instance
)
1100 struct fusion_context
*fusion
= instance
->ctrl_context
;
1102 fusion
->fast_path_io
= 0;
1103 if (!megasas_get_ld_map_info(instance
)) {
1104 if (MR_ValidateMapInfo(instance
)) {
1105 fusion
->fast_path_io
= 1;
1113 * megasas_sync_map_info - Returns FW's ld_map structure
1114 * @instance: Adapter soft state
1116 * Issues an internal command (DCMD) to get the FW's controller PD
1117 * list structure. This information is mainly used to find out SYSTEM
1118 * supported by the FW.
1121 megasas_sync_map_info(struct megasas_instance
*instance
)
1124 struct megasas_cmd
*cmd
;
1125 struct megasas_dcmd_frame
*dcmd
;
1128 struct fusion_context
*fusion
;
1129 struct MR_LD_TARGET_SYNC
*ci
= NULL
;
1130 struct MR_DRV_RAID_MAP_ALL
*map
;
1131 struct MR_LD_RAID
*raid
;
1132 struct MR_LD_TARGET_SYNC
*ld_sync
;
1133 dma_addr_t ci_h
= 0;
1136 cmd
= megasas_get_cmd(instance
);
1139 dev_printk(KERN_DEBUG
, &instance
->pdev
->dev
, "Failed to get cmd for sync info\n");
1143 fusion
= instance
->ctrl_context
;
1146 megasas_return_cmd(instance
, cmd
);
1150 map
= fusion
->ld_drv_map
[instance
->map_id
& 1];
1152 num_lds
= le16_to_cpu(map
->raidMap
.ldCount
);
1154 dcmd
= &cmd
->frame
->dcmd
;
1156 size_sync_info
= sizeof(struct MR_LD_TARGET_SYNC
) *num_lds
;
1158 memset(dcmd
->mbox
.b
, 0, MFI_MBOX_SIZE
);
1160 ci
= (struct MR_LD_TARGET_SYNC
*)
1161 fusion
->ld_map
[(instance
->map_id
- 1) & 1];
1162 memset(ci
, 0, fusion
->max_map_sz
);
1164 ci_h
= fusion
->ld_map_phys
[(instance
->map_id
- 1) & 1];
1166 ld_sync
= (struct MR_LD_TARGET_SYNC
*)ci
;
1168 for (i
= 0; i
< num_lds
; i
++, ld_sync
++) {
1169 raid
= MR_LdRaidGet(i
, map
);
1170 ld_sync
->targetId
= MR_GetLDTgtId(i
, map
);
1171 ld_sync
->seqNum
= raid
->seqNum
;
1174 size_map_info
= fusion
->current_map_sz
;
1176 dcmd
->cmd
= MFI_CMD_DCMD
;
1177 dcmd
->cmd_status
= 0xFF;
1178 dcmd
->sge_count
= 1;
1179 dcmd
->flags
= cpu_to_le16(MFI_FRAME_DIR_WRITE
);
1182 dcmd
->data_xfer_len
= cpu_to_le32(size_map_info
);
1183 dcmd
->mbox
.b
[0] = num_lds
;
1184 dcmd
->mbox
.b
[1] = MEGASAS_DCMD_MBOX_PEND_FLAG
;
1185 dcmd
->opcode
= cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
);
1186 dcmd
->sgl
.sge32
[0].phys_addr
= cpu_to_le32(ci_h
);
1187 dcmd
->sgl
.sge32
[0].length
= cpu_to_le32(size_map_info
);
1189 instance
->map_update_cmd
= cmd
;
1191 instance
->instancet
->issue_dcmd(instance
, cmd
);
/**
 * megasas_display_intel_branding - Display branding string
 * @instance: per adapter object
 */
1203 megasas_display_intel_branding(struct megasas_instance
*instance
)
1205 if (instance
->pdev
->subsystem_vendor
!= PCI_VENDOR_ID_INTEL
)
1208 switch (instance
->pdev
->device
) {
1209 case PCI_DEVICE_ID_LSI_INVADER
:
1210 switch (instance
->pdev
->subsystem_device
) {
1211 case MEGARAID_INTEL_RS3DC080_SSDID
:
1212 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1213 instance
->host
->host_no
,
1214 MEGARAID_INTEL_RS3DC080_BRANDING
);
1216 case MEGARAID_INTEL_RS3DC040_SSDID
:
1217 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1218 instance
->host
->host_no
,
1219 MEGARAID_INTEL_RS3DC040_BRANDING
);
1221 case MEGARAID_INTEL_RS3SC008_SSDID
:
1222 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1223 instance
->host
->host_no
,
1224 MEGARAID_INTEL_RS3SC008_BRANDING
);
1226 case MEGARAID_INTEL_RS3MC044_SSDID
:
1227 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1228 instance
->host
->host_no
,
1229 MEGARAID_INTEL_RS3MC044_BRANDING
);
1235 case PCI_DEVICE_ID_LSI_FURY
:
1236 switch (instance
->pdev
->subsystem_device
) {
1237 case MEGARAID_INTEL_RS3WC080_SSDID
:
1238 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1239 instance
->host
->host_no
,
1240 MEGARAID_INTEL_RS3WC080_BRANDING
);
1242 case MEGARAID_INTEL_RS3WC040_SSDID
:
1243 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1244 instance
->host
->host_no
,
1245 MEGARAID_INTEL_RS3WC040_BRANDING
);
1251 case PCI_DEVICE_ID_LSI_CUTLASS_52
:
1252 case PCI_DEVICE_ID_LSI_CUTLASS_53
:
1253 switch (instance
->pdev
->subsystem_device
) {
1254 case MEGARAID_INTEL_RMS3BC160_SSDID
:
1255 dev_info(&instance
->pdev
->dev
, "scsi host %d: %s\n",
1256 instance
->host
->host_no
,
1257 MEGARAID_INTEL_RMS3BC160_BRANDING
);
1269 * megasas_allocate_raid_maps - Allocate memory for RAID maps
1270 * @instance: Adapter soft state
1272 * return: if success: return 0
1273 * failed: return -ENOMEM
1275 static inline int megasas_allocate_raid_maps(struct megasas_instance
*instance
)
1277 struct fusion_context
*fusion
;
1280 fusion
= instance
->ctrl_context
;
1282 fusion
->drv_map_pages
= get_order(fusion
->drv_map_sz
);
1284 for (i
= 0; i
< 2; i
++) {
1285 fusion
->ld_map
[i
] = NULL
;
1287 fusion
->ld_drv_map
[i
] = (void *)
1288 __get_free_pages(__GFP_ZERO
| GFP_KERNEL
,
1289 fusion
->drv_map_pages
);
1291 if (!fusion
->ld_drv_map
[i
]) {
1292 fusion
->ld_drv_map
[i
] = vzalloc(fusion
->drv_map_sz
);
1294 if (!fusion
->ld_drv_map
[i
]) {
1295 dev_err(&instance
->pdev
->dev
,
1296 "Could not allocate memory for local map"
1297 " size requested: %d\n",
1298 fusion
->drv_map_sz
);
1299 goto ld_drv_map_alloc_fail
;
1304 for (i
= 0; i
< 2; i
++) {
1305 fusion
->ld_map
[i
] = dma_alloc_coherent(&instance
->pdev
->dev
,
1307 &fusion
->ld_map_phys
[i
],
1309 if (!fusion
->ld_map
[i
]) {
1310 dev_err(&instance
->pdev
->dev
,
1311 "Could not allocate memory for map info %s:%d\n",
1312 __func__
, __LINE__
);
1313 goto ld_map_alloc_fail
;
1320 for (i
= 0; i
< 2; i
++) {
1321 if (fusion
->ld_map
[i
])
1322 dma_free_coherent(&instance
->pdev
->dev
,
1325 fusion
->ld_map_phys
[i
]);
1328 ld_drv_map_alloc_fail
:
1329 for (i
= 0; i
< 2; i
++) {
1330 if (fusion
->ld_drv_map
[i
]) {
1331 if (is_vmalloc_addr(fusion
->ld_drv_map
[i
]))
1332 vfree(fusion
->ld_drv_map
[i
]);
1334 free_pages((ulong
)fusion
->ld_drv_map
[i
],
1335 fusion
->drv_map_pages
);
1343 * megasas_init_adapter_fusion - Initializes the FW
1344 * @instance: Adapter soft state
1346 * This is the main function for initializing firmware.
1349 megasas_init_adapter_fusion(struct megasas_instance
*instance
)
1351 struct megasas_register_set __iomem
*reg_set
;
1352 struct fusion_context
*fusion
;
1357 fusion
= instance
->ctrl_context
;
1359 reg_set
= instance
->reg_set
;
1361 megasas_fusion_update_can_queue(instance
, PROBE_CONTEXT
);
1364 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1366 instance
->max_mfi_cmds
=
1367 MEGASAS_FUSION_INTERNAL_CMDS
+ MEGASAS_FUSION_IOCTL_CMDS
;
1369 max_cmd
= instance
->max_fw_cmds
;
1371 fusion
->reply_q_depth
= 2 * (((max_cmd
+ 1 + 15)/16)*16);
1373 fusion
->request_alloc_sz
=
1374 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION
) * instance
->max_mpt_cmds
;
1375 fusion
->reply_alloc_sz
= sizeof(union MPI2_REPLY_DESCRIPTORS_UNION
)
1376 *(fusion
->reply_q_depth
);
1377 fusion
->io_frames_alloc_sz
= MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
+
1378 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1379 * (instance
->max_mpt_cmds
+ 1)); /* Extra 1 for SMID 0 */
1381 scratch_pad_2
= readl(&instance
->reg_set
->outbound_scratch_pad_2
);
1382 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1383 * Firmware support extended IO chain frame which is 4 times more than
1385 * Legacy Firmware - Frame size is (8 * 128) = 1K
1386 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1388 if (scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK
)
1389 instance
->max_chain_frame_sz
=
1390 ((scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_MASK
) >>
1391 MEGASAS_MAX_CHAIN_SHIFT
) * MEGASAS_1MB_IO
;
1393 instance
->max_chain_frame_sz
=
1394 ((scratch_pad_2
& MEGASAS_MAX_CHAIN_SIZE_MASK
) >>
1395 MEGASAS_MAX_CHAIN_SHIFT
) * MEGASAS_256K_IO
;
1397 if (instance
->max_chain_frame_sz
< MEGASAS_CHAIN_FRAME_SZ_MIN
) {
1398 dev_warn(&instance
->pdev
->dev
, "frame size %d invalid, fall back to legacy max frame size %d\n",
1399 instance
->max_chain_frame_sz
,
1400 MEGASAS_CHAIN_FRAME_SZ_MIN
);
1401 instance
->max_chain_frame_sz
= MEGASAS_CHAIN_FRAME_SZ_MIN
;
1404 fusion
->max_sge_in_main_msg
=
1405 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1406 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
))/16;
1408 fusion
->max_sge_in_chain
=
1409 instance
->max_chain_frame_sz
1410 / sizeof(union MPI2_SGE_IO_UNION
);
1412 instance
->max_num_sge
=
1413 rounddown_pow_of_two(fusion
->max_sge_in_main_msg
1414 + fusion
->max_sge_in_chain
- 2);
1416 /* Used for pass thru MFI frame (DCMD) */
1417 fusion
->chain_offset_mfi_pthru
=
1418 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
)/16;
1420 fusion
->chain_offset_io_request
=
1421 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
-
1422 sizeof(union MPI2_SGE_IO_UNION
))/16;
1424 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
1425 for (i
= 0 ; i
< count
; i
++)
1426 fusion
->last_reply_idx
[i
] = 0;
1429 * For fusion adapters, 3 commands for IOCTL and 8 commands
1430 * for driver's internal DCMDs.
1432 instance
->max_scsi_cmds
= instance
->max_fw_cmds
-
1433 (MEGASAS_FUSION_INTERNAL_CMDS
+
1434 MEGASAS_FUSION_IOCTL_CMDS
);
1435 sema_init(&instance
->ioctl_sem
, MEGASAS_FUSION_IOCTL_CMDS
);
1438 * Allocate memory for descriptors
1439 * Create a pool of commands
1441 if (megasas_alloc_cmds(instance
))
1442 goto fail_alloc_mfi_cmds
;
1443 if (megasas_alloc_cmds_fusion(instance
))
1444 goto fail_alloc_cmds
;
1446 if (megasas_ioc_init_fusion(instance
))
1449 megasas_display_intel_branding(instance
);
1450 if (megasas_get_ctrl_info(instance
)) {
1451 dev_err(&instance
->pdev
->dev
,
1452 "Could not get controller info. Fail from %s %d\n",
1453 __func__
, __LINE__
);
1457 instance
->flag_ieee
= 1;
1458 instance
->r1_ldio_hint_default
= MR_R1_LDIO_PIGGYBACK_DEFAULT
;
1459 fusion
->fast_path_io
= 0;
1461 if (megasas_allocate_raid_maps(instance
))
1464 if (!megasas_get_map_info(instance
))
1465 megasas_sync_map_info(instance
);
1470 megasas_free_cmds_fusion(instance
);
1472 megasas_free_cmds(instance
);
1473 fail_alloc_mfi_cmds
:
1478 * map_cmd_status - Maps FW cmd status to OS cmd status
1479 * @cmd : Pointer to cmd
1480 * @status : status of cmd returned by FW
1481 * @ext_status : ext status of cmd returned by FW
1485 map_cmd_status(struct fusion_context
*fusion
,
1486 struct scsi_cmnd
*scmd
, u8 status
, u8 ext_status
,
1487 u32 data_length
, u8
*sense
)
1492 cmd_type
= megasas_cmd_type(scmd
);
1496 scmd
->result
= DID_OK
<< 16;
1499 case MFI_STAT_SCSI_IO_FAILED
:
1500 case MFI_STAT_LD_INIT_IN_PROGRESS
:
1501 scmd
->result
= (DID_ERROR
<< 16) | ext_status
;
1504 case MFI_STAT_SCSI_DONE_WITH_ERROR
:
1506 scmd
->result
= (DID_OK
<< 16) | ext_status
;
1507 if (ext_status
== SAM_STAT_CHECK_CONDITION
) {
1508 memset(scmd
->sense_buffer
, 0,
1509 SCSI_SENSE_BUFFERSIZE
);
1510 memcpy(scmd
->sense_buffer
, sense
,
1511 SCSI_SENSE_BUFFERSIZE
);
1512 scmd
->result
|= DRIVER_SENSE
<< 24;
1516 * If the IO request is partially completed, then MR FW will
1517 * update "io_request->DataLength" field with actual number of
1518 * bytes transferred.Driver will set residual bytes count in
1519 * SCSI command structure.
1521 resid
= (scsi_bufflen(scmd
) - data_length
);
1522 scsi_set_resid(scmd
, resid
);
1525 ((cmd_type
== READ_WRITE_LDIO
) ||
1526 (cmd_type
== READ_WRITE_SYSPDIO
)))
1527 scmd_printk(KERN_INFO
, scmd
, "BRCM Debug mfi stat 0x%x, data len"
1528 " requested/completed 0x%x/0x%x\n",
1529 status
, scsi_bufflen(scmd
), data_length
);
1532 case MFI_STAT_LD_OFFLINE
:
1533 case MFI_STAT_DEVICE_NOT_FOUND
:
1534 scmd
->result
= DID_BAD_TARGET
<< 16;
1536 case MFI_STAT_CONFIG_SEQ_MISMATCH
:
1537 scmd
->result
= DID_IMM_RETRY
<< 16;
1540 scmd
->result
= DID_ERROR
<< 16;
1546 * megasas_is_prp_possible -
1547 * Checks if native NVMe PRPs can be built for the IO
1549 * @instance: Adapter soft state
1550 * @scmd: SCSI command from the mid-layer
1551 * @sge_count: scatter gather element count.
1553 * Returns: true: PRPs can be built
1554 * false: IEEE SGLs needs to be built
1557 megasas_is_prp_possible(struct megasas_instance
*instance
,
1558 struct scsi_cmnd
*scmd
, int sge_count
)
1560 struct fusion_context
*fusion
;
1562 u32 data_length
= 0;
1563 struct scatterlist
*sg_scmd
;
1564 bool build_prp
= false;
1565 u32 mr_nvme_pg_size
;
1567 mr_nvme_pg_size
= max_t(u32
, instance
->nvme_page_size
,
1568 MR_DEFAULT_NVME_PAGE_SIZE
);
1569 fusion
= instance
->ctrl_context
;
1570 data_length
= scsi_bufflen(scmd
);
1571 sg_scmd
= scsi_sglist(scmd
);
1574 * NVMe uses one PRP for each page (or part of a page)
1575 * look at the data length - if 4 pages or less then IEEE is OK
1576 * if > 5 pages then we need to build a native SGL
1577 * if > 4 and <= 5 pages, then check physical address of 1st SG entry
1578 * if this first size in the page is >= the residual beyond 4 pages
1579 * then use IEEE, otherwise use native SGL
1582 if (data_length
> (mr_nvme_pg_size
* 5)) {
1584 } else if ((data_length
> (mr_nvme_pg_size
* 4)) &&
1585 (data_length
<= (mr_nvme_pg_size
* 5))) {
1586 /* check if 1st SG entry size is < residual beyond 4 pages */
1587 if (sg_dma_len(sg_scmd
) < (data_length
- (mr_nvme_pg_size
* 4)))
1592 * Below code detects gaps/holes in IO data buffers.
1593 * What does holes/gaps mean?
1594 * Any SGE except first one in a SGL starts at non NVME page size
1595 * aligned address OR Any SGE except last one in a SGL ends at
1596 * non NVME page size boundary.
1598 * Driver has already informed block layer by setting boundary rules for
1599 * bio merging done at NVME page size boundary calling kernel API
1600 * blk_queue_virt_boundary inside slave_config.
1601 * Still there is possibility of IO coming with holes to driver because of
1602 * IO merging done by IO scheduler.
1604 * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
1605 * IO scheduling so no IO merging.
1607 * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
1608 * then sending IOs with holes.
1610 * Though driver can request block layer to disable IO merging by calling-
1611 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
1612 * user may tune sysfs parameter- nomerges again to 0 or 1.
1614 * If in future IO scheduling is enabled with SCSI BLK MQ,
1615 * this algorithm to detect holes will be required in driver
1616 * for SCSI BLK MQ enabled case as well.
1620 scsi_for_each_sg(scmd
, sg_scmd
, sge_count
, i
) {
1621 if ((i
!= 0) && (i
!= (sge_count
- 1))) {
1622 if (mega_mod64(sg_dma_len(sg_scmd
), mr_nvme_pg_size
) ||
1623 mega_mod64(sg_dma_address(sg_scmd
),
1626 atomic_inc(&instance
->sge_holes_type1
);
1631 if ((sge_count
> 1) && (i
== 0)) {
1632 if ((mega_mod64((sg_dma_address(sg_scmd
) +
1633 sg_dma_len(sg_scmd
)),
1634 mr_nvme_pg_size
))) {
1636 atomic_inc(&instance
->sge_holes_type2
);
1641 if ((sge_count
> 1) && (i
== (sge_count
- 1))) {
1642 if (mega_mod64(sg_dma_address(sg_scmd
),
1645 atomic_inc(&instance
->sge_holes_type3
);
1655 * megasas_make_prp_nvme -
1656 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1658 * @instance: Adapter soft state
1659 * @scmd: SCSI command from the mid-layer
1660 * @sgl_ptr: SGL to be filled in
1661 * @cmd: Fusion command frame
1662 * @sge_count: scatter gather element count.
1664 * Returns: true: PRPs are built
1665 * false: IEEE SGLs needs to be built
1668 megasas_make_prp_nvme(struct megasas_instance
*instance
, struct scsi_cmnd
*scmd
,
1669 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr
,
1670 struct megasas_cmd_fusion
*cmd
, int sge_count
)
1672 int sge_len
, offset
, num_prp_in_chain
= 0;
1673 struct MPI25_IEEE_SGE_CHAIN64
*main_chain_element
, *ptr_first_sgl
;
1675 dma_addr_t ptr_sgl_phys
;
1677 u32 page_mask
, page_mask_result
;
1678 struct scatterlist
*sg_scmd
;
1680 bool build_prp
= false;
1681 int data_len
= scsi_bufflen(scmd
);
1682 struct fusion_context
*fusion
;
1683 u32 mr_nvme_pg_size
= max_t(u32
, instance
->nvme_page_size
,
1684 MR_DEFAULT_NVME_PAGE_SIZE
);
1686 fusion
= instance
->ctrl_context
;
1688 build_prp
= megasas_is_prp_possible(instance
, scmd
, sge_count
);
1694 * Nvme has a very convoluted prp format. One prp is required
1695 * for each page or partial page. Driver need to split up OS sg_list
1696 * entries if it is longer than one page or cross a page
1697 * boundary. Driver also have to insert a PRP list pointer entry as
1698 * the last entry in each physical page of the PRP list.
1700 * NOTE: The first PRP "entry" is actually placed in the first
1701 * SGL entry in the main message as IEEE 64 format. The 2nd
1702 * entry in the main message is the chain element, and the rest
1703 * of the PRP entries are built in the contiguous pcie buffer.
1705 page_mask
= mr_nvme_pg_size
- 1;
1706 ptr_sgl
= (u64
*)cmd
->sg_frame
;
1707 ptr_sgl_phys
= cmd
->sg_frame_phys_addr
;
1708 memset(ptr_sgl
, 0, instance
->max_chain_frame_sz
);
1710 /* Build chain frame element which holds all prps except first*/
1711 main_chain_element
= (struct MPI25_IEEE_SGE_CHAIN64
*)
1712 ((u8
*)sgl_ptr
+ sizeof(struct MPI25_IEEE_SGE_CHAIN64
));
1714 main_chain_element
->Address
= cpu_to_le64(ptr_sgl_phys
);
1715 main_chain_element
->NextChainOffset
= 0;
1716 main_chain_element
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
1717 IEEE_SGE_FLAGS_SYSTEM_ADDR
|
1718 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP
;
1720 /* Build first prp, sge need not to be page aligned*/
1721 ptr_first_sgl
= sgl_ptr
;
1722 sg_scmd
= scsi_sglist(scmd
);
1723 sge_addr
= sg_dma_address(sg_scmd
);
1724 sge_len
= sg_dma_len(sg_scmd
);
1726 offset
= (u32
)(sge_addr
& page_mask
);
1727 first_prp_len
= mr_nvme_pg_size
- offset
;
1729 ptr_first_sgl
->Address
= cpu_to_le64(sge_addr
);
1730 ptr_first_sgl
->Length
= cpu_to_le32(first_prp_len
);
1732 data_len
-= first_prp_len
;
1734 if (sge_len
> first_prp_len
) {
1735 sge_addr
+= first_prp_len
;
1736 sge_len
-= first_prp_len
;
1737 } else if (sge_len
== first_prp_len
) {
1738 sg_scmd
= sg_next(sg_scmd
);
1739 sge_addr
= sg_dma_address(sg_scmd
);
1740 sge_len
= sg_dma_len(sg_scmd
);
1744 offset
= (u32
)(sge_addr
& page_mask
);
1746 /* Put PRP pointer due to page boundary*/
1747 page_mask_result
= (uintptr_t)(ptr_sgl
+ 1) & page_mask
;
1748 if (unlikely(!page_mask_result
)) {
1749 scmd_printk(KERN_NOTICE
,
1750 scmd
, "page boundary ptr_sgl: 0x%p\n",
1753 *ptr_sgl
= cpu_to_le64(ptr_sgl_phys
);
1758 *ptr_sgl
= cpu_to_le64(sge_addr
);
1763 sge_addr
+= mr_nvme_pg_size
;
1764 sge_len
-= mr_nvme_pg_size
;
1765 data_len
-= mr_nvme_pg_size
;
1773 sg_scmd
= sg_next(sg_scmd
);
1774 sge_addr
= sg_dma_address(sg_scmd
);
1775 sge_len
= sg_dma_len(sg_scmd
);
1778 main_chain_element
->Length
=
1779 cpu_to_le32(num_prp_in_chain
* sizeof(u64
));
1781 atomic_inc(&instance
->prp_sgl
);
1786 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1787 * @instance: Adapter soft state
1788 * @scp: SCSI command from the mid-layer
1789 * @sgl_ptr: SGL to be filled in
1790 * @cmd: cmd we are working on
1791 * @sge_count sge count
1795 megasas_make_sgl_fusion(struct megasas_instance
*instance
,
1796 struct scsi_cmnd
*scp
,
1797 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr
,
1798 struct megasas_cmd_fusion
*cmd
, int sge_count
)
1800 int i
, sg_processed
;
1801 struct scatterlist
*os_sgl
;
1802 struct fusion_context
*fusion
;
1804 fusion
= instance
->ctrl_context
;
1806 if (instance
->adapter_type
>= INVADER_SERIES
) {
1807 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr_end
= sgl_ptr
;
1808 sgl_ptr_end
+= fusion
->max_sge_in_main_msg
- 1;
1809 sgl_ptr_end
->Flags
= 0;
1812 scsi_for_each_sg(scp
, os_sgl
, sge_count
, i
) {
1813 sgl_ptr
->Length
= cpu_to_le32(sg_dma_len(os_sgl
));
1814 sgl_ptr
->Address
= cpu_to_le64(sg_dma_address(os_sgl
));
1816 if (instance
->adapter_type
>= INVADER_SERIES
)
1817 if (i
== sge_count
- 1)
1818 sgl_ptr
->Flags
= IEEE_SGE_FLAGS_END_OF_LIST
;
1820 sg_processed
= i
+ 1;
1822 if ((sg_processed
== (fusion
->max_sge_in_main_msg
- 1)) &&
1823 (sge_count
> fusion
->max_sge_in_main_msg
)) {
1825 struct MPI25_IEEE_SGE_CHAIN64
*sg_chain
;
1826 if (instance
->adapter_type
>= INVADER_SERIES
) {
1827 if ((le16_to_cpu(cmd
->io_request
->IoFlags
) &
1828 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
) !=
1829 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
)
1830 cmd
->io_request
->ChainOffset
=
1832 chain_offset_io_request
;
1834 cmd
->io_request
->ChainOffset
= 0;
1836 cmd
->io_request
->ChainOffset
=
1837 fusion
->chain_offset_io_request
;
1840 /* Prepare chain element */
1841 sg_chain
->NextChainOffset
= 0;
1842 if (instance
->adapter_type
>= INVADER_SERIES
)
1843 sg_chain
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
;
1846 (IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
1847 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR
);
1848 sg_chain
->Length
= cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION
) * (sge_count
- sg_processed
)));
1849 sg_chain
->Address
= cpu_to_le64(cmd
->sg_frame_phys_addr
);
1852 (struct MPI25_IEEE_SGE_CHAIN64
*)cmd
->sg_frame
;
1853 memset(sgl_ptr
, 0, instance
->max_chain_frame_sz
);
1856 atomic_inc(&instance
->ieee_sgl
);
1860 * megasas_make_sgl - Build Scatter Gather List(SGLs)
1861 * @scp: SCSI command pointer
1862 * @instance: Soft instance of controller
1863 * @cmd: Fusion command pointer
1865 * This function will build sgls based on device type.
1866 * For nvme drives, there is different way of building sgls in nvme native
1867 * format- PRPs(Physical Region Page).
1869 * Returns the number of sg lists actually used, zero if the sg lists
1870 * is NULL, or -ENOMEM if the mapping failed
1873 int megasas_make_sgl(struct megasas_instance
*instance
, struct scsi_cmnd
*scp
,
1874 struct megasas_cmd_fusion
*cmd
)
1877 bool build_prp
= false;
1878 struct MPI25_IEEE_SGE_CHAIN64
*sgl_chain64
;
1880 sge_count
= scsi_dma_map(scp
);
1882 if ((sge_count
> instance
->max_num_sge
) || (sge_count
<= 0))
1885 sgl_chain64
= (struct MPI25_IEEE_SGE_CHAIN64
*)&cmd
->io_request
->SGL
;
1886 if ((le16_to_cpu(cmd
->io_request
->IoFlags
) &
1887 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
) &&
1888 (cmd
->pd_interface
== NVME_PD
))
1889 build_prp
= megasas_make_prp_nvme(instance
, scp
, sgl_chain64
,
1893 megasas_make_sgl_fusion(instance
, scp
, sgl_chain64
,
1900 * megasas_set_pd_lba - Sets PD LBA
1902 * @cdb_len: cdb length
1903 * @start_blk: Start block of IO
1905 * Used to set the PD LBA in CDB for FP IOs
1908 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
, u8 cdb_len
,
1909 struct IO_REQUEST_INFO
*io_info
, struct scsi_cmnd
*scp
,
1910 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
, u32 ref_tag
)
1912 struct MR_LD_RAID
*raid
;
1914 u64 start_blk
= io_info
->pdBlock
;
1915 u8
*cdb
= io_request
->CDB
.CDB32
;
1916 u32 num_blocks
= io_info
->numBlocks
;
1917 u8 opcode
= 0, flagvals
= 0, groupnum
= 0, control
= 0;
1919 /* Check if T10 PI (DIF) is enabled for this LD */
1920 ld
= MR_TargetIdToLdGet(io_info
->ldTgtId
, local_map_ptr
);
1921 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
1922 if (raid
->capability
.ldPiMode
== MR_PROT_INFO_TYPE_CONTROLLER
) {
1923 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
1924 cdb
[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD
;
1925 cdb
[7] = MEGASAS_SCSI_ADDL_CDB_LEN
;
1927 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
1928 cdb
[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32
;
1930 cdb
[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32
;
1931 cdb
[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL
;
1934 cdb
[12] = (u8
)((start_blk
>> 56) & 0xff);
1935 cdb
[13] = (u8
)((start_blk
>> 48) & 0xff);
1936 cdb
[14] = (u8
)((start_blk
>> 40) & 0xff);
1937 cdb
[15] = (u8
)((start_blk
>> 32) & 0xff);
1938 cdb
[16] = (u8
)((start_blk
>> 24) & 0xff);
1939 cdb
[17] = (u8
)((start_blk
>> 16) & 0xff);
1940 cdb
[18] = (u8
)((start_blk
>> 8) & 0xff);
1941 cdb
[19] = (u8
)(start_blk
& 0xff);
1943 /* Logical block reference tag */
1944 io_request
->CDB
.EEDP32
.PrimaryReferenceTag
=
1945 cpu_to_be32(ref_tag
);
1946 io_request
->CDB
.EEDP32
.PrimaryApplicationTagMask
= cpu_to_be16(0xffff);
1947 io_request
->IoFlags
= cpu_to_le16(32); /* Specify 32-byte cdb */
1949 /* Transfer length */
1950 cdb
[28] = (u8
)((num_blocks
>> 24) & 0xff);
1951 cdb
[29] = (u8
)((num_blocks
>> 16) & 0xff);
1952 cdb
[30] = (u8
)((num_blocks
>> 8) & 0xff);
1953 cdb
[31] = (u8
)(num_blocks
& 0xff);
1955 /* set SCSI IO EEDPFlags */
1956 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
) {
1957 io_request
->EEDPFlags
= cpu_to_le16(
1958 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG
|
1959 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG
|
1960 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
|
1961 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG
|
1962 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE
|
1963 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD
);
1965 io_request
->EEDPFlags
= cpu_to_le16(
1966 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG
|
1967 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
);
1969 io_request
->Control
|= cpu_to_le32((0x4 << 26));
1970 io_request
->EEDPBlockSize
= cpu_to_le32(scp
->device
->sector_size
);
1972 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1973 if (((cdb_len
== 12) || (cdb_len
== 16)) &&
1974 (start_blk
<= 0xffffffff)) {
1975 if (cdb_len
== 16) {
1976 opcode
= cdb
[0] == READ_16
? READ_10
: WRITE_10
;
1981 opcode
= cdb
[0] == READ_12
? READ_10
: WRITE_10
;
1987 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
1994 /* Transfer length */
1995 cdb
[8] = (u8
)(num_blocks
& 0xff);
1996 cdb
[7] = (u8
)((num_blocks
>> 8) & 0xff);
1998 io_request
->IoFlags
= cpu_to_le16(10); /* Specify 10-byte cdb */
2000 } else if ((cdb_len
< 16) && (start_blk
> 0xffffffff)) {
2001 /* Convert to 16 byte CDB for large LBA's */
2004 opcode
= cdb
[0] == READ_6
? READ_16
: WRITE_16
;
2009 cdb
[0] == READ_10
? READ_16
: WRITE_16
;
2016 cdb
[0] == READ_12
? READ_16
: WRITE_16
;
2023 memset(cdb
, 0, sizeof(io_request
->CDB
.CDB32
));
2030 /* Transfer length */
2031 cdb
[13] = (u8
)(num_blocks
& 0xff);
2032 cdb
[12] = (u8
)((num_blocks
>> 8) & 0xff);
2033 cdb
[11] = (u8
)((num_blocks
>> 16) & 0xff);
2034 cdb
[10] = (u8
)((num_blocks
>> 24) & 0xff);
2036 io_request
->IoFlags
= cpu_to_le16(16); /* Specify 16-byte cdb */
2040 /* Normal case, just load LBA here */
2044 u8 val
= cdb
[1] & 0xE0;
2045 cdb
[3] = (u8
)(start_blk
& 0xff);
2046 cdb
[2] = (u8
)((start_blk
>> 8) & 0xff);
2047 cdb
[1] = val
| ((u8
)(start_blk
>> 16) & 0x1f);
2051 cdb
[5] = (u8
)(start_blk
& 0xff);
2052 cdb
[4] = (u8
)((start_blk
>> 8) & 0xff);
2053 cdb
[3] = (u8
)((start_blk
>> 16) & 0xff);
2054 cdb
[2] = (u8
)((start_blk
>> 24) & 0xff);
2057 cdb
[5] = (u8
)(start_blk
& 0xff);
2058 cdb
[4] = (u8
)((start_blk
>> 8) & 0xff);
2059 cdb
[3] = (u8
)((start_blk
>> 16) & 0xff);
2060 cdb
[2] = (u8
)((start_blk
>> 24) & 0xff);
2063 cdb
[9] = (u8
)(start_blk
& 0xff);
2064 cdb
[8] = (u8
)((start_blk
>> 8) & 0xff);
2065 cdb
[7] = (u8
)((start_blk
>> 16) & 0xff);
2066 cdb
[6] = (u8
)((start_blk
>> 24) & 0xff);
2067 cdb
[5] = (u8
)((start_blk
>> 32) & 0xff);
2068 cdb
[4] = (u8
)((start_blk
>> 40) & 0xff);
2069 cdb
[3] = (u8
)((start_blk
>> 48) & 0xff);
2070 cdb
[2] = (u8
)((start_blk
>> 56) & 0xff);
/**
 * megasas_stream_detect -	stream detection on read and write IOs
 * @instance:		Adapter soft state
 * @cmd:		Command to be prepared
 * @io_info:		IO Request info
 *
 */

/** stream detection on read and write IOs */
2085 static void megasas_stream_detect(struct megasas_instance
*instance
,
2086 struct megasas_cmd_fusion
*cmd
,
2087 struct IO_REQUEST_INFO
*io_info
)
2089 struct fusion_context
*fusion
= instance
->ctrl_context
;
2090 u32 device_id
= io_info
->ldTgtId
;
2091 struct LD_STREAM_DETECT
*current_ld_sd
2092 = fusion
->stream_detect_by_ld
[device_id
];
2093 u32
*track_stream
= ¤t_ld_sd
->mru_bit_map
, stream_num
;
2094 u32 shifted_values
, unshifted_values
;
2095 u32 index_value_mask
, shifted_values_mask
;
2097 bool is_read_ahead
= false;
2098 struct STREAM_DETECT
*current_sd
;
2099 /* find possible stream */
2100 for (i
= 0; i
< MAX_STREAMS_TRACKED
; ++i
) {
2101 stream_num
= (*track_stream
>>
2102 (i
* BITS_PER_INDEX_STREAM
)) &
2104 current_sd
= ¤t_ld_sd
->stream_track
[stream_num
];
2105 /* if we found a stream, update the raid
2106 * context and also update the mruBitMap
2108 /* boundary condition */
2109 if ((current_sd
->next_seq_lba
) &&
2110 (io_info
->ldStartBlock
>= current_sd
->next_seq_lba
) &&
2111 (io_info
->ldStartBlock
<= (current_sd
->next_seq_lba
+ 32)) &&
2112 (current_sd
->is_read
== io_info
->isRead
)) {
2114 if ((io_info
->ldStartBlock
!= current_sd
->next_seq_lba
) &&
2115 ((!io_info
->isRead
) || (!is_read_ahead
)))
2117 * Once the API availible we need to change this.
2118 * At this point we are not allowing any gap
2122 SET_STREAM_DETECTED(cmd
->io_request
->RaidContext
.raid_context_g35
);
2123 current_sd
->next_seq_lba
=
2124 io_info
->ldStartBlock
+ io_info
->numBlocks
;
2126 * update the mruBitMap LRU
2128 shifted_values_mask
=
2129 (1 << i
* BITS_PER_INDEX_STREAM
) - 1;
2130 shifted_values
= ((*track_stream
& shifted_values_mask
)
2131 << BITS_PER_INDEX_STREAM
);
2133 STREAM_MASK
<< i
* BITS_PER_INDEX_STREAM
;
2135 *track_stream
& ~(shifted_values_mask
|
2138 unshifted_values
| shifted_values
| stream_num
;
2143 * if we did not find any stream, create a new one
2144 * from the least recently used
2146 stream_num
= (*track_stream
>>
2147 ((MAX_STREAMS_TRACKED
- 1) * BITS_PER_INDEX_STREAM
)) &
2149 current_sd
= ¤t_ld_sd
->stream_track
[stream_num
];
2150 current_sd
->is_read
= io_info
->isRead
;
2151 current_sd
->next_seq_lba
= io_info
->ldStartBlock
+ io_info
->numBlocks
;
2152 *track_stream
= (((*track_stream
& ZERO_LAST_STREAM
) << 4) | stream_num
);
2157 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2158 * affinity (cpu of the controller) and raid_flags in the raid context
2161 * @praid_context: IO RAID context
2162 * @raid: LD raid map
2163 * @fp_possible: Is fast path possible?
2164 * @is_read: Is read IO?
2168 megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION
*praid_context
,
2169 struct MR_LD_RAID
*raid
, bool fp_possible
,
2170 u8 is_read
, u32 scsi_buff_len
)
2172 u8 cpu_sel
= MR_RAID_CTX_CPUSEL_0
;
2173 struct RAID_CONTEXT_G35
*rctx_g35
;
2175 rctx_g35
= &praid_context
->raid_context_g35
;
2178 if ((raid
->cpuAffinity
.pdRead
.cpu0
) &&
2179 (raid
->cpuAffinity
.pdRead
.cpu1
))
2180 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2181 else if (raid
->cpuAffinity
.pdRead
.cpu1
)
2182 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2184 if ((raid
->cpuAffinity
.pdWrite
.cpu0
) &&
2185 (raid
->cpuAffinity
.pdWrite
.cpu1
))
2186 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2187 else if (raid
->cpuAffinity
.pdWrite
.cpu1
)
2188 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2189 /* Fast path cache by pass capable R0/R1 VD */
2190 if ((raid
->level
<= 1) &&
2191 (raid
->capability
.fp_cache_bypass_capable
)) {
2192 rctx_g35
->routing_flags
|=
2193 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT
);
2194 rctx_g35
->raid_flags
=
2195 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2196 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
);
2201 if ((raid
->cpuAffinity
.ldRead
.cpu0
) &&
2202 (raid
->cpuAffinity
.ldRead
.cpu1
))
2203 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2204 else if (raid
->cpuAffinity
.ldRead
.cpu1
)
2205 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2207 if ((raid
->cpuAffinity
.ldWrite
.cpu0
) &&
2208 (raid
->cpuAffinity
.ldWrite
.cpu1
))
2209 cpu_sel
= MR_RAID_CTX_CPUSEL_FCFS
;
2210 else if (raid
->cpuAffinity
.ldWrite
.cpu1
)
2211 cpu_sel
= MR_RAID_CTX_CPUSEL_1
;
2213 if (is_stream_detected(rctx_g35
) &&
2214 ((raid
->level
== 5) || (raid
->level
== 6)) &&
2215 (raid
->writeMode
== MR_RL_WRITE_THROUGH_MODE
) &&
2216 (cpu_sel
== MR_RAID_CTX_CPUSEL_FCFS
))
2217 cpu_sel
= MR_RAID_CTX_CPUSEL_0
;
2221 rctx_g35
->routing_flags
|=
2222 (cpu_sel
<< MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT
);
2224 /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2225 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
2226 * IO Subtype is not bitmap.
2228 if ((raid
->level
== 1) && (!is_read
)) {
2229 if (scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
)
2230 praid_context
->raid_context_g35
.raid_flags
=
2231 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2232 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
);
2237 * megasas_build_ldio_fusion - Prepares IOs to devices
2238 * @instance: Adapter soft state
2239 * @scp: SCSI command
2240 * @cmd: Command to be prepared
2242 * Prepares the io_request and chain elements (sg_frame) for IO
2243 * The IO can be for PD (Fast Path) or LD
2246 megasas_build_ldio_fusion(struct megasas_instance
*instance
,
2247 struct scsi_cmnd
*scp
,
2248 struct megasas_cmd_fusion
*cmd
)
2252 u32 start_lba_lo
, start_lba_hi
, device_id
, datalength
= 0;
2254 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2255 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
2256 struct IO_REQUEST_INFO io_info
;
2257 struct fusion_context
*fusion
;
2258 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2260 unsigned long spinlock_flags
;
2261 union RAID_CONTEXT_UNION
*praid_context
;
2262 struct MR_LD_RAID
*raid
= NULL
;
2263 struct MR_PRIV_DEVICE
*mrdev_priv
;
2265 device_id
= MEGASAS_DEV_INDEX(scp
);
2267 fusion
= instance
->ctrl_context
;
2269 io_request
= cmd
->io_request
;
2270 io_request
->RaidContext
.raid_context
.virtual_disk_tgt_id
=
2271 cpu_to_le16(device_id
);
2272 io_request
->RaidContext
.raid_context
.status
= 0;
2273 io_request
->RaidContext
.raid_context
.ex_status
= 0;
2275 req_desc
= (union MEGASAS_REQUEST_DESCRIPTOR_UNION
*)cmd
->request_desc
;
2279 fp_possible
= false;
2282 * 6-byte READ(0x08) or WRITE(0x0A) cdb
2284 if (scp
->cmd_len
== 6) {
2285 datalength
= (u32
) scp
->cmnd
[4];
2286 start_lba_lo
= ((u32
) scp
->cmnd
[1] << 16) |
2287 ((u32
) scp
->cmnd
[2] << 8) | (u32
) scp
->cmnd
[3];
2289 start_lba_lo
&= 0x1FFFFF;
2293 * 10-byte READ(0x28) or WRITE(0x2A) cdb
2295 else if (scp
->cmd_len
== 10) {
2296 datalength
= (u32
) scp
->cmnd
[8] |
2297 ((u32
) scp
->cmnd
[7] << 8);
2298 start_lba_lo
= ((u32
) scp
->cmnd
[2] << 24) |
2299 ((u32
) scp
->cmnd
[3] << 16) |
2300 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2304 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
2306 else if (scp
->cmd_len
== 12) {
2307 datalength
= ((u32
) scp
->cmnd
[6] << 24) |
2308 ((u32
) scp
->cmnd
[7] << 16) |
2309 ((u32
) scp
->cmnd
[8] << 8) | (u32
) scp
->cmnd
[9];
2310 start_lba_lo
= ((u32
) scp
->cmnd
[2] << 24) |
2311 ((u32
) scp
->cmnd
[3] << 16) |
2312 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2316 * 16-byte READ(0x88) or WRITE(0x8A) cdb
2318 else if (scp
->cmd_len
== 16) {
2319 datalength
= ((u32
) scp
->cmnd
[10] << 24) |
2320 ((u32
) scp
->cmnd
[11] << 16) |
2321 ((u32
) scp
->cmnd
[12] << 8) | (u32
) scp
->cmnd
[13];
2322 start_lba_lo
= ((u32
) scp
->cmnd
[6] << 24) |
2323 ((u32
) scp
->cmnd
[7] << 16) |
2324 ((u32
) scp
->cmnd
[8] << 8) | (u32
) scp
->cmnd
[9];
2326 start_lba_hi
= ((u32
) scp
->cmnd
[2] << 24) |
2327 ((u32
) scp
->cmnd
[3] << 16) |
2328 ((u32
) scp
->cmnd
[4] << 8) | (u32
) scp
->cmnd
[5];
2331 memset(&io_info
, 0, sizeof(struct IO_REQUEST_INFO
));
2332 io_info
.ldStartBlock
= ((u64
)start_lba_hi
<< 32) | start_lba_lo
;
2333 io_info
.numBlocks
= datalength
;
2334 io_info
.ldTgtId
= device_id
;
2335 io_info
.r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
2336 scsi_buff_len
= scsi_bufflen(scp
);
2337 io_request
->DataLength
= cpu_to_le32(scsi_buff_len
);
2339 if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
2342 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2343 ld
= MR_TargetIdToLdGet(device_id
, local_map_ptr
);
2345 if (ld
< instance
->fw_supported_vd_count
)
2346 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
2348 if (!raid
|| (!fusion
->fast_path_io
)) {
2349 io_request
->RaidContext
.raid_context
.reg_lock_flags
= 0;
2350 fp_possible
= false;
2352 if (MR_BuildRaidContext(instance
, &io_info
,
2353 &io_request
->RaidContext
.raid_context
,
2354 local_map_ptr
, &raidLUN
))
2355 fp_possible
= (io_info
.fpOkForIo
> 0) ? true : false;
2358 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
2359 id by default, not CPU group id, otherwise all MSI-X queues won't
2361 cmd
->request_desc
->SCSIIO
.MSIxIndex
= instance
->msix_vectors
?
2362 raw_smp_processor_id() % instance
->msix_vectors
: 0;
2364 praid_context
= &io_request
->RaidContext
;
2366 if (instance
->is_ventura
) {
2367 spin_lock_irqsave(&instance
->stream_lock
, spinlock_flags
);
2368 megasas_stream_detect(instance
, cmd
, &io_info
);
2369 spin_unlock_irqrestore(&instance
->stream_lock
, spinlock_flags
);
2370 /* In ventura if stream detected for a read and it is read ahead
2371 * capable make this IO as LDIO
2373 if (is_stream_detected(&io_request
->RaidContext
.raid_context_g35
) &&
2374 io_info
.isRead
&& io_info
.ra_capable
)
2375 fp_possible
= false;
2377 /* FP for Optimal raid level 1.
2378 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2379 * are built by the driver as LD I/Os.
2380 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2381 * (there is never a reason to process these as buffered writes)
2382 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2383 * with the SLD bit asserted.
2385 if (io_info
.r1_alt_dev_handle
!= MR_DEVHANDLE_INVALID
) {
2386 mrdev_priv
= scp
->device
->hostdata
;
2388 if (atomic_inc_return(&instance
->fw_outstanding
) >
2389 (instance
->host
->can_queue
)) {
2390 fp_possible
= false;
2391 atomic_dec(&instance
->fw_outstanding
);
2392 } else if ((scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
) ||
2393 (atomic_dec_if_positive(&mrdev_priv
->r1_ldio_hint
) > 0)) {
2394 fp_possible
= false;
2395 atomic_dec(&instance
->fw_outstanding
);
2396 if (scsi_buff_len
> MR_LARGE_IO_MIN_SIZE
)
2397 atomic_set(&mrdev_priv
->r1_ldio_hint
,
2398 instance
->r1_ldio_hint_default
);
2402 /* If raid is NULL, set CPU affinity to default CPU0 */
2404 megasas_set_raidflag_cpu_affinity(praid_context
,
2405 raid
, fp_possible
, io_info
.isRead
,
2408 praid_context
->raid_context_g35
.routing_flags
|=
2409 (MR_RAID_CTX_CPUSEL_0
<< MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT
);
2413 megasas_set_pd_lba(io_request
, scp
->cmd_len
, &io_info
, scp
,
2414 local_map_ptr
, start_lba_lo
);
2415 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2416 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2417 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2418 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2419 if (instance
->adapter_type
== INVADER_SERIES
) {
2420 if (io_request
->RaidContext
.raid_context
.reg_lock_flags
==
2422 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2423 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK
<<
2424 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2425 io_request
->RaidContext
.raid_context
.type
2427 io_request
->RaidContext
.raid_context
.nseg
= 0x1;
2428 io_request
->IoFlags
|= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2429 io_request
->RaidContext
.raid_context
.reg_lock_flags
|=
2430 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA
|
2431 MR_RL_FLAGS_SEQ_NUM_ENABLE
);
2432 } else if (instance
->is_ventura
) {
2433 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2434 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2435 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2436 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2437 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2438 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2439 io_request
->IoFlags
|=
2440 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2442 if (fusion
->load_balance_info
&&
2443 (fusion
->load_balance_info
[device_id
].loadBalanceFlag
) &&
2446 get_updated_dev_handle(instance
,
2447 &fusion
->load_balance_info
[device_id
],
2448 &io_info
, local_map_ptr
);
2449 scp
->SCp
.Status
|= MEGASAS_LOAD_BALANCE_FLAG
;
2450 cmd
->pd_r1_lb
= io_info
.pd_after_lb
;
2451 if (instance
->is_ventura
)
2452 io_request
->RaidContext
.raid_context_g35
.span_arm
2455 io_request
->RaidContext
.raid_context
.span_arm
2459 scp
->SCp
.Status
&= ~MEGASAS_LOAD_BALANCE_FLAG
;
2461 if (instance
->is_ventura
)
2462 cmd
->r1_alt_dev_handle
= io_info
.r1_alt_dev_handle
;
2464 cmd
->r1_alt_dev_handle
= MR_DEVHANDLE_INVALID
;
2466 if ((raidLUN
[0] == 1) &&
2467 (local_map_ptr
->raidMap
.devHndlInfo
[io_info
.pd_after_lb
].validHandles
> 1)) {
2468 instance
->dev_handle
= !(instance
->dev_handle
);
2470 local_map_ptr
->raidMap
.devHndlInfo
[io_info
.pd_after_lb
].devHandle
[instance
->dev_handle
];
2473 cmd
->request_desc
->SCSIIO
.DevHandle
= io_info
.devHandle
;
2474 io_request
->DevHandle
= io_info
.devHandle
;
2475 cmd
->pd_interface
= io_info
.pd_interface
;
2476 /* populate the LUN field */
2477 memcpy(io_request
->LUN
, raidLUN
, 8);
2479 io_request
->RaidContext
.raid_context
.timeout_value
=
2480 cpu_to_le16(local_map_ptr
->raidMap
.fpPdIoTimeoutSec
);
2481 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2482 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2483 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2484 if (instance
->adapter_type
== INVADER_SERIES
) {
2485 if (io_info
.do_fp_rlbypass
||
2486 (io_request
->RaidContext
.raid_context
.reg_lock_flags
2487 == REGION_TYPE_UNUSED
))
2488 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2489 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK
<<
2490 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2491 io_request
->RaidContext
.raid_context
.type
2493 io_request
->RaidContext
.raid_context
.reg_lock_flags
|=
2494 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0
|
2495 MR_RL_FLAGS_SEQ_NUM_ENABLE
);
2496 io_request
->RaidContext
.raid_context
.nseg
= 0x1;
2497 } else if (instance
->is_ventura
) {
2498 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2499 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2500 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2501 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2502 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2503 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2505 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2506 io_request
->DevHandle
= cpu_to_le16(device_id
);
2512 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
2513 * @instance: Adapter soft state
2514 * @scp: SCSI command
2515 * @cmd: Command to be prepared
2517 * Prepares the io_request frame for non-rw io cmds for vd.
2519 static void megasas_build_ld_nonrw_fusion(struct megasas_instance
*instance
,
2520 struct scsi_cmnd
*scmd
, struct megasas_cmd_fusion
*cmd
)
2523 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2525 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2526 struct fusion_context
*fusion
= instance
->ctrl_context
;
2530 struct MR_LD_RAID
*raid
;
2531 struct RAID_CONTEXT
*pRAID_Context
;
2534 io_request
= cmd
->io_request
;
2535 device_id
= MEGASAS_DEV_INDEX(scmd
);
2536 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2537 io_request
->DataLength
= cpu_to_le32(scsi_bufflen(scmd
));
2538 /* get RAID_Context pointer */
2539 pRAID_Context
= &io_request
->RaidContext
.raid_context
;
2540 /* Check with FW team */
2541 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2542 pRAID_Context
->reg_lock_row_lba
= 0;
2543 pRAID_Context
->reg_lock_length
= 0;
2545 if (fusion
->fast_path_io
&& (
2546 device_id
< instance
->fw_supported_vd_count
)) {
2548 ld
= MR_TargetIdToLdGet(device_id
, local_map_ptr
);
2549 if (ld
>= instance
->fw_supported_vd_count
)
2552 raid
= MR_LdRaidGet(ld
, local_map_ptr
);
2553 if (!(raid
->capability
.fpNonRWCapable
))
2560 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2561 io_request
->DevHandle
= cpu_to_le16(device_id
);
2562 io_request
->LUN
[1] = scmd
->device
->lun
;
2563 pRAID_Context
->timeout_value
=
2564 cpu_to_le16 (scmd
->request
->timeout
/ HZ
);
2565 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2566 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
2567 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2570 /* set RAID context values */
2571 pRAID_Context
->config_seq_num
= raid
->seqNum
;
2572 if (!instance
->is_ventura
)
2573 pRAID_Context
->reg_lock_flags
= REGION_TYPE_SHARED_READ
;
2574 pRAID_Context
->timeout_value
=
2575 cpu_to_le16(raid
->fpIoTimeoutForLd
);
2577 /* get the DevHandle for the PD (since this is
2578 fpNonRWCapable, this is a single disk RAID0) */
2580 arRef
= MR_LdSpanArrayGet(ld
, span
, local_map_ptr
);
2581 pd
= MR_ArPdGet(arRef
, physArm
, local_map_ptr
);
2582 devHandle
= MR_PdDevHandleGet(pd
, local_map_ptr
);
2584 /* build request descriptor */
2585 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2586 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<<
2587 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2588 cmd
->request_desc
->SCSIIO
.DevHandle
= devHandle
;
2590 /* populate the LUN field */
2591 memcpy(io_request
->LUN
, raid
->LUN
, 8);
2593 /* build the raidScsiIO structure */
2594 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2595 io_request
->DevHandle
= devHandle
;
2600 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
2601 * @instance: Adapter soft state
2602 * @scp: SCSI command
2603 * @cmd: Command to be prepared
2604 * @fp_possible: parameter to detect fast path or firmware path io.
2606 * Prepares the io_request frame for rw/non-rw io cmds for syspds
2609 megasas_build_syspd_fusion(struct megasas_instance
*instance
,
2610 struct scsi_cmnd
*scmd
, struct megasas_cmd_fusion
*cmd
,
2614 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
;
2616 u16 os_timeout_value
;
2618 struct MR_DRV_RAID_MAP_ALL
*local_map_ptr
;
2619 struct RAID_CONTEXT
*pRAID_Context
;
2620 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
2621 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
2622 struct fusion_context
*fusion
= instance
->ctrl_context
;
2623 pd_sync
= (void *)fusion
->pd_seq_sync
[(instance
->pd_seq_map_id
- 1) & 1];
2625 device_id
= MEGASAS_DEV_INDEX(scmd
);
2626 pd_index
= MEGASAS_PD_INDEX(scmd
);
2627 os_timeout_value
= scmd
->request
->timeout
/ HZ
;
2628 mr_device_priv_data
= scmd
->device
->hostdata
;
2629 cmd
->pd_interface
= mr_device_priv_data
->interface_type
;
2631 io_request
= cmd
->io_request
;
2632 /* get RAID_Context pointer */
2633 pRAID_Context
= &io_request
->RaidContext
.raid_context
;
2634 pRAID_Context
->reg_lock_flags
= 0;
2635 pRAID_Context
->reg_lock_row_lba
= 0;
2636 pRAID_Context
->reg_lock_length
= 0;
2637 io_request
->DataLength
= cpu_to_le32(scsi_bufflen(scmd
));
2638 io_request
->LUN
[1] = scmd
->device
->lun
;
2639 pRAID_Context
->raid_flags
= MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
2640 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT
;
2642 /* If FW supports PD sequence number */
2643 if (instance
->use_seqnum_jbod_fp
&&
2644 instance
->pd_list
[pd_index
].driveType
== TYPE_DISK
) {
2645 /* TgtId must be incremented by 255 as jbod seq number is index
2648 /* More than 256 PD/JBOD support for Ventura */
2649 if (instance
->support_morethan256jbod
)
2650 pRAID_Context
->virtual_disk_tgt_id
=
2651 pd_sync
->seq
[pd_index
].pd_target_id
;
2653 pRAID_Context
->virtual_disk_tgt_id
=
2654 cpu_to_le16(device_id
+ (MAX_PHYSICAL_DEVICES
- 1));
2655 pRAID_Context
->config_seq_num
= pd_sync
->seq
[pd_index
].seqNum
;
2656 io_request
->DevHandle
= pd_sync
->seq
[pd_index
].devHandle
;
2657 if (instance
->is_ventura
) {
2658 io_request
->RaidContext
.raid_context_g35
.routing_flags
|=
2659 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT
);
2660 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2661 (1 << RAID_CONTEXT_NSEG_SHIFT
);
2662 io_request
->RaidContext
.raid_context_g35
.nseg_type
|=
2663 (MPI2_TYPE_CUDA
<< RAID_CONTEXT_TYPE_SHIFT
);
2665 pRAID_Context
->type
= MPI2_TYPE_CUDA
;
2666 pRAID_Context
->nseg
= 0x1;
2667 pRAID_Context
->reg_lock_flags
|=
2668 (MR_RL_FLAGS_SEQ_NUM_ENABLE
|MR_RL_FLAGS_GRANT_DESTINATION_CUDA
);
2670 } else if (fusion
->fast_path_io
) {
2671 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2672 pRAID_Context
->config_seq_num
= 0;
2673 local_map_ptr
= fusion
->ld_drv_map
[(instance
->map_id
& 1)];
2674 io_request
->DevHandle
=
2675 local_map_ptr
->raidMap
.devHndlInfo
[device_id
].curDevHdl
;
2677 /* Want to send all IO via FW path */
2678 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2679 pRAID_Context
->config_seq_num
= 0;
2680 io_request
->DevHandle
= cpu_to_le16(0xFFFF);
2683 cmd
->request_desc
->SCSIIO
.DevHandle
= io_request
->DevHandle
;
2684 cmd
->request_desc
->SCSIIO
.MSIxIndex
=
2685 instance
->msix_vectors
?
2686 (raw_smp_processor_id() % instance
->msix_vectors
) : 0;
2690 /* system pd firmware path */
2691 io_request
->Function
= MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
;
2692 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2693 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
2694 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2695 pRAID_Context
->timeout_value
= cpu_to_le16(os_timeout_value
);
2696 pRAID_Context
->virtual_disk_tgt_id
= cpu_to_le16(device_id
);
2698 /* system pd Fast Path */
2699 io_request
->Function
= MPI2_FUNCTION_SCSI_IO_REQUEST
;
2700 timeout_limit
= (scmd
->device
->type
== TYPE_DISK
) ?
2702 pRAID_Context
->timeout_value
=
2703 cpu_to_le16((os_timeout_value
> timeout_limit
) ?
2704 timeout_limit
: os_timeout_value
);
2705 if (instance
->adapter_type
>= INVADER_SERIES
)
2706 io_request
->IoFlags
|=
2707 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH
);
2709 cmd
->request_desc
->SCSIIO
.RequestFlags
=
2710 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
<<
2711 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
2716 * megasas_build_io_fusion - Prepares IOs to devices
2717 * @instance: Adapter soft state
2718 * @scp: SCSI command
2719 * @cmd: Command to be prepared
2721 * Invokes helper functions to prepare request frames
2722 * and sets flags appropriate for IO/Non-IO cmd
2725 megasas_build_io_fusion(struct megasas_instance
*instance
,
2726 struct scsi_cmnd
*scp
,
2727 struct megasas_cmd_fusion
*cmd
)
2731 struct MPI2_RAID_SCSI_IO_REQUEST
*io_request
= cmd
->io_request
;
2732 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
2733 mr_device_priv_data
= scp
->device
->hostdata
;
2735 /* Zero out some fields so they don't get reused */
2736 memset(io_request
->LUN
, 0x0, 8);
2737 io_request
->CDB
.EEDP32
.PrimaryReferenceTag
= 0;
2738 io_request
->CDB
.EEDP32
.PrimaryApplicationTagMask
= 0;
2739 io_request
->EEDPFlags
= 0;
2740 io_request
->Control
= 0;
2741 io_request
->EEDPBlockSize
= 0;
2742 io_request
->ChainOffset
= 0;
2743 io_request
->RaidContext
.raid_context
.raid_flags
= 0;
2744 io_request
->RaidContext
.raid_context
.type
= 0;
2745 io_request
->RaidContext
.raid_context
.nseg
= 0;
2747 memcpy(io_request
->CDB
.CDB32
, scp
->cmnd
, scp
->cmd_len
);
2749 * Just the CDB length,rest of the Flags are zero
2750 * This will be modified for FP in build_ldio_fusion
2752 io_request
->IoFlags
= cpu_to_le16(scp
->cmd_len
);
2754 switch (cmd_type
= megasas_cmd_type(scp
)) {
2755 case READ_WRITE_LDIO
:
2756 megasas_build_ldio_fusion(instance
, scp
, cmd
);
2758 case NON_READ_WRITE_LDIO
:
2759 megasas_build_ld_nonrw_fusion(instance
, scp
, cmd
);
2761 case READ_WRITE_SYSPDIO
:
2762 megasas_build_syspd_fusion(instance
, scp
, cmd
, true);
2764 case NON_READ_WRITE_SYSPDIO
:
2765 if (instance
->secure_jbod_support
||
2766 mr_device_priv_data
->is_tm_capable
)
2767 megasas_build_syspd_fusion(instance
, scp
, cmd
, false);
2769 megasas_build_syspd_fusion(instance
, scp
, cmd
, true);
2779 sge_count
= megasas_make_sgl(instance
, scp
, cmd
);
2781 if (sge_count
> instance
->max_num_sge
|| (sge_count
< 0)) {
2782 dev_err(&instance
->pdev
->dev
,
2783 "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
2784 __func__
, __LINE__
, sge_count
, instance
->max_num_sge
);
2788 if (instance
->is_ventura
) {
2789 set_num_sge(&io_request
->RaidContext
.raid_context_g35
, sge_count
);
2790 cpu_to_le16s(&io_request
->RaidContext
.raid_context_g35
.routing_flags
);
2791 cpu_to_le16s(&io_request
->RaidContext
.raid_context_g35
.nseg_type
);
2793 /* numSGE store lower 8 bit of sge_count.
2794 * numSGEExt store higher 8 bit of sge_count
2796 io_request
->RaidContext
.raid_context
.num_sge
= sge_count
;
2797 io_request
->RaidContext
.raid_context
.num_sge_ext
=
2798 (u8
)(sge_count
>> 8);
2801 io_request
->SGLFlags
= cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING
);
2803 if (scp
->sc_data_direction
== PCI_DMA_TODEVICE
)
2804 io_request
->Control
|= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE
);
2805 else if (scp
->sc_data_direction
== PCI_DMA_FROMDEVICE
)
2806 io_request
->Control
|= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ
);
2808 io_request
->SGLOffset0
=
2809 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
, SGL
) / 4;
2811 io_request
->SenseBufferLowAddress
= cpu_to_le32(cmd
->sense_phys_addr
);
2812 io_request
->SenseBufferLength
= SCSI_SENSE_BUFFERSIZE
;
2815 scp
->SCp
.ptr
= (char *)cmd
;
2820 static union MEGASAS_REQUEST_DESCRIPTOR_UNION
*
2821 megasas_get_request_descriptor(struct megasas_instance
*instance
, u16 index
)
2824 struct fusion_context
*fusion
;
2826 fusion
= instance
->ctrl_context
;
2827 p
= fusion
->req_frames_desc
+
2828 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION
) * index
;
2830 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION
*)p
;
2834 /* megasas_prepate_secondRaid1_IO
2835 * It prepares the raid 1 second IO
2837 void megasas_prepare_secondRaid1_IO(struct megasas_instance
*instance
,
2838 struct megasas_cmd_fusion
*cmd
,
2839 struct megasas_cmd_fusion
*r1_cmd
)
2841 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
, *req_desc2
= NULL
;
2842 struct fusion_context
*fusion
;
2843 fusion
= instance
->ctrl_context
;
2844 req_desc
= cmd
->request_desc
;
2845 /* copy the io request frame as well as 8 SGEs data for r1 command*/
2846 memcpy(r1_cmd
->io_request
, cmd
->io_request
,
2847 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST
)));
2848 memcpy(&r1_cmd
->io_request
->SGL
, &cmd
->io_request
->SGL
,
2849 (fusion
->max_sge_in_main_msg
* sizeof(union MPI2_SGE_IO_UNION
)));
2850 /*sense buffer is different for r1 command*/
2851 r1_cmd
->io_request
->SenseBufferLowAddress
=
2852 cpu_to_le32(r1_cmd
->sense_phys_addr
);
2853 r1_cmd
->scmd
= cmd
->scmd
;
2854 req_desc2
= megasas_get_request_descriptor(instance
,
2855 (r1_cmd
->index
- 1));
2856 req_desc2
->Words
= 0;
2857 r1_cmd
->request_desc
= req_desc2
;
2858 req_desc2
->SCSIIO
.SMID
= cpu_to_le16(r1_cmd
->index
);
2859 req_desc2
->SCSIIO
.RequestFlags
= req_desc
->SCSIIO
.RequestFlags
;
2860 r1_cmd
->request_desc
->SCSIIO
.DevHandle
= cmd
->r1_alt_dev_handle
;
2861 r1_cmd
->io_request
->DevHandle
= cmd
->r1_alt_dev_handle
;
2862 r1_cmd
->r1_alt_dev_handle
= cmd
->io_request
->DevHandle
;
2863 cmd
->io_request
->RaidContext
.raid_context_g35
.smid
.peer_smid
=
2864 cpu_to_le16(r1_cmd
->index
);
2865 r1_cmd
->io_request
->RaidContext
.raid_context_g35
.smid
.peer_smid
=
2866 cpu_to_le16(cmd
->index
);
2867 /*MSIxIndex of both commands request descriptors should be same*/
2868 r1_cmd
->request_desc
->SCSIIO
.MSIxIndex
=
2869 cmd
->request_desc
->SCSIIO
.MSIxIndex
;
2870 /*span arm is different for r1 cmd*/
2871 r1_cmd
->io_request
->RaidContext
.raid_context_g35
.span_arm
=
2872 cmd
->io_request
->RaidContext
.raid_context_g35
.span_arm
+ 1;
2876 * megasas_build_and_issue_cmd_fusion -Main routine for building and
2877 * issuing non IOCTL cmd
2878 * @instance: Adapter soft state
2879 * @scmd: pointer to scsi cmd from OS
2882 megasas_build_and_issue_cmd_fusion(struct megasas_instance
*instance
,
2883 struct scsi_cmnd
*scmd
)
2885 struct megasas_cmd_fusion
*cmd
, *r1_cmd
= NULL
;
2886 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
2888 struct fusion_context
*fusion
;
2890 fusion
= instance
->ctrl_context
;
2892 if ((megasas_cmd_type(scmd
) == READ_WRITE_LDIO
) &&
2893 instance
->ldio_threshold
&&
2894 (atomic_inc_return(&instance
->ldio_outstanding
) >
2895 instance
->ldio_threshold
)) {
2896 atomic_dec(&instance
->ldio_outstanding
);
2897 return SCSI_MLQUEUE_DEVICE_BUSY
;
2900 if (atomic_inc_return(&instance
->fw_outstanding
) >
2901 instance
->host
->can_queue
) {
2902 atomic_dec(&instance
->fw_outstanding
);
2903 return SCSI_MLQUEUE_HOST_BUSY
;
2906 cmd
= megasas_get_cmd_fusion(instance
, scmd
->request
->tag
);
2909 atomic_dec(&instance
->fw_outstanding
);
2910 return SCSI_MLQUEUE_HOST_BUSY
;
2915 req_desc
= megasas_get_request_descriptor(instance
, index
-1);
2917 req_desc
->Words
= 0;
2918 cmd
->request_desc
= req_desc
;
2920 if (megasas_build_io_fusion(instance
, scmd
, cmd
)) {
2921 megasas_return_cmd_fusion(instance
, cmd
);
2922 dev_err(&instance
->pdev
->dev
, "Error building command\n");
2923 cmd
->request_desc
= NULL
;
2924 atomic_dec(&instance
->fw_outstanding
);
2925 return SCSI_MLQUEUE_HOST_BUSY
;
2928 req_desc
= cmd
->request_desc
;
2929 req_desc
->SCSIIO
.SMID
= cpu_to_le16(index
);
2931 if (cmd
->io_request
->ChainOffset
!= 0 &&
2932 cmd
->io_request
->ChainOffset
!= 0xF)
2933 dev_err(&instance
->pdev
->dev
, "The chain offset value is not "
2934 "correct : %x\n", cmd
->io_request
->ChainOffset
);
2936 * if it is raid 1/10 fp write capable.
2937 * try to get second command from pool and construct it.
2938 * From FW, it has confirmed that lba values of two PDs
2939 * corresponds to single R1/10 LD are always same
2942 /* driver side count always should be less than max_fw_cmds
2943 * to get new command
2945 if (cmd
->r1_alt_dev_handle
!= MR_DEVHANDLE_INVALID
) {
2946 r1_cmd
= megasas_get_cmd_fusion(instance
,
2947 (scmd
->request
->tag
+ instance
->max_fw_cmds
));
2948 megasas_prepare_secondRaid1_IO(instance
, cmd
, r1_cmd
);
2953 * Issue the command to the FW
2956 megasas_fire_cmd_fusion(instance
, req_desc
);
2959 megasas_fire_cmd_fusion(instance
, r1_cmd
->request_desc
);
2966 * megasas_complete_r1_command -
2967 * completes R1 FP write commands which has valid peer smid
2968 * @instance: Adapter soft state
2969 * @cmd_fusion: MPT command frame
2973 megasas_complete_r1_command(struct megasas_instance
*instance
,
2974 struct megasas_cmd_fusion
*cmd
)
2976 u8
*sense
, status
, ex_status
;
2979 struct fusion_context
*fusion
;
2980 struct megasas_cmd_fusion
*r1_cmd
= NULL
;
2981 struct scsi_cmnd
*scmd_local
= NULL
;
2982 struct RAID_CONTEXT_G35
*rctx_g35
;
2984 rctx_g35
= &cmd
->io_request
->RaidContext
.raid_context_g35
;
2985 fusion
= instance
->ctrl_context
;
2986 peer_smid
= le16_to_cpu(rctx_g35
->smid
.peer_smid
);
2988 r1_cmd
= fusion
->cmd_list
[peer_smid
- 1];
2989 scmd_local
= cmd
->scmd
;
2990 status
= rctx_g35
->status
;
2991 ex_status
= rctx_g35
->ex_status
;
2992 data_length
= cmd
->io_request
->DataLength
;
2995 cmd
->cmd_completed
= true;
2997 /* Check if peer command is completed or not*/
2998 if (r1_cmd
->cmd_completed
) {
2999 rctx_g35
= &r1_cmd
->io_request
->RaidContext
.raid_context_g35
;
3000 if (rctx_g35
->status
!= MFI_STAT_OK
) {
3001 status
= rctx_g35
->status
;
3002 ex_status
= rctx_g35
->ex_status
;
3003 data_length
= r1_cmd
->io_request
->DataLength
;
3004 sense
= r1_cmd
->sense
;
3007 megasas_return_cmd_fusion(instance
, r1_cmd
);
3008 map_cmd_status(fusion
, scmd_local
, status
, ex_status
,
3009 le32_to_cpu(data_length
), sense
);
3010 if (instance
->ldio_threshold
&&
3011 megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
)
3012 atomic_dec(&instance
->ldio_outstanding
);
3013 scmd_local
->SCp
.ptr
= NULL
;
3014 megasas_return_cmd_fusion(instance
, cmd
);
3015 scsi_dma_unmap(scmd_local
);
3016 scmd_local
->scsi_done(scmd_local
);
3021 * complete_cmd_fusion - Completes command
3022 * @instance: Adapter soft state
3023 * Completes all commands that is in reply descriptor queue
3026 complete_cmd_fusion(struct megasas_instance
*instance
, u32 MSIxIndex
)
3028 union MPI2_REPLY_DESCRIPTORS_UNION
*desc
;
3029 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*reply_desc
;
3030 struct MPI2_RAID_SCSI_IO_REQUEST
*scsi_io_req
;
3031 struct fusion_context
*fusion
;
3032 struct megasas_cmd
*cmd_mfi
;
3033 struct megasas_cmd_fusion
*cmd_fusion
;
3034 u16 smid
, num_completed
;
3035 u8 reply_descript_type
, *sense
, status
, extStatus
;
3036 u32 device_id
, data_length
;
3037 union desc_value d_val
;
3038 struct LD_LOAD_BALANCE_INFO
*lbinfo
;
3039 int threshold_reply_count
= 0;
3040 struct scsi_cmnd
*scmd_local
= NULL
;
3041 struct MR_TASK_MANAGE_REQUEST
*mr_tm_req
;
3042 struct MPI2_SCSI_TASK_MANAGE_REQUEST
*mpi_tm_req
;
3044 fusion
= instance
->ctrl_context
;
3046 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
)
3049 desc
= fusion
->reply_frames_desc
[MSIxIndex
] +
3050 fusion
->last_reply_idx
[MSIxIndex
];
3052 reply_desc
= (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*)desc
;
3054 d_val
.word
= desc
->Words
;
3056 reply_descript_type
= reply_desc
->ReplyFlags
&
3057 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
3059 if (reply_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
)
3064 while (d_val
.u
.low
!= cpu_to_le32(UINT_MAX
) &&
3065 d_val
.u
.high
!= cpu_to_le32(UINT_MAX
)) {
3067 smid
= le16_to_cpu(reply_desc
->SMID
);
3068 cmd_fusion
= fusion
->cmd_list
[smid
- 1];
3069 scsi_io_req
= (struct MPI2_RAID_SCSI_IO_REQUEST
*)
3070 cmd_fusion
->io_request
;
3072 scmd_local
= cmd_fusion
->scmd
;
3073 status
= scsi_io_req
->RaidContext
.raid_context
.status
;
3074 extStatus
= scsi_io_req
->RaidContext
.raid_context
.ex_status
;
3075 sense
= cmd_fusion
->sense
;
3076 data_length
= scsi_io_req
->DataLength
;
3078 switch (scsi_io_req
->Function
) {
3079 case MPI2_FUNCTION_SCSI_TASK_MGMT
:
3080 mr_tm_req
= (struct MR_TASK_MANAGE_REQUEST
*)
3081 cmd_fusion
->io_request
;
3082 mpi_tm_req
= (struct MPI2_SCSI_TASK_MANAGE_REQUEST
*)
3083 &mr_tm_req
->TmRequest
;
3084 dev_dbg(&instance
->pdev
->dev
, "TM completion:"
3085 "type: 0x%x TaskMID: 0x%x\n",
3086 mpi_tm_req
->TaskType
, mpi_tm_req
->TaskMID
);
3087 complete(&cmd_fusion
->done
);
3089 case MPI2_FUNCTION_SCSI_IO_REQUEST
: /*Fast Path IO.*/
3090 /* Update load balancing info */
3091 if (fusion
->load_balance_info
&&
3092 (cmd_fusion
->scmd
->SCp
.Status
&
3093 MEGASAS_LOAD_BALANCE_FLAG
)) {
3094 device_id
= MEGASAS_DEV_INDEX(scmd_local
);
3095 lbinfo
= &fusion
->load_balance_info
[device_id
];
3096 atomic_dec(&lbinfo
->scsi_pending_cmds
[cmd_fusion
->pd_r1_lb
]);
3097 cmd_fusion
->scmd
->SCp
.Status
&= ~MEGASAS_LOAD_BALANCE_FLAG
;
3099 //Fall thru and complete IO
3100 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST
: /* LD-IO Path */
3101 atomic_dec(&instance
->fw_outstanding
);
3102 if (cmd_fusion
->r1_alt_dev_handle
== MR_DEVHANDLE_INVALID
) {
3103 map_cmd_status(fusion
, scmd_local
, status
,
3104 extStatus
, le32_to_cpu(data_length
),
3106 if (instance
->ldio_threshold
&&
3107 (megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
))
3108 atomic_dec(&instance
->ldio_outstanding
);
3109 scmd_local
->SCp
.ptr
= NULL
;
3110 megasas_return_cmd_fusion(instance
, cmd_fusion
);
3111 scsi_dma_unmap(scmd_local
);
3112 scmd_local
->scsi_done(scmd_local
);
3113 } else /* Optimal VD - R1 FP command completion. */
3114 megasas_complete_r1_command(instance
, cmd_fusion
);
3116 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST
: /*MFI command */
3117 cmd_mfi
= instance
->cmd_list
[cmd_fusion
->sync_cmd_idx
];
3118 /* Poll mode. Dummy free.
3119 * In case of Interrupt mode, caller has reverse check.
3121 if (cmd_mfi
->flags
& DRV_DCMD_POLLED_MODE
) {
3122 cmd_mfi
->flags
&= ~DRV_DCMD_POLLED_MODE
;
3123 megasas_return_cmd(instance
, cmd_mfi
);
3125 megasas_complete_cmd(instance
, cmd_mfi
, DID_OK
);
3129 fusion
->last_reply_idx
[MSIxIndex
]++;
3130 if (fusion
->last_reply_idx
[MSIxIndex
] >=
3131 fusion
->reply_q_depth
)
3132 fusion
->last_reply_idx
[MSIxIndex
] = 0;
3134 desc
->Words
= cpu_to_le64(ULLONG_MAX
);
3136 threshold_reply_count
++;
3138 /* Get the next reply descriptor */
3139 if (!fusion
->last_reply_idx
[MSIxIndex
])
3140 desc
= fusion
->reply_frames_desc
[MSIxIndex
];
3145 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
*)desc
;
3147 d_val
.word
= desc
->Words
;
3149 reply_descript_type
= reply_desc
->ReplyFlags
&
3150 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
3152 if (reply_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
)
3155 * Write to reply post host index register after completing threshold
3156 * number of reply counts and still there are more replies in reply queue
3157 * pending to be completed
3159 if (threshold_reply_count
>= THRESHOLD_REPLY_COUNT
) {
3160 if (instance
->msix_combined
)
3161 writel(((MSIxIndex
& 0x7) << 24) |
3162 fusion
->last_reply_idx
[MSIxIndex
],
3163 instance
->reply_post_host_index_addr
[MSIxIndex
/8]);
3165 writel((MSIxIndex
<< 24) |
3166 fusion
->last_reply_idx
[MSIxIndex
],
3167 instance
->reply_post_host_index_addr
[0]);
3168 threshold_reply_count
= 0;
3176 if (instance
->msix_combined
)
3177 writel(((MSIxIndex
& 0x7) << 24) |
3178 fusion
->last_reply_idx
[MSIxIndex
],
3179 instance
->reply_post_host_index_addr
[MSIxIndex
/8]);
3181 writel((MSIxIndex
<< 24) |
3182 fusion
->last_reply_idx
[MSIxIndex
],
3183 instance
->reply_post_host_index_addr
[0]);
3184 megasas_check_and_restore_queue_depth(instance
);
3189 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3190 * @instance: Adapter soft state
3192 void megasas_sync_irqs(unsigned long instance_addr
)
3195 struct megasas_instance
*instance
=
3196 (struct megasas_instance
*)instance_addr
;
3198 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3200 for (i
= 0; i
< count
; i
++)
3201 synchronize_irq(pci_irq_vector(instance
->pdev
, i
));
3205 * megasas_complete_cmd_dpc_fusion - Completes command
3206 * @instance: Adapter soft state
3208 * Tasklet to complete cmds
3211 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr
)
3213 struct megasas_instance
*instance
=
3214 (struct megasas_instance
*)instance_addr
;
3215 unsigned long flags
;
3216 u32 count
, MSIxIndex
;
3218 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3220 /* If we have already declared adapter dead, donot complete cmds */
3221 spin_lock_irqsave(&instance
->hba_lock
, flags
);
3222 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
) {
3223 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
3226 spin_unlock_irqrestore(&instance
->hba_lock
, flags
);
3228 for (MSIxIndex
= 0 ; MSIxIndex
< count
; MSIxIndex
++)
3229 complete_cmd_fusion(instance
, MSIxIndex
);
3233 * megasas_isr_fusion - isr entry point
3235 irqreturn_t
megasas_isr_fusion(int irq
, void *devp
)
3237 struct megasas_irq_context
*irq_context
= devp
;
3238 struct megasas_instance
*instance
= irq_context
->instance
;
3239 u32 mfiStatus
, fw_state
, dma_state
;
3241 if (instance
->mask_interrupts
)
3244 if (!instance
->msix_vectors
) {
3245 mfiStatus
= instance
->instancet
->clear_intr(instance
->reg_set
);
3250 /* If we are resetting, bail */
3251 if (test_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
)) {
3252 instance
->instancet
->clear_intr(instance
->reg_set
);
3256 if (!complete_cmd_fusion(instance
, irq_context
->MSIxIndex
)) {
3257 instance
->instancet
->clear_intr(instance
->reg_set
);
3258 /* If we didn't complete any commands, check for FW fault */
3259 fw_state
= instance
->instancet
->read_fw_status_reg(
3260 instance
->reg_set
) & MFI_STATE_MASK
;
3261 dma_state
= instance
->instancet
->read_fw_status_reg
3262 (instance
->reg_set
) & MFI_STATE_DMADONE
;
3263 if (instance
->crash_dump_drv_support
&&
3264 instance
->crash_dump_app_support
) {
3265 /* Start collecting crash, if DMA bit is done */
3266 if ((fw_state
== MFI_STATE_FAULT
) && dma_state
)
3267 schedule_work(&instance
->crash_init
);
3268 else if (fw_state
== MFI_STATE_FAULT
) {
3269 if (instance
->unload
== 0)
3270 schedule_work(&instance
->work_init
);
3272 } else if (fw_state
== MFI_STATE_FAULT
) {
3273 dev_warn(&instance
->pdev
->dev
, "Iop2SysDoorbellInt"
3274 "for scsi%d\n", instance
->host
->host_no
);
3275 if (instance
->unload
== 0)
3276 schedule_work(&instance
->work_init
);
3284 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
3285 * @instance: Adapter soft state
3286 * mfi_cmd: megasas_cmd pointer
3290 build_mpt_mfi_pass_thru(struct megasas_instance
*instance
,
3291 struct megasas_cmd
*mfi_cmd
)
3293 struct MPI25_IEEE_SGE_CHAIN64
*mpi25_ieee_chain
;
3294 struct MPI2_RAID_SCSI_IO_REQUEST
*io_req
;
3295 struct megasas_cmd_fusion
*cmd
;
3296 struct fusion_context
*fusion
;
3297 struct megasas_header
*frame_hdr
= &mfi_cmd
->frame
->hdr
;
3299 fusion
= instance
->ctrl_context
;
3301 cmd
= megasas_get_cmd_fusion(instance
,
3302 instance
->max_scsi_cmds
+ mfi_cmd
->index
);
3304 /* Save the smid. To be used for returning the cmd */
3305 mfi_cmd
->context
.smid
= cmd
->index
;
3308 * For cmds where the flag is set, store the flag and check
3309 * on completion. For cmds with this flag, don't call
3310 * megasas_complete_cmd
3313 if (frame_hdr
->flags
& cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE
))
3314 mfi_cmd
->flags
|= DRV_DCMD_POLLED_MODE
;
3316 io_req
= cmd
->io_request
;
3318 if (instance
->adapter_type
>= INVADER_SERIES
) {
3319 struct MPI25_IEEE_SGE_CHAIN64
*sgl_ptr_end
=
3320 (struct MPI25_IEEE_SGE_CHAIN64
*)&io_req
->SGL
;
3321 sgl_ptr_end
+= fusion
->max_sge_in_main_msg
- 1;
3322 sgl_ptr_end
->Flags
= 0;
3326 (struct MPI25_IEEE_SGE_CHAIN64
*)&io_req
->SGL
.IeeeChain
;
3328 io_req
->Function
= MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST
;
3329 io_req
->SGLOffset0
= offsetof(struct MPI2_RAID_SCSI_IO_REQUEST
,
3331 io_req
->ChainOffset
= fusion
->chain_offset_mfi_pthru
;
3333 mpi25_ieee_chain
->Address
= cpu_to_le64(mfi_cmd
->frame_phys_addr
);
3335 mpi25_ieee_chain
->Flags
= IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
3336 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR
;
3338 mpi25_ieee_chain
->Length
= cpu_to_le32(instance
->mfi_frame_size
);
3342 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
3343 * @instance: Adapter soft state
3344 * @cmd: mfi cmd to build
3347 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*
3348 build_mpt_cmd(struct megasas_instance
*instance
, struct megasas_cmd
*cmd
)
3350 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
= NULL
;
3353 build_mpt_mfi_pass_thru(instance
, cmd
);
3354 index
= cmd
->context
.smid
;
3356 req_desc
= megasas_get_request_descriptor(instance
, index
- 1);
3358 req_desc
->Words
= 0;
3359 req_desc
->SCSIIO
.RequestFlags
= (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO
<<
3360 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
3362 req_desc
->SCSIIO
.SMID
= cpu_to_le16(index
);
/**
 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
 * @instance:	Adapter soft state
 * @cmd:	mfi cmd pointer
 *
 * NOTE(review): the descriptor returned by build_mpt_cmd() is used
 * without a NULL check — presumably the SMID is always valid here;
 * confirm against megasas_get_request_descriptor().
 */
void
megasas_issue_dcmd_fusion(struct megasas_instance *instance,
			  struct megasas_cmd *cmd)
{
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	req_desc = build_mpt_cmd(instance, cmd);

	megasas_fire_cmd_fusion(instance, req_desc);
}
3386 * megasas_release_fusion - Reverses the FW initialization
3387 * @instance: Adapter soft state
3390 megasas_release_fusion(struct megasas_instance
*instance
)
3392 megasas_free_cmds(instance
);
3393 megasas_free_cmds_fusion(instance
);
3395 iounmap(instance
->reg_set
);
3397 pci_release_selected_regions(instance
->pdev
, 1<<instance
->bar
);
3401 * megasas_read_fw_status_reg_fusion - returns the current FW status value
3402 * @regs: MFI register set
3405 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem
*regs
)
3407 return readl(&(regs
)->outbound_scratch_pad
);
3411 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3412 * @instance: Controller's soft instance
3413 * return: Number of allocated host crash buffers
3416 megasas_alloc_host_crash_buffer(struct megasas_instance
*instance
)
3420 for (i
= 0; i
< MAX_CRASH_DUMP_SIZE
; i
++) {
3421 instance
->crash_buf
[i
] = vzalloc(CRASH_DMA_BUF_SIZE
);
3422 if (!instance
->crash_buf
[i
]) {
3423 dev_info(&instance
->pdev
->dev
, "Firmware crash dump "
3424 "memory allocation failed at index %d\n", i
);
3428 instance
->drv_buf_alloc
= i
;
3432 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3433 * @instance: Controller's soft instance
3436 megasas_free_host_crash_buffer(struct megasas_instance
*instance
)
3439 for (i
= 0; i
< instance
->drv_buf_alloc
; i
++) {
3440 if (instance
->crash_buf
[i
])
3441 vfree(instance
->crash_buf
[i
]);
3443 instance
->drv_buf_index
= 0;
3444 instance
->drv_buf_alloc
= 0;
3445 instance
->fw_crash_state
= UNAVAILABLE
;
3446 instance
->fw_crash_buffer_size
= 0;
3450 * megasas_adp_reset_fusion - For controller reset
3451 * @regs: MFI register set
3454 megasas_adp_reset_fusion(struct megasas_instance
*instance
,
3455 struct megasas_register_set __iomem
*regs
)
3457 u32 host_diag
, abs_state
, retry
;
3459 /* Now try to reset the chip */
3460 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3461 writel(MPI2_WRSEQ_1ST_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3462 writel(MPI2_WRSEQ_2ND_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3463 writel(MPI2_WRSEQ_3RD_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3464 writel(MPI2_WRSEQ_4TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3465 writel(MPI2_WRSEQ_5TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3466 writel(MPI2_WRSEQ_6TH_KEY_VALUE
, &instance
->reg_set
->fusion_seq_offset
);
3468 /* Check that the diag write enable (DRWE) bit is on */
3469 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3471 while (!(host_diag
& HOST_DIAG_WRITE_ENABLE
)) {
3473 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3474 if (retry
++ == 100) {
3475 dev_warn(&instance
->pdev
->dev
,
3476 "Host diag unlock failed from %s %d\n",
3477 __func__
, __LINE__
);
3481 if (!(host_diag
& HOST_DIAG_WRITE_ENABLE
))
3484 /* Send chip reset command */
3485 writel(host_diag
| HOST_DIAG_RESET_ADAPTER
,
3486 &instance
->reg_set
->fusion_host_diag
);
3489 /* Make sure reset adapter bit is cleared */
3490 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3492 while (host_diag
& HOST_DIAG_RESET_ADAPTER
) {
3494 host_diag
= readl(&instance
->reg_set
->fusion_host_diag
);
3495 if (retry
++ == 1000) {
3496 dev_warn(&instance
->pdev
->dev
,
3497 "Diag reset adapter never cleared %s %d\n",
3498 __func__
, __LINE__
);
3502 if (host_diag
& HOST_DIAG_RESET_ADAPTER
)
3505 abs_state
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
)
3509 while ((abs_state
<= MFI_STATE_FW_INIT
) && (retry
++ < 1000)) {
3511 abs_state
= instance
->instancet
->
3512 read_fw_status_reg(instance
->reg_set
) & MFI_STATE_MASK
;
3514 if (abs_state
<= MFI_STATE_FW_INIT
) {
3515 dev_warn(&instance
->pdev
->dev
,
3516 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
3517 abs_state
, __func__
, __LINE__
);
3525 * megasas_check_reset_fusion - For controller reset check
3526 * @regs: MFI register set
3529 megasas_check_reset_fusion(struct megasas_instance
*instance
,
3530 struct megasas_register_set __iomem
*regs
)
3535 /* This function waits for outstanding commands on fusion to complete */
3536 int megasas_wait_for_outstanding_fusion(struct megasas_instance
*instance
,
3537 int reason
, int *convert
)
3539 int i
, outstanding
, retval
= 0, hb_seconds_missed
= 0;
3542 for (i
= 0; i
< resetwaittime
; i
++) {
3543 /* Check if firmware is in fault state */
3544 fw_state
= instance
->instancet
->read_fw_status_reg(
3545 instance
->reg_set
) & MFI_STATE_MASK
;
3546 if (fw_state
== MFI_STATE_FAULT
) {
3547 dev_warn(&instance
->pdev
->dev
, "Found FW in FAULT state,"
3548 " will reset adapter scsi%d.\n",
3549 instance
->host
->host_no
);
3550 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3551 if (instance
->requestorId
&& reason
) {
3552 dev_warn(&instance
->pdev
->dev
, "SR-IOV Found FW in FAULT"
3553 " state while polling during"
3554 " I/O timeout handling for %d\n",
3555 instance
->host
->host_no
);
3563 if (reason
== MFI_IO_TIMEOUT_OCR
) {
3564 dev_info(&instance
->pdev
->dev
,
3565 "MFI IO is timed out, initiating OCR\n");
3566 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3571 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
3572 if (instance
->requestorId
&& !reason
) {
3577 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
3578 if (instance
->requestorId
&& (reason
== SCSIIO_TIMEOUT_OCR
)) {
3579 if (instance
->hb_host_mem
->HB
.fwCounter
!=
3580 instance
->hb_host_mem
->HB
.driverCounter
) {
3581 instance
->hb_host_mem
->HB
.driverCounter
=
3582 instance
->hb_host_mem
->HB
.fwCounter
;
3583 hb_seconds_missed
= 0;
3585 hb_seconds_missed
++;
3586 if (hb_seconds_missed
==
3587 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF
/HZ
)) {
3588 dev_warn(&instance
->pdev
->dev
, "SR-IOV:"
3589 " Heartbeat never completed "
3590 " while polling during I/O "
3591 " timeout handling for "
3593 instance
->host
->host_no
);
3601 megasas_complete_cmd_dpc_fusion((unsigned long)instance
);
3602 outstanding
= atomic_read(&instance
->fw_outstanding
);
3606 if (!(i
% MEGASAS_RESET_NOTICE_INTERVAL
)) {
3607 dev_notice(&instance
->pdev
->dev
, "[%2d]waiting for %d "
3608 "commands to complete for scsi%d\n", i
,
3609 outstanding
, instance
->host
->host_no
);
3614 if (atomic_read(&instance
->fw_outstanding
)) {
3615 dev_err(&instance
->pdev
->dev
, "pending commands remain after waiting, "
3616 "will reset adapter scsi%d.\n",
3617 instance
->host
->host_no
);
3625 void megasas_reset_reply_desc(struct megasas_instance
*instance
)
3628 struct fusion_context
*fusion
;
3629 union MPI2_REPLY_DESCRIPTORS_UNION
*reply_desc
;
3631 fusion
= instance
->ctrl_context
;
3632 count
= instance
->msix_vectors
> 0 ? instance
->msix_vectors
: 1;
3633 for (i
= 0 ; i
< count
; i
++) {
3634 fusion
->last_reply_idx
[i
] = 0;
3635 reply_desc
= fusion
->reply_frames_desc
[i
];
3636 for (j
= 0 ; j
< fusion
->reply_q_depth
; j
++, reply_desc
++)
3637 reply_desc
->Words
= cpu_to_le64(ULLONG_MAX
);
3642 * megasas_refire_mgmt_cmd : Re-fire management commands
3643 * @instance: Controller's soft instance
3645 void megasas_refire_mgmt_cmd(struct megasas_instance
*instance
)
3648 struct megasas_cmd_fusion
*cmd_fusion
;
3649 struct fusion_context
*fusion
;
3650 struct megasas_cmd
*cmd_mfi
;
3651 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
3653 bool refire_cmd
= 0;
3655 fusion
= instance
->ctrl_context
;
3657 /* Re-fire management commands.
3658 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
3660 for (j
= instance
->max_scsi_cmds
; j
< instance
->max_fw_cmds
; j
++) {
3661 cmd_fusion
= fusion
->cmd_list
[j
];
3662 cmd_mfi
= instance
->cmd_list
[cmd_fusion
->sync_cmd_idx
];
3663 smid
= le16_to_cpu(cmd_mfi
->context
.smid
);
3668 /* Do not refire shutdown command */
3669 if (le32_to_cpu(cmd_mfi
->frame
->dcmd
.opcode
) ==
3670 MR_DCMD_CTRL_SHUTDOWN
) {
3671 cmd_mfi
->frame
->dcmd
.cmd_status
= MFI_STAT_OK
;
3672 megasas_complete_cmd(instance
, cmd_mfi
, DID_OK
);
3676 req_desc
= megasas_get_request_descriptor
3677 (instance
, smid
- 1);
3678 refire_cmd
= req_desc
&& ((cmd_mfi
->frame
->dcmd
.opcode
!=
3679 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO
)) &&
3680 (cmd_mfi
->frame
->dcmd
.opcode
!=
3681 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO
)))
3682 && !(cmd_mfi
->flags
& DRV_DCMD_SKIP_REFIRE
);
3684 megasas_fire_cmd_fusion(instance
, req_desc
);
3686 megasas_return_cmd(instance
, cmd_mfi
);
3691 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
3692 * @instance: per adapter struct
3693 * @channel: the channel assigned by the OS
3694 * @id: the id assigned by the OS
3696 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
3699 static int megasas_track_scsiio(struct megasas_instance
*instance
,
3700 int id
, int channel
)
3703 struct megasas_cmd_fusion
*cmd_fusion
;
3704 struct fusion_context
*fusion
;
3705 fusion
= instance
->ctrl_context
;
3707 for (i
= 0 ; i
< instance
->max_scsi_cmds
; i
++) {
3708 cmd_fusion
= fusion
->cmd_list
[i
];
3709 if (cmd_fusion
->scmd
&&
3710 (cmd_fusion
->scmd
->device
->id
== id
&&
3711 cmd_fusion
->scmd
->device
->channel
== channel
)) {
3712 dev_info(&instance
->pdev
->dev
,
3713 "SCSI commands pending to target"
3714 "channel %d id %d \tSMID: 0x%x\n",
3715 channel
, id
, cmd_fusion
->index
);
3716 scsi_print_command(cmd_fusion
->scmd
);
3722 return found
? FAILED
: SUCCESS
;
3726 * megasas_tm_response_code - translation of device response code
3727 * @ioc: per adapter object
3728 * @mpi_reply: MPI reply returned by firmware
3733 megasas_tm_response_code(struct megasas_instance
*instance
,
3734 struct MPI2_SCSI_TASK_MANAGE_REPLY
*mpi_reply
)
3738 switch (mpi_reply
->ResponseCode
) {
3739 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE
:
3740 desc
= "task management request completed";
3742 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME
:
3743 desc
= "invalid frame";
3745 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED
:
3746 desc
= "task management request not supported";
3748 case MPI2_SCSITASKMGMT_RSP_TM_FAILED
:
3749 desc
= "task management request failed";
3751 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED
:
3752 desc
= "task management request succeeded";
3754 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN
:
3755 desc
= "invalid lun";
3758 desc
= "overlapped tag attempted";
3760 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC
:
3761 desc
= "task queued, however not sent to target";
3767 dev_dbg(&instance
->pdev
->dev
, "response_code(%01x): %s\n",
3768 mpi_reply
->ResponseCode
, desc
);
3769 dev_dbg(&instance
->pdev
->dev
,
3770 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
3771 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
3772 mpi_reply
->TerminationCount
, mpi_reply
->DevHandle
,
3773 mpi_reply
->Function
, mpi_reply
->TaskType
,
3774 mpi_reply
->IOCStatus
, mpi_reply
->IOCLogInfo
);
3778 * megasas_issue_tm - main routine for sending tm requests
3779 * @instance: per adapter struct
3780 * @device_handle: device handle
3781 * @channel: the channel assigned by the OS
3782 * @id: the id assigned by the OS
3783 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
3784 * @smid_task: smid assigned to the task
3785 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
3788 * MegaRaid use MPT interface for Task Magement request.
3789 * A generic API for sending task management requests to firmware.
3791 * Return SUCCESS or FAILED.
3794 megasas_issue_tm(struct megasas_instance
*instance
, u16 device_handle
,
3795 uint channel
, uint id
, u16 smid_task
, u8 type
)
3797 struct MR_TASK_MANAGE_REQUEST
*mr_request
;
3798 struct MPI2_SCSI_TASK_MANAGE_REQUEST
*mpi_request
;
3799 unsigned long timeleft
;
3800 struct megasas_cmd_fusion
*cmd_fusion
;
3801 struct megasas_cmd
*cmd_mfi
;
3802 union MEGASAS_REQUEST_DESCRIPTOR_UNION
*req_desc
;
3803 struct fusion_context
*fusion
= NULL
;
3804 struct megasas_cmd_fusion
*scsi_lookup
;
3806 struct MPI2_SCSI_TASK_MANAGE_REPLY
*mpi_reply
;
3808 fusion
= instance
->ctrl_context
;
3810 cmd_mfi
= megasas_get_cmd(instance
);
3813 dev_err(&instance
->pdev
->dev
, "Failed from %s %d\n",
3814 __func__
, __LINE__
);
3818 cmd_fusion
= megasas_get_cmd_fusion(instance
,
3819 instance
->max_scsi_cmds
+ cmd_mfi
->index
);
3821 /* Save the smid. To be used for returning the cmd */
3822 cmd_mfi
->context
.smid
= cmd_fusion
->index
;
3824 req_desc
= megasas_get_request_descriptor(instance
,
3825 (cmd_fusion
->index
- 1));
3827 cmd_fusion
->request_desc
= req_desc
;
3828 req_desc
->Words
= 0;
3830 mr_request
= (struct MR_TASK_MANAGE_REQUEST
*) cmd_fusion
->io_request
;
3831 memset(mr_request
, 0, sizeof(struct MR_TASK_MANAGE_REQUEST
));
3832 mpi_request
= (struct MPI2_SCSI_TASK_MANAGE_REQUEST
*) &mr_request
->TmRequest
;
3833 mpi_request
->Function
= MPI2_FUNCTION_SCSI_TASK_MGMT
;
3834 mpi_request
->DevHandle
= cpu_to_le16(device_handle
);
3835 mpi_request
->TaskType
= type
;
3836 mpi_request
->TaskMID
= cpu_to_le16(smid_task
);
3837 mpi_request
->LUN
[1] = 0;
3840 req_desc
= cmd_fusion
->request_desc
;
3841 req_desc
->HighPriority
.SMID
= cpu_to_le16(cmd_fusion
->index
);
3842 req_desc
->HighPriority
.RequestFlags
=
3843 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
<<
3844 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT
);
3845 req_desc
->HighPriority
.MSIxIndex
= 0;
3846 req_desc
->HighPriority
.LMID
= 0;
3847 req_desc
->HighPriority
.Reserved1
= 0;
3849 if (channel
< MEGASAS_MAX_PD_CHANNELS
)
3850 mr_request
->tmReqFlags
.isTMForPD
= 1;
3852 mr_request
->tmReqFlags
.isTMForLD
= 1;
3854 init_completion(&cmd_fusion
->done
);
3855 megasas_fire_cmd_fusion(instance
, req_desc
);
3857 timeleft
= wait_for_completion_timeout(&cmd_fusion
->done
, 50 * HZ
);
3860 dev_err(&instance
->pdev
->dev
,
3861 "task mgmt type 0x%x timed out\n", type
);
3862 cmd_mfi
->flags
|= DRV_DCMD_SKIP_REFIRE
;
3863 mutex_unlock(&instance
->reset_mutex
);
3864 rc
= megasas_reset_fusion(instance
->host
, MFI_IO_TIMEOUT_OCR
);
3865 mutex_lock(&instance
->reset_mutex
);
3869 mpi_reply
= (struct MPI2_SCSI_TASK_MANAGE_REPLY
*) &mr_request
->TMReply
;
3870 megasas_tm_response_code(instance
, mpi_reply
);
3872 megasas_return_cmd(instance
, cmd_mfi
);
3875 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
:
3876 scsi_lookup
= fusion
->cmd_list
[smid_task
- 1];
3878 if (scsi_lookup
->scmd
== NULL
)
3881 instance
->instancet
->disable_intr(instance
);
3882 megasas_sync_irqs((unsigned long)instance
);
3883 instance
->instancet
->enable_intr(instance
);
3884 if (scsi_lookup
->scmd
== NULL
)
3890 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
:
3891 if ((channel
== 0xFFFFFFFF) && (id
== 0xFFFFFFFF))
3893 instance
->instancet
->disable_intr(instance
);
3894 megasas_sync_irqs((unsigned long)instance
);
3895 rc
= megasas_track_scsiio(instance
, id
, channel
);
3896 instance
->instancet
->enable_intr(instance
);
3899 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET
:
3900 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK
:
3912 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI
3913 * @instance: per adapter struct
3915 * Return Non Zero index, if SMID found in outstanding commands
3917 static u16
megasas_fusion_smid_lookup(struct scsi_cmnd
*scmd
)
3920 struct megasas_instance
*instance
;
3921 struct megasas_cmd_fusion
*cmd_fusion
;
3922 struct fusion_context
*fusion
;
3924 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
3926 fusion
= instance
->ctrl_context
;
3928 for (i
= 0; i
< instance
->max_scsi_cmds
; i
++) {
3929 cmd_fusion
= fusion
->cmd_list
[i
];
3930 if (cmd_fusion
->scmd
&& (cmd_fusion
->scmd
== scmd
)) {
3931 scmd_printk(KERN_NOTICE
, scmd
, "Abort request is for"
3932 " SMID: %d\n", cmd_fusion
->index
);
3933 ret
= cmd_fusion
->index
;
3942 * megasas_get_tm_devhandle - Get devhandle for TM request
3943 * @sdev- OS provided scsi device
3945 * Returns- devhandle/targetID of SCSI device
3947 static u16
megasas_get_tm_devhandle(struct scsi_device
*sdev
)
3951 struct megasas_instance
*instance
;
3952 struct fusion_context
*fusion
;
3953 struct MR_PD_CFG_SEQ_NUM_SYNC
*pd_sync
;
3954 u16 devhandle
= (u16
)ULONG_MAX
;
3956 instance
= (struct megasas_instance
*)sdev
->host
->hostdata
;
3957 fusion
= instance
->ctrl_context
;
3959 if (!MEGASAS_IS_LOGICAL(sdev
)) {
3960 if (instance
->use_seqnum_jbod_fp
) {
3961 pd_index
= (sdev
->channel
* MEGASAS_MAX_DEV_PER_CHANNEL
)
3963 pd_sync
= (void *)fusion
->pd_seq_sync
3964 [(instance
->pd_seq_map_id
- 1) & 1];
3965 devhandle
= pd_sync
->seq
[pd_index
].devHandle
;
3967 sdev_printk(KERN_ERR
, sdev
, "Firmware expose tmCapable"
3968 " without JBOD MAP support from %s %d\n", __func__
, __LINE__
);
3970 device_id
= ((sdev
->channel
% 2) * MEGASAS_MAX_DEV_PER_CHANNEL
)
3972 devhandle
= device_id
;
3979 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
3980 * @scmd : pointer to scsi command object
3982 * Return SUCCESS, if command aborted else FAILED
3985 int megasas_task_abort_fusion(struct scsi_cmnd
*scmd
)
3987 struct megasas_instance
*instance
;
3988 u16 smid
, devhandle
;
3989 struct fusion_context
*fusion
;
3991 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
3992 mr_device_priv_data
= scmd
->device
->hostdata
;
3995 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
3996 fusion
= instance
->ctrl_context
;
3998 scmd_printk(KERN_INFO
, scmd
, "task abort called for scmd(%p)\n", scmd
);
3999 scsi_print_command(scmd
);
4001 if (atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) {
4002 dev_err(&instance
->pdev
->dev
, "Controller is not OPERATIONAL,"
4003 "SCSI host:%d\n", instance
->host
->host_no
);
4008 if (!mr_device_priv_data
) {
4009 sdev_printk(KERN_INFO
, scmd
->device
, "device been deleted! "
4010 "scmd(%p)\n", scmd
);
4011 scmd
->result
= DID_NO_CONNECT
<< 16;
4017 if (!mr_device_priv_data
->is_tm_capable
) {
4022 mutex_lock(&instance
->reset_mutex
);
4024 smid
= megasas_fusion_smid_lookup(scmd
);
4028 scmd_printk(KERN_NOTICE
, scmd
, "Command for which abort is"
4029 " issued is not found in oustanding commands\n");
4030 mutex_unlock(&instance
->reset_mutex
);
4034 devhandle
= megasas_get_tm_devhandle(scmd
->device
);
4036 if (devhandle
== (u16
)ULONG_MAX
) {
4038 sdev_printk(KERN_INFO
, scmd
->device
,
4039 "task abort issued for invalid devhandle\n");
4040 mutex_unlock(&instance
->reset_mutex
);
4043 sdev_printk(KERN_INFO
, scmd
->device
,
4044 "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
4047 mr_device_priv_data
->tm_busy
= 1;
4048 ret
= megasas_issue_tm(instance
, devhandle
,
4049 scmd
->device
->channel
, scmd
->device
->id
, smid
,
4050 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK
);
4051 mr_device_priv_data
->tm_busy
= 0;
4053 mutex_unlock(&instance
->reset_mutex
);
4055 sdev_printk(KERN_INFO
, scmd
->device
, "task abort: %s scmd(%p)\n",
4056 ((ret
== SUCCESS
) ? "SUCCESS" : "FAILED"), scmd
);
4062 * megasas_reset_target_fusion : target reset function for fusion adapters
4063 * scmd: SCSI command pointer
4065 * Returns SUCCESS if all commands associated with target aborted else FAILED
4068 int megasas_reset_target_fusion(struct scsi_cmnd
*scmd
)
4071 struct megasas_instance
*instance
;
4074 struct fusion_context
*fusion
;
4075 struct MR_PRIV_DEVICE
*mr_device_priv_data
;
4076 mr_device_priv_data
= scmd
->device
->hostdata
;
4078 instance
= (struct megasas_instance
*)scmd
->device
->host
->hostdata
;
4079 fusion
= instance
->ctrl_context
;
4081 sdev_printk(KERN_INFO
, scmd
->device
,
4082 "target reset called for scmd(%p)\n", scmd
);
4084 if (atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) {
4085 dev_err(&instance
->pdev
->dev
, "Controller is not OPERATIONAL,"
4086 "SCSI host:%d\n", instance
->host
->host_no
);
4091 if (!mr_device_priv_data
) {
4092 sdev_printk(KERN_INFO
, scmd
->device
, "device been deleted! "
4093 "scmd(%p)\n", scmd
);
4094 scmd
->result
= DID_NO_CONNECT
<< 16;
4100 if (!mr_device_priv_data
->is_tm_capable
) {
4105 mutex_lock(&instance
->reset_mutex
);
4106 devhandle
= megasas_get_tm_devhandle(scmd
->device
);
4108 if (devhandle
== (u16
)ULONG_MAX
) {
4110 sdev_printk(KERN_INFO
, scmd
->device
,
4111 "target reset issued for invalid devhandle\n");
4112 mutex_unlock(&instance
->reset_mutex
);
4116 sdev_printk(KERN_INFO
, scmd
->device
,
4117 "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
4119 mr_device_priv_data
->tm_busy
= 1;
4120 ret
= megasas_issue_tm(instance
, devhandle
,
4121 scmd
->device
->channel
, scmd
->device
->id
, 0,
4122 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET
);
4123 mr_device_priv_data
->tm_busy
= 0;
4124 mutex_unlock(&instance
->reset_mutex
);
4126 scmd_printk(KERN_NOTICE
, scmd
, "megasas: target reset %s!!\n",
4127 (ret
== SUCCESS
) ? "SUCCESS" : "FAILED");
4132 /*SRIOV get other instance in cluster if any*/
4133 struct megasas_instance
*megasas_get_peer_instance(struct megasas_instance
*instance
)
4137 for (i
= 0; i
< MAX_MGMT_ADAPTERS
; i
++) {
4138 if (megasas_mgmt_info
.instance
[i
] &&
4139 (megasas_mgmt_info
.instance
[i
] != instance
) &&
4140 megasas_mgmt_info
.instance
[i
]->requestorId
&&
4141 megasas_mgmt_info
.instance
[i
]->peerIsPresent
&&
4142 (memcmp((megasas_mgmt_info
.instance
[i
]->clusterId
),
4143 instance
->clusterId
, MEGASAS_CLUSTER_ID_SIZE
) == 0))
4144 return megasas_mgmt_info
.instance
[i
];
4149 /* Check for a second path that is currently UP */
4150 int megasas_check_mpio_paths(struct megasas_instance
*instance
,
4151 struct scsi_cmnd
*scmd
)
4153 struct megasas_instance
*peer_instance
= NULL
;
4154 int retval
= (DID_REQUEUE
<< 16);
4156 if (instance
->peerIsPresent
) {
4157 peer_instance
= megasas_get_peer_instance(instance
);
4158 if ((peer_instance
) &&
4159 (atomic_read(&peer_instance
->adprecovery
) ==
4160 MEGASAS_HBA_OPERATIONAL
))
4161 retval
= (DID_NO_CONNECT
<< 16);
4166 /* Core fusion reset function */
4167 int megasas_reset_fusion(struct Scsi_Host
*shost
, int reason
)
4169 int retval
= SUCCESS
, i
, j
, convert
= 0;
4170 struct megasas_instance
*instance
;
4171 struct megasas_cmd_fusion
*cmd_fusion
, *r1_cmd
;
4172 struct fusion_context
*fusion
;
4173 u32 abs_state
, status_reg
, reset_adapter
;
4174 u32 io_timeout_in_crash_mode
= 0;
4175 struct scsi_cmnd
*scmd_local
= NULL
;
4176 struct scsi_device
*sdev
;
4178 instance
= (struct megasas_instance
*)shost
->hostdata
;
4179 fusion
= instance
->ctrl_context
;
4181 mutex_lock(&instance
->reset_mutex
);
4183 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HW_CRITICAL_ERROR
) {
4184 dev_warn(&instance
->pdev
->dev
, "Hardware critical error, "
4185 "returning FAILED for scsi%d.\n",
4186 instance
->host
->host_no
);
4187 mutex_unlock(&instance
->reset_mutex
);
4190 status_reg
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
);
4191 abs_state
= status_reg
& MFI_STATE_MASK
;
4193 /* IO timeout detected, forcibly put FW in FAULT state */
4194 if (abs_state
!= MFI_STATE_FAULT
&& instance
->crash_dump_buf
&&
4195 instance
->crash_dump_app_support
&& reason
) {
4196 dev_info(&instance
->pdev
->dev
, "IO/DCMD timeout is detected, "
4197 "forcibly FAULT Firmware\n");
4198 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_INFAULT
);
4199 status_reg
= readl(&instance
->reg_set
->doorbell
);
4200 writel(status_reg
| MFI_STATE_FORCE_OCR
,
4201 &instance
->reg_set
->doorbell
);
4202 readl(&instance
->reg_set
->doorbell
);
4203 mutex_unlock(&instance
->reset_mutex
);
4206 io_timeout_in_crash_mode
++;
4207 dev_dbg(&instance
->pdev
->dev
, "waiting for [%d] "
4208 "seconds for crash dump collection and OCR "
4209 "to be done\n", (io_timeout_in_crash_mode
* 3));
4210 } while ((atomic_read(&instance
->adprecovery
) != MEGASAS_HBA_OPERATIONAL
) &&
4211 (io_timeout_in_crash_mode
< 80));
4213 if (atomic_read(&instance
->adprecovery
) == MEGASAS_HBA_OPERATIONAL
) {
4214 dev_info(&instance
->pdev
->dev
, "OCR done for IO "
4218 dev_info(&instance
->pdev
->dev
, "Controller is not "
4219 "operational after 240 seconds wait for IO "
4220 "timeout case in FW crash dump mode\n do "
4221 "OCR/kill adapter\n");
4222 retval
= megasas_reset_fusion(shost
, 0);
4227 if (instance
->requestorId
&& !instance
->skip_heartbeat_timer_del
)
4228 del_timer_sync(&instance
->sriov_heartbeat_timer
);
4229 set_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4230 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_POLLING
);
4231 instance
->instancet
->disable_intr(instance
);
4232 megasas_sync_irqs((unsigned long)instance
);
4234 /* First try waiting for commands to complete */
4235 if (megasas_wait_for_outstanding_fusion(instance
, reason
,
4237 atomic_set(&instance
->adprecovery
, MEGASAS_ADPRESET_SM_INFAULT
);
4238 dev_warn(&instance
->pdev
->dev
, "resetting fusion "
4239 "adapter scsi%d.\n", instance
->host
->host_no
);
4243 if (megasas_dbg_lvl
& OCR_LOGS
)
4244 dev_info(&instance
->pdev
->dev
, "\nPending SCSI commands:\n");
4246 /* Now return commands back to the OS */
4247 for (i
= 0 ; i
< instance
->max_scsi_cmds
; i
++) {
4248 cmd_fusion
= fusion
->cmd_list
[i
];
4249 /*check for extra commands issued by driver*/
4250 if (instance
->is_ventura
) {
4251 r1_cmd
= fusion
->cmd_list
[i
+ instance
->max_fw_cmds
];
4252 megasas_return_cmd_fusion(instance
, r1_cmd
);
4254 scmd_local
= cmd_fusion
->scmd
;
4255 if (cmd_fusion
->scmd
) {
4256 if (megasas_dbg_lvl
& OCR_LOGS
) {
4257 sdev_printk(KERN_INFO
,
4258 cmd_fusion
->scmd
->device
, "SMID: 0x%x\n",
4260 scsi_print_command(cmd_fusion
->scmd
);
4263 scmd_local
->result
=
4264 megasas_check_mpio_paths(instance
,
4266 if (instance
->ldio_threshold
&&
4267 megasas_cmd_type(scmd_local
) == READ_WRITE_LDIO
)
4268 atomic_dec(&instance
->ldio_outstanding
);
4269 megasas_return_cmd_fusion(instance
, cmd_fusion
);
4270 scsi_dma_unmap(scmd_local
);
4271 scmd_local
->scsi_done(scmd_local
);
4275 atomic_set(&instance
->fw_outstanding
, 0);
4277 status_reg
= instance
->instancet
->read_fw_status_reg(
4279 abs_state
= status_reg
& MFI_STATE_MASK
;
4280 reset_adapter
= status_reg
& MFI_RESET_ADAPTER
;
4281 if (instance
->disableOnlineCtrlReset
||
4282 (abs_state
== MFI_STATE_FAULT
&& !reset_adapter
)) {
4283 /* Reset not supported, kill adapter */
4284 dev_warn(&instance
->pdev
->dev
, "Reset not supported"
4285 ", killing adapter scsi%d.\n",
4286 instance
->host
->host_no
);
4287 megaraid_sas_kill_hba(instance
);
4288 instance
->skip_heartbeat_timer_del
= 1;
4293 /* Let SR-IOV VF & PF sync up if there was a HB failure */
4294 if (instance
->requestorId
&& !reason
) {
4295 msleep(MEGASAS_OCR_SETTLE_TIME_VF
);
4296 goto transition_to_ready
;
4299 /* Now try to reset the chip */
4300 for (i
= 0; i
< MEGASAS_FUSION_MAX_RESET_TRIES
; i
++) {
4302 if (instance
->instancet
->adp_reset
4303 (instance
, instance
->reg_set
))
4305 transition_to_ready
:
4306 /* Wait for FW to become ready */
4307 if (megasas_transition_to_ready(instance
, 1)) {
4308 dev_warn(&instance
->pdev
->dev
,
4309 "Failed to transition controller to ready for "
4310 "scsi%d.\n", instance
->host
->host_no
);
4311 if (instance
->requestorId
&& !reason
)
4312 goto fail_kill_adapter
;
4316 megasas_reset_reply_desc(instance
);
4317 megasas_fusion_update_can_queue(instance
, OCR_CONTEXT
);
4319 if (megasas_ioc_init_fusion(instance
)) {
4320 if (instance
->requestorId
&& !reason
)
4321 goto fail_kill_adapter
;
4326 megasas_refire_mgmt_cmd(instance
);
4328 if (megasas_get_ctrl_info(instance
)) {
4329 dev_info(&instance
->pdev
->dev
,
4330 "Failed from %s %d\n",
4331 __func__
, __LINE__
);
4332 megaraid_sas_kill_hba(instance
);
4336 /* Reset load balance info */
4337 if (fusion
->load_balance_info
)
4338 memset(fusion
->load_balance_info
, 0,
4339 (sizeof(struct LD_LOAD_BALANCE_INFO
) *
4340 MAX_LOGICAL_DRIVES_EXT
));
4342 if (!megasas_get_map_info(instance
))
4343 megasas_sync_map_info(instance
);
4345 megasas_setup_jbod_map(instance
);
4347 shost_for_each_device(sdev
, shost
)
4348 megasas_set_dynamic_target_properties(sdev
);
4350 /* reset stream detection array */
4351 if (instance
->is_ventura
) {
4352 for (j
= 0; j
< MAX_LOGICAL_DRIVES_EXT
; ++j
) {
4353 memset(fusion
->stream_detect_by_ld
[j
],
4354 0, sizeof(struct LD_STREAM_DETECT
));
4355 fusion
->stream_detect_by_ld
[j
]->mru_bit_map
4360 clear_bit(MEGASAS_FUSION_IN_RESET
,
4361 &instance
->reset_flags
);
4362 instance
->instancet
->enable_intr(instance
);
4363 atomic_set(&instance
->adprecovery
, MEGASAS_HBA_OPERATIONAL
);
4365 dev_info(&instance
->pdev
->dev
, "Interrupts are enabled and"
4366 " controller is OPERATIONAL for scsi:%d\n",
4367 instance
->host
->host_no
);
4369 /* Restart SR-IOV heartbeat */
4370 if (instance
->requestorId
) {
4371 if (!megasas_sriov_start_heartbeat(instance
, 0))
4372 megasas_start_timer(instance
,
4373 &instance
->sriov_heartbeat_timer
,
4374 megasas_sriov_heartbeat_handler
,
4375 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF
);
4377 instance
->skip_heartbeat_timer_del
= 1;
4380 if (instance
->crash_dump_drv_support
&&
4381 instance
->crash_dump_app_support
)
4382 megasas_set_crash_dump_params(instance
,
4383 MR_CRASH_BUF_TURN_ON
);
4385 megasas_set_crash_dump_params(instance
,
4386 MR_CRASH_BUF_TURN_OFF
);
4390 /* Adapter reset completed successfully */
4391 dev_warn(&instance
->pdev
->dev
,
4392 "Reset successful for scsi%d.\n",
4393 instance
->host
->host_no
);
4398 /* Reset failed, kill the adapter */
4399 dev_warn(&instance
->pdev
->dev
, "Reset failed, killing "
4400 "adapter scsi%d.\n", instance
->host
->host_no
);
4401 megaraid_sas_kill_hba(instance
);
4402 instance
->skip_heartbeat_timer_del
= 1;
4405 /* For VF: Restart HB timer if we didn't OCR */
4406 if (instance
->requestorId
) {
4407 megasas_start_timer(instance
,
4408 &instance
->sriov_heartbeat_timer
,
4409 megasas_sriov_heartbeat_handler
,
4410 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF
);
4412 clear_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4413 instance
->instancet
->enable_intr(instance
);
4414 atomic_set(&instance
->adprecovery
, MEGASAS_HBA_OPERATIONAL
);
4417 clear_bit(MEGASAS_FUSION_IN_RESET
, &instance
->reset_flags
);
4418 mutex_unlock(&instance
->reset_mutex
);
4422 /* Fusion Crash dump collection work queue */
4423 void megasas_fusion_crash_dump_wq(struct work_struct
*work
)
4425 struct megasas_instance
*instance
=
4426 container_of(work
, struct megasas_instance
, crash_init
);
4428 u8 partial_copy
= 0;
4431 status_reg
= instance
->instancet
->read_fw_status_reg(instance
->reg_set
);
4434 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
4435 * to host crash buffers
4437 if (instance
->drv_buf_index
== 0) {
4438 /* Buffer is already allocated for old Crash dump.
4439 * Do OCR and do not wait for crash dump collection
4441 if (instance
->drv_buf_alloc
) {
4442 dev_info(&instance
->pdev
->dev
, "earlier crash dump is "
4443 "not yet copied by application, ignoring this "
4444 "crash dump and initiating OCR\n");
4445 status_reg
|= MFI_STATE_CRASH_DUMP_DONE
;
4447 &instance
->reg_set
->outbound_scratch_pad
);
4448 readl(&instance
->reg_set
->outbound_scratch_pad
);
4451 megasas_alloc_host_crash_buffer(instance
);
4452 dev_info(&instance
->pdev
->dev
, "Number of host crash buffers "
4453 "allocated: %d\n", instance
->drv_buf_alloc
);
4457 * Driver has allocated max buffers, which can be allocated
4458 * and FW has more crash dump data, then driver will
4461 if (instance
->drv_buf_index
>= (instance
->drv_buf_alloc
)) {
4462 dev_info(&instance
->pdev
->dev
, "Driver is done copying "
4463 "the buffer: %d\n", instance
->drv_buf_alloc
);
4464 status_reg
|= MFI_STATE_CRASH_DUMP_DONE
;
4467 memcpy(instance
->crash_buf
[instance
->drv_buf_index
],
4468 instance
->crash_dump_buf
, CRASH_DMA_BUF_SIZE
);
4469 instance
->drv_buf_index
++;
4470 status_reg
&= ~MFI_STATE_DMADONE
;
4473 if (status_reg
& MFI_STATE_CRASH_DUMP_DONE
) {
4474 dev_info(&instance
->pdev
->dev
, "Crash Dump is available,number "
4475 "of copied buffers: %d\n", instance
->drv_buf_index
);
4476 instance
->fw_crash_buffer_size
= instance
->drv_buf_index
;
4477 instance
->fw_crash_state
= AVAILABLE
;
4478 instance
->drv_buf_index
= 0;
4479 writel(status_reg
, &instance
->reg_set
->outbound_scratch_pad
);
4480 readl(&instance
->reg_set
->outbound_scratch_pad
);
4482 megasas_reset_fusion(instance
->host
, 0);
4484 writel(status_reg
, &instance
->reg_set
->outbound_scratch_pad
);
4485 readl(&instance
->reg_set
->outbound_scratch_pad
);
4490 /* Fusion OCR work queue */
4491 void megasas_fusion_ocr_wq(struct work_struct
*work
)
4493 struct megasas_instance
*instance
=
4494 container_of(work
, struct megasas_instance
, work_init
);
4496 megasas_reset_fusion(instance
->host
, 0);
4499 /* Allocate fusion context */
4501 megasas_alloc_fusion_context(struct megasas_instance
*instance
)
4503 struct fusion_context
*fusion
;
4505 instance
->ctrl_context_pages
= get_order(sizeof(struct fusion_context
));
4506 instance
->ctrl_context
= (void *)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
,
4507 instance
->ctrl_context_pages
);
4508 if (!instance
->ctrl_context
) {
4509 /* fall back to using vmalloc for fusion_context */
4510 instance
->ctrl_context
= vzalloc(sizeof(struct fusion_context
));
4511 if (!instance
->ctrl_context
) {
4512 dev_err(&instance
->pdev
->dev
, "Failed from %s %d\n", __func__
, __LINE__
);
4517 fusion
= instance
->ctrl_context
;
4519 fusion
->load_balance_info_pages
= get_order(MAX_LOGICAL_DRIVES_EXT
*
4520 sizeof(struct LD_LOAD_BALANCE_INFO
));
4521 fusion
->load_balance_info
=
4522 (struct LD_LOAD_BALANCE_INFO
*)__get_free_pages(GFP_KERNEL
| __GFP_ZERO
,
4523 fusion
->load_balance_info_pages
);
4524 if (!fusion
->load_balance_info
) {
4525 fusion
->load_balance_info
= vzalloc(MAX_LOGICAL_DRIVES_EXT
*
4526 sizeof(struct LD_LOAD_BALANCE_INFO
));
4527 if (!fusion
->load_balance_info
)
4528 dev_err(&instance
->pdev
->dev
, "Failed to allocate load_balance_info, "
4529 "continuing without Load Balance support\n");
4536 megasas_free_fusion_context(struct megasas_instance
*instance
)
4538 struct fusion_context
*fusion
= instance
->ctrl_context
;
4541 if (fusion
->load_balance_info
) {
4542 if (is_vmalloc_addr(fusion
->load_balance_info
))
4543 vfree(fusion
->load_balance_info
);
4545 free_pages((ulong
)fusion
->load_balance_info
,
4546 fusion
->load_balance_info_pages
);
4549 if (is_vmalloc_addr(fusion
))
4552 free_pages((ulong
)fusion
,
4553 instance
->ctrl_context_pages
);
4557 struct megasas_instance_template megasas_instance_template_fusion
= {
4558 .enable_intr
= megasas_enable_intr_fusion
,
4559 .disable_intr
= megasas_disable_intr_fusion
,
4560 .clear_intr
= megasas_clear_intr_fusion
,
4561 .read_fw_status_reg
= megasas_read_fw_status_reg_fusion
,
4562 .adp_reset
= megasas_adp_reset_fusion
,
4563 .check_reset
= megasas_check_reset_fusion
,
4564 .service_isr
= megasas_isr_fusion
,
4565 .tasklet
= megasas_complete_cmd_dpc_fusion
,
4566 .init_adapter
= megasas_init_adapter_fusion
,
4567 .build_and_issue_cmd
= megasas_build_and_issue_cmd_fusion
,
4568 .issue_dcmd
= megasas_issue_dcmd_fusion
,