]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/scsi/megaraid/megaraid_sas_fusion.c
Merge branch 'for-4.14-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave...
[mirror_ubuntu-bionic-kernel.git] / drivers / scsi / megaraid / megaraid_sas_fusion.c
1 /*
2 * Linux MegaRAID driver for SAS based RAID controllers
3 *
4 * Copyright (c) 2009-2013 LSI Corporation
5 * Copyright (c) 2013-2014 Avago Technologies
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 *
20 * FILE: megaraid_sas_fusion.c
21 *
22 * Authors: Avago Technologies
23 * Sumant Patro
24 * Adam Radford
25 * Kashyap Desai <kashyap.desai@avagotech.com>
26 * Sumit Saxena <sumit.saxena@avagotech.com>
27 *
28 * Send feedback to: megaraidlinux.pdl@avagotech.com
29 *
30 * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
31 * San Jose, California 95131
32 */
33
34 #include <linux/kernel.h>
35 #include <linux/types.h>
36 #include <linux/pci.h>
37 #include <linux/list.h>
38 #include <linux/moduleparam.h>
39 #include <linux/module.h>
40 #include <linux/spinlock.h>
41 #include <linux/interrupt.h>
42 #include <linux/delay.h>
43 #include <linux/uio.h>
44 #include <linux/uaccess.h>
45 #include <linux/fs.h>
46 #include <linux/compat.h>
47 #include <linux/blkdev.h>
48 #include <linux/mutex.h>
49 #include <linux/poll.h>
50 #include <linux/vmalloc.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsi_dbg.h>
57 #include <linux/dmi.h>
58
59 #include "megaraid_sas_fusion.h"
60 #include "megaraid_sas.h"
61
62
63 extern void megasas_free_cmds(struct megasas_instance *instance);
64 extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
65 *instance);
66 extern void
67 megasas_complete_cmd(struct megasas_instance *instance,
68 struct megasas_cmd *cmd, u8 alt_status);
69 int
70 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
71 int seconds);
72
73 void
74 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
75 int megasas_alloc_cmds(struct megasas_instance *instance);
76 int
77 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
78 int
79 megasas_issue_polled(struct megasas_instance *instance,
80 struct megasas_cmd *cmd);
81 void
82 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
83
84 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
85 void megaraid_sas_kill_hba(struct megasas_instance *instance);
86
87 extern u32 megasas_dbg_lvl;
88 void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
89 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
90 int initial);
91 void megasas_start_timer(struct megasas_instance *instance,
92 struct timer_list *timer,
93 void *fn, unsigned long interval);
94 extern struct megasas_mgmt_info megasas_mgmt_info;
95 extern unsigned int resetwaittime;
96 extern unsigned int dual_qdepth_disable;
97 static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
98 static void megasas_free_reply_fusion(struct megasas_instance *instance);
99
100
101
102 /**
103 * megasas_enable_intr_fusion - Enables interrupts
104 * @regs: MFI register set
105 */
106 void
107 megasas_enable_intr_fusion(struct megasas_instance *instance)
108 {
109 struct megasas_register_set __iomem *regs;
110 regs = instance->reg_set;
111
112 instance->mask_interrupts = 0;
113 /* For Thunderbolt/Invader also clear intr on enable */
114 writel(~0, &regs->outbound_intr_status);
115 readl(&regs->outbound_intr_status);
116
117 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
118
119 /* Dummy readl to force pci flush */
120 readl(&regs->outbound_intr_mask);
121 }
122
123 /**
124 * megasas_disable_intr_fusion - Disables interrupt
125 * @regs: MFI register set
126 */
127 void
128 megasas_disable_intr_fusion(struct megasas_instance *instance)
129 {
130 u32 mask = 0xFFFFFFFF;
131 u32 status;
132 struct megasas_register_set __iomem *regs;
133 regs = instance->reg_set;
134 instance->mask_interrupts = 1;
135
136 writel(mask, &regs->outbound_intr_mask);
137 /* Dummy readl to force pci flush */
138 status = readl(&regs->outbound_intr_mask);
139 }
140
141 int
142 megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
143 {
144 u32 status;
145 /*
146 * Check if it is our interrupt
147 */
148 status = readl(&regs->outbound_intr_status);
149
150 if (status & 1) {
151 writel(status, &regs->outbound_intr_status);
152 readl(&regs->outbound_intr_status);
153 return 1;
154 }
155 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
156 return 0;
157
158 return 1;
159 }
160
161 /**
162 * megasas_get_cmd_fusion - Get a command from the free pool
163 * @instance: Adapter soft state
164 *
165 * Returns a blk_tag indexed mpt frame
166 */
167 inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
168 *instance, u32 blk_tag)
169 {
170 struct fusion_context *fusion;
171
172 fusion = instance->ctrl_context;
173 return fusion->cmd_list[blk_tag];
174 }
175
176 /**
177 * megasas_return_cmd_fusion - Return a cmd to free command pool
178 * @instance: Adapter soft state
179 * @cmd: Command packet to be returned to free command pool
180 */
181 inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
182 struct megasas_cmd_fusion *cmd)
183 {
184 cmd->scmd = NULL;
185 memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
186 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
187 cmd->cmd_completed = false;
188 }
189
190 /**
191 * megasas_fire_cmd_fusion - Sends command to the FW
192 * @instance: Adapter soft state
193 * @req_desc: 32bit or 64bit Request descriptor
194 *
195 * Perform PCI Write. Ventura supports 32 bit Descriptor.
196 * Prior to Ventura (12G) MR controller supports 64 bit Descriptor.
197 */
198
199 static void
200 megasas_fire_cmd_fusion(struct megasas_instance *instance,
201 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
202 {
203 if (instance->is_ventura)
204 writel(le32_to_cpu(req_desc->u.low),
205 &instance->reg_set->inbound_single_queue_port);
206 else {
207 #if defined(writeq) && defined(CONFIG_64BIT)
208 u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
209 le32_to_cpu(req_desc->u.low));
210
211 writeq(req_data, &instance->reg_set->inbound_low_queue_port);
212 #else
213 unsigned long flags;
214 spin_lock_irqsave(&instance->hba_lock, flags);
215 writel(le32_to_cpu(req_desc->u.low),
216 &instance->reg_set->inbound_low_queue_port);
217 writel(le32_to_cpu(req_desc->u.high),
218 &instance->reg_set->inbound_high_queue_port);
219 mmiowb();
220 spin_unlock_irqrestore(&instance->hba_lock, flags);
221 #endif
222 }
223 }
224
225 /**
226 * megasas_fusion_update_can_queue - Do all Adapter Queue depth related calculations here
227 * @instance: Adapter soft state
228 * fw_boot_context: Whether this function called during probe or after OCR
229 *
230 * This function is only for fusion controllers.
231 * Update host can queue, if firmware downgrade max supported firmware commands.
232 * Firmware upgrade case will be skiped because underlying firmware has
233 * more resource than exposed to the OS.
234 *
235 */
236 static void
237 megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
238 {
239 u16 cur_max_fw_cmds = 0;
240 u16 ldio_threshold = 0;
241 struct megasas_register_set __iomem *reg_set;
242
243 reg_set = instance->reg_set;
244
245 /* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
246 if (!instance->is_ventura)
247 cur_max_fw_cmds =
248 readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
249
250 if (dual_qdepth_disable || !cur_max_fw_cmds)
251 cur_max_fw_cmds = instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
252 else
253 ldio_threshold =
254 (instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
255
256 dev_info(&instance->pdev->dev,
257 "Current firmware maximum commands: %d\t LDIO threshold: %d\n",
258 cur_max_fw_cmds, ldio_threshold);
259
260 if (fw_boot_context == OCR_CONTEXT) {
261 cur_max_fw_cmds = cur_max_fw_cmds - 1;
262 if (cur_max_fw_cmds < instance->max_fw_cmds) {
263 instance->cur_can_queue =
264 cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
265 MEGASAS_FUSION_IOCTL_CMDS);
266 instance->host->can_queue = instance->cur_can_queue;
267 instance->ldio_threshold = ldio_threshold;
268 }
269 } else {
270 instance->max_fw_cmds = cur_max_fw_cmds;
271 instance->ldio_threshold = ldio_threshold;
272
273 if (!instance->is_rdpq)
274 instance->max_fw_cmds =
275 min_t(u16, instance->max_fw_cmds, 1024);
276
277 if (reset_devices)
278 instance->max_fw_cmds = min(instance->max_fw_cmds,
279 (u16)MEGASAS_KDUMP_QUEUE_DEPTH);
280 /*
281 * Reduce the max supported cmds by 1. This is to ensure that the
282 * reply_q_sz (1 more than the max cmd that driver may send)
283 * does not exceed max cmds that the FW can support
284 */
285 instance->max_fw_cmds = instance->max_fw_cmds-1;
286
287 instance->max_scsi_cmds = instance->max_fw_cmds -
288 (MEGASAS_FUSION_INTERNAL_CMDS +
289 MEGASAS_FUSION_IOCTL_CMDS);
290 instance->cur_can_queue = instance->max_scsi_cmds;
291 instance->host->can_queue = instance->cur_can_queue;
292 }
293
294 if (instance->is_ventura)
295 instance->max_mpt_cmds =
296 instance->max_fw_cmds * RAID_1_PEER_CMDS;
297 else
298 instance->max_mpt_cmds = instance->max_fw_cmds;
299 }
300 /**
301 * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
302 * @instance: Adapter soft state
303 */
304 void
305 megasas_free_cmds_fusion(struct megasas_instance *instance)
306 {
307 int i;
308 struct fusion_context *fusion = instance->ctrl_context;
309 struct megasas_cmd_fusion *cmd;
310
311 /* SG, Sense */
312 for (i = 0; i < instance->max_mpt_cmds; i++) {
313 cmd = fusion->cmd_list[i];
314 if (cmd) {
315 if (cmd->sg_frame)
316 dma_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
317 cmd->sg_frame_phys_addr);
318 if (cmd->sense)
319 dma_pool_free(fusion->sense_dma_pool, cmd->sense,
320 cmd->sense_phys_addr);
321 }
322 }
323
324 if (fusion->sg_dma_pool) {
325 dma_pool_destroy(fusion->sg_dma_pool);
326 fusion->sg_dma_pool = NULL;
327 }
328 if (fusion->sense_dma_pool) {
329 dma_pool_destroy(fusion->sense_dma_pool);
330 fusion->sense_dma_pool = NULL;
331 }
332
333
334 /* Reply Frame, Desc*/
335 if (instance->is_rdpq)
336 megasas_free_rdpq_fusion(instance);
337 else
338 megasas_free_reply_fusion(instance);
339
340 /* Request Frame, Desc*/
341 if (fusion->req_frames_desc)
342 dma_free_coherent(&instance->pdev->dev,
343 fusion->request_alloc_sz, fusion->req_frames_desc,
344 fusion->req_frames_desc_phys);
345 if (fusion->io_request_frames)
346 dma_pool_free(fusion->io_request_frames_pool,
347 fusion->io_request_frames,
348 fusion->io_request_frames_phys);
349 if (fusion->io_request_frames_pool) {
350 dma_pool_destroy(fusion->io_request_frames_pool);
351 fusion->io_request_frames_pool = NULL;
352 }
353
354
355 /* cmd_list */
356 for (i = 0; i < instance->max_mpt_cmds; i++)
357 kfree(fusion->cmd_list[i]);
358
359 kfree(fusion->cmd_list);
360 }
361
362 /**
363 * megasas_create_sg_sense_fusion - Creates DMA pool for cmd frames
364 * @instance: Adapter soft state
365 *
366 */
367 static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
368 {
369 int i;
370 u16 max_cmd;
371 struct fusion_context *fusion;
372 struct megasas_cmd_fusion *cmd;
373
374 fusion = instance->ctrl_context;
375 max_cmd = instance->max_fw_cmds;
376
377
378 fusion->sg_dma_pool =
379 dma_pool_create("mr_sg", &instance->pdev->dev,
380 instance->max_chain_frame_sz,
381 MR_DEFAULT_NVME_PAGE_SIZE, 0);
382 /* SCSI_SENSE_BUFFERSIZE = 96 bytes */
383 fusion->sense_dma_pool =
384 dma_pool_create("mr_sense", &instance->pdev->dev,
385 SCSI_SENSE_BUFFERSIZE, 64, 0);
386
387 if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
388 dev_err(&instance->pdev->dev,
389 "Failed from %s %d\n", __func__, __LINE__);
390 return -ENOMEM;
391 }
392
393 /*
394 * Allocate and attach a frame to each of the commands in cmd_list
395 */
396 for (i = 0; i < max_cmd; i++) {
397 cmd = fusion->cmd_list[i];
398 cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
399 GFP_KERNEL, &cmd->sg_frame_phys_addr);
400
401 cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
402 GFP_KERNEL, &cmd->sense_phys_addr);
403 if (!cmd->sg_frame || !cmd->sense) {
404 dev_err(&instance->pdev->dev,
405 "Failed from %s %d\n", __func__, __LINE__);
406 return -ENOMEM;
407 }
408 }
409
410 /* create sense buffer for the raid 1/10 fp */
411 for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
412 cmd = fusion->cmd_list[i];
413 cmd->sense = dma_pool_alloc(fusion->sense_dma_pool,
414 GFP_KERNEL, &cmd->sense_phys_addr);
415 if (!cmd->sense) {
416 dev_err(&instance->pdev->dev,
417 "Failed from %s %d\n", __func__, __LINE__);
418 return -ENOMEM;
419 }
420 }
421
422 return 0;
423 }
424
425 int
426 megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
427 {
428 u32 max_mpt_cmd, i, j;
429 struct fusion_context *fusion;
430
431 fusion = instance->ctrl_context;
432
433 max_mpt_cmd = instance->max_mpt_cmds;
434
435 /*
436 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
437 * Allocate the dynamic array first and then allocate individual
438 * commands.
439 */
440 fusion->cmd_list =
441 kzalloc(sizeof(struct megasas_cmd_fusion *) * max_mpt_cmd,
442 GFP_KERNEL);
443 if (!fusion->cmd_list) {
444 dev_err(&instance->pdev->dev,
445 "Failed from %s %d\n", __func__, __LINE__);
446 return -ENOMEM;
447 }
448
449 for (i = 0; i < max_mpt_cmd; i++) {
450 fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
451 GFP_KERNEL);
452 if (!fusion->cmd_list[i]) {
453 for (j = 0; j < i; j++)
454 kfree(fusion->cmd_list[j]);
455 kfree(fusion->cmd_list);
456 dev_err(&instance->pdev->dev,
457 "Failed from %s %d\n", __func__, __LINE__);
458 return -ENOMEM;
459 }
460 }
461
462 return 0;
463 }
464 int
465 megasas_alloc_request_fusion(struct megasas_instance *instance)
466 {
467 struct fusion_context *fusion;
468
469 fusion = instance->ctrl_context;
470
471 fusion->req_frames_desc =
472 dma_alloc_coherent(&instance->pdev->dev,
473 fusion->request_alloc_sz,
474 &fusion->req_frames_desc_phys, GFP_KERNEL);
475 if (!fusion->req_frames_desc) {
476 dev_err(&instance->pdev->dev,
477 "Failed from %s %d\n", __func__, __LINE__);
478 return -ENOMEM;
479 }
480
481 fusion->io_request_frames_pool =
482 dma_pool_create("mr_ioreq", &instance->pdev->dev,
483 fusion->io_frames_alloc_sz, 16, 0);
484
485 if (!fusion->io_request_frames_pool) {
486 dev_err(&instance->pdev->dev,
487 "Failed from %s %d\n", __func__, __LINE__);
488 return -ENOMEM;
489 }
490
491 fusion->io_request_frames =
492 dma_pool_alloc(fusion->io_request_frames_pool,
493 GFP_KERNEL, &fusion->io_request_frames_phys);
494 if (!fusion->io_request_frames) {
495 dev_err(&instance->pdev->dev,
496 "Failed from %s %d\n", __func__, __LINE__);
497 return -ENOMEM;
498 }
499 return 0;
500 }
501
502 int
503 megasas_alloc_reply_fusion(struct megasas_instance *instance)
504 {
505 int i, count;
506 struct fusion_context *fusion;
507 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
508 fusion = instance->ctrl_context;
509
510 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
511 fusion->reply_frames_desc_pool =
512 dma_pool_create("mr_reply", &instance->pdev->dev,
513 fusion->reply_alloc_sz * count, 16, 0);
514
515 if (!fusion->reply_frames_desc_pool) {
516 dev_err(&instance->pdev->dev,
517 "Failed from %s %d\n", __func__, __LINE__);
518 return -ENOMEM;
519 }
520
521 fusion->reply_frames_desc[0] =
522 dma_pool_alloc(fusion->reply_frames_desc_pool,
523 GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
524 if (!fusion->reply_frames_desc[0]) {
525 dev_err(&instance->pdev->dev,
526 "Failed from %s %d\n", __func__, __LINE__);
527 return -ENOMEM;
528 }
529 reply_desc = fusion->reply_frames_desc[0];
530 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
531 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
532
533 /* This is not a rdpq mode, but driver still populate
534 * reply_frame_desc array to use same msix index in ISR path.
535 */
536 for (i = 0; i < (count - 1); i++)
537 fusion->reply_frames_desc[i + 1] =
538 fusion->reply_frames_desc[i] +
539 (fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
540
541 return 0;
542 }
543
544 int
545 megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
546 {
547 int i, j, count;
548 struct fusion_context *fusion;
549 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
550
551 fusion = instance->ctrl_context;
552
553 fusion->rdpq_virt = pci_alloc_consistent(instance->pdev,
554 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
555 &fusion->rdpq_phys);
556 if (!fusion->rdpq_virt) {
557 dev_err(&instance->pdev->dev,
558 "Failed from %s %d\n", __func__, __LINE__);
559 return -ENOMEM;
560 }
561
562 memset(fusion->rdpq_virt, 0,
563 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION);
564 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
565 fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
566 &instance->pdev->dev,
567 fusion->reply_alloc_sz,
568 16, 0);
569
570 if (!fusion->reply_frames_desc_pool) {
571 dev_err(&instance->pdev->dev,
572 "Failed from %s %d\n", __func__, __LINE__);
573 return -ENOMEM;
574 }
575
576 for (i = 0; i < count; i++) {
577 fusion->reply_frames_desc[i] =
578 dma_pool_alloc(fusion->reply_frames_desc_pool,
579 GFP_KERNEL, &fusion->reply_frames_desc_phys[i]);
580 if (!fusion->reply_frames_desc[i]) {
581 dev_err(&instance->pdev->dev,
582 "Failed from %s %d\n", __func__, __LINE__);
583 return -ENOMEM;
584 }
585
586 fusion->rdpq_virt[i].RDPQBaseAddress =
587 cpu_to_le64(fusion->reply_frames_desc_phys[i]);
588
589 reply_desc = fusion->reply_frames_desc[i];
590 for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
591 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
592 }
593 return 0;
594 }
595
596 static void
597 megasas_free_rdpq_fusion(struct megasas_instance *instance) {
598
599 int i;
600 struct fusion_context *fusion;
601
602 fusion = instance->ctrl_context;
603
604 for (i = 0; i < MAX_MSIX_QUEUES_FUSION; i++) {
605 if (fusion->reply_frames_desc[i])
606 dma_pool_free(fusion->reply_frames_desc_pool,
607 fusion->reply_frames_desc[i],
608 fusion->reply_frames_desc_phys[i]);
609 }
610
611 if (fusion->reply_frames_desc_pool)
612 dma_pool_destroy(fusion->reply_frames_desc_pool);
613
614 if (fusion->rdpq_virt)
615 pci_free_consistent(instance->pdev,
616 sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
617 fusion->rdpq_virt, fusion->rdpq_phys);
618 }
619
620 static void
621 megasas_free_reply_fusion(struct megasas_instance *instance) {
622
623 struct fusion_context *fusion;
624
625 fusion = instance->ctrl_context;
626
627 if (fusion->reply_frames_desc[0])
628 dma_pool_free(fusion->reply_frames_desc_pool,
629 fusion->reply_frames_desc[0],
630 fusion->reply_frames_desc_phys[0]);
631
632 if (fusion->reply_frames_desc_pool)
633 dma_pool_destroy(fusion->reply_frames_desc_pool);
634
635 }
636
637
638 /**
639 * megasas_alloc_cmds_fusion - Allocates the command packets
640 * @instance: Adapter soft state
641 *
642 *
643 * Each frame has a 32-bit field called context. This context is used to get
644 * back the megasas_cmd_fusion from the frame when a frame gets completed
645 * In this driver, the 32 bit values are the indices into an array cmd_list.
646 * This array is used only to look up the megasas_cmd_fusion given the context.
647 * The free commands themselves are maintained in a linked list called cmd_pool.
648 *
649 * cmds are formed in the io_request and sg_frame members of the
650 * megasas_cmd_fusion. The context field is used to get a request descriptor
651 * and is used as SMID of the cmd.
652 * SMID value range is from 1 to max_fw_cmds.
653 */
654 int
655 megasas_alloc_cmds_fusion(struct megasas_instance *instance)
656 {
657 int i;
658 struct fusion_context *fusion;
659 struct megasas_cmd_fusion *cmd;
660 u32 offset;
661 dma_addr_t io_req_base_phys;
662 u8 *io_req_base;
663
664
665 fusion = instance->ctrl_context;
666
667 if (megasas_alloc_cmdlist_fusion(instance))
668 goto fail_exit;
669
670 if (megasas_alloc_request_fusion(instance))
671 goto fail_exit;
672
673 if (instance->is_rdpq) {
674 if (megasas_alloc_rdpq_fusion(instance))
675 goto fail_exit;
676 } else
677 if (megasas_alloc_reply_fusion(instance))
678 goto fail_exit;
679
680
681 /* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
682 io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
683 io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
684
685 /*
686 * Add all the commands to command pool (fusion->cmd_pool)
687 */
688
689 /* SMID 0 is reserved. Set SMID/index from 1 */
690 for (i = 0; i < instance->max_mpt_cmds; i++) {
691 cmd = fusion->cmd_list[i];
692 offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
693 memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
694 cmd->index = i + 1;
695 cmd->scmd = NULL;
696 cmd->sync_cmd_idx =
697 (i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
698 (i - instance->max_scsi_cmds) :
699 (u32)ULONG_MAX; /* Set to Invalid */
700 cmd->instance = instance;
701 cmd->io_request =
702 (struct MPI2_RAID_SCSI_IO_REQUEST *)
703 (io_req_base + offset);
704 memset(cmd->io_request, 0,
705 sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
706 cmd->io_request_phys_addr = io_req_base_phys + offset;
707 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
708 }
709
710 if (megasas_create_sg_sense_fusion(instance))
711 goto fail_exit;
712
713 return 0;
714
715 fail_exit:
716 megasas_free_cmds_fusion(instance);
717 return -ENOMEM;
718 }
719
720 /**
721 * wait_and_poll - Issues a polling command
722 * @instance: Adapter soft state
723 * @cmd: Command packet to be issued
724 *
725 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
726 */
727 int
728 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
729 int seconds)
730 {
731 int i;
732 struct megasas_header *frame_hdr = &cmd->frame->hdr;
733 struct fusion_context *fusion;
734
735 u32 msecs = seconds * 1000;
736
737 fusion = instance->ctrl_context;
738 /*
739 * Wait for cmd_status to change
740 */
741 for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
742 rmb();
743 msleep(20);
744 }
745
746 if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
747 return DCMD_TIMEOUT;
748 else if (frame_hdr->cmd_status == MFI_STAT_OK)
749 return DCMD_SUCCESS;
750 else
751 return DCMD_FAILED;
752 }
753
754 /**
755 * megasas_ioc_init_fusion - Initializes the FW
756 * @instance: Adapter soft state
757 *
758 * Issues the IOC Init cmd
759 */
760 int
761 megasas_ioc_init_fusion(struct megasas_instance *instance)
762 {
763 struct megasas_init_frame *init_frame;
764 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
765 dma_addr_t ioc_init_handle;
766 struct megasas_cmd *cmd;
767 u8 ret, cur_rdpq_mode;
768 struct fusion_context *fusion;
769 union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
770 int i;
771 struct megasas_header *frame_hdr;
772 const char *sys_info;
773 MFI_CAPABILITIES *drv_ops;
774 u32 scratch_pad_2;
775 unsigned long flags;
776
777 fusion = instance->ctrl_context;
778
779 cmd = megasas_get_cmd(instance);
780
781 if (!cmd) {
782 dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
783 ret = 1;
784 goto fail_get_cmd;
785 }
786
787 scratch_pad_2 = readl
788 (&instance->reg_set->outbound_scratch_pad_2);
789
790 cur_rdpq_mode = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
791
792 if (instance->is_rdpq && !cur_rdpq_mode) {
793 dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
794 " from RDPQ mode to non RDPQ mode\n");
795 ret = 1;
796 goto fail_fw_init;
797 }
798
799 instance->fw_sync_cache_support = (scratch_pad_2 &
800 MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
801 dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
802 instance->fw_sync_cache_support ? "Yes" : "No");
803
804 IOCInitMessage =
805 dma_alloc_coherent(&instance->pdev->dev,
806 sizeof(struct MPI2_IOC_INIT_REQUEST),
807 &ioc_init_handle, GFP_KERNEL);
808
809 if (!IOCInitMessage) {
810 dev_err(&instance->pdev->dev, "Could not allocate memory for "
811 "IOCInitMessage\n");
812 ret = 1;
813 goto fail_fw_init;
814 }
815
816 memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
817
818 IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
819 IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
820 IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
821 IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
822 IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
823
824 IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
825 IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
826 cpu_to_le64(fusion->rdpq_phys) :
827 cpu_to_le64(fusion->reply_frames_desc_phys[0]);
828 IOCInitMessage->MsgFlags = instance->is_rdpq ?
829 MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
830 IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
831 IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
832 IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
833 init_frame = (struct megasas_init_frame *)cmd->frame;
834 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
835
836 frame_hdr = &cmd->frame->hdr;
837 frame_hdr->cmd_status = 0xFF;
838 frame_hdr->flags = cpu_to_le16(
839 le16_to_cpu(frame_hdr->flags) |
840 MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
841
842 init_frame->cmd = MFI_CMD_INIT;
843 init_frame->cmd_status = 0xFF;
844
845 drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
846
847 /* driver support Extended MSIX */
848 if (fusion->adapter_type >= INVADER_SERIES)
849 drv_ops->mfi_capabilities.support_additional_msix = 1;
850 /* driver supports HA / Remote LUN over Fast Path interface */
851 drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
852
853 drv_ops->mfi_capabilities.support_max_255lds = 1;
854 drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
855 drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
856
857 if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
858 drv_ops->mfi_capabilities.support_ext_io_size = 1;
859
860 drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
861 if (!dual_qdepth_disable)
862 drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
863
864 drv_ops->mfi_capabilities.support_qd_throttling = 1;
865 drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
866 /* Convert capability to LE32 */
867 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
868
869 sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
870 if (instance->system_info_buf && sys_info) {
871 memcpy(instance->system_info_buf->systemId, sys_info,
872 strlen(sys_info) > 64 ? 64 : strlen(sys_info));
873 instance->system_info_buf->systemIdLength =
874 strlen(sys_info) > 64 ? 64 : strlen(sys_info);
875 init_frame->system_info_lo = instance->system_info_h;
876 init_frame->system_info_hi = 0;
877 }
878
879 init_frame->queue_info_new_phys_addr_hi =
880 cpu_to_le32(upper_32_bits(ioc_init_handle));
881 init_frame->queue_info_new_phys_addr_lo =
882 cpu_to_le32(lower_32_bits(ioc_init_handle));
883 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
884
885 req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
886 req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
887 req_desc.MFAIo.RequestFlags =
888 (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
889 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
890
891 /*
892 * disable the intr before firing the init frame
893 */
894 instance->instancet->disable_intr(instance);
895
896 for (i = 0; i < (10 * 1000); i += 20) {
897 if (readl(&instance->reg_set->doorbell) & 1)
898 msleep(20);
899 else
900 break;
901 }
902
903 /* For Ventura also IOC INIT required 64 bit Descriptor write. */
904 spin_lock_irqsave(&instance->hba_lock, flags);
905 writel(le32_to_cpu(req_desc.u.low),
906 &instance->reg_set->inbound_low_queue_port);
907 writel(le32_to_cpu(req_desc.u.high),
908 &instance->reg_set->inbound_high_queue_port);
909 mmiowb();
910 spin_unlock_irqrestore(&instance->hba_lock, flags);
911
912 wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
913
914 frame_hdr = &cmd->frame->hdr;
915 if (frame_hdr->cmd_status != 0) {
916 ret = 1;
917 goto fail_fw_init;
918 }
919
920 ret = 0;
921
922 fail_fw_init:
923 megasas_return_cmd(instance, cmd);
924 if (IOCInitMessage)
925 dma_free_coherent(&instance->pdev->dev,
926 sizeof(struct MPI2_IOC_INIT_REQUEST),
927 IOCInitMessage, ioc_init_handle);
928 fail_get_cmd:
929 dev_err(&instance->pdev->dev,
930 "Init cmd return status %s for SCSI host %d\n",
931 ret ? "FAILED" : "SUCCESS", instance->host->host_no);
932
933 return ret;
934 }
935
936 /**
937 * megasas_sync_pd_seq_num - JBOD SEQ MAP
938 * @instance: Adapter soft state
939 * @pend: set to 1, if it is pended jbod map.
940 *
941 * Issue Jbod map to the firmware. If it is pended command,
942 * issue command and return. If it is first instance of jbod map
943 * issue and receive command.
944 */
945 int
946 megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
947 int ret = 0;
948 u32 pd_seq_map_sz;
949 struct megasas_cmd *cmd;
950 struct megasas_dcmd_frame *dcmd;
951 struct fusion_context *fusion = instance->ctrl_context;
952 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
953 dma_addr_t pd_seq_h;
954
955 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
956 pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
957 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
958 (sizeof(struct MR_PD_CFG_SEQ) *
959 (MAX_PHYSICAL_DEVICES - 1));
960
961 cmd = megasas_get_cmd(instance);
962 if (!cmd) {
963 dev_err(&instance->pdev->dev,
964 "Could not get mfi cmd. Fail from %s %d\n",
965 __func__, __LINE__);
966 return -ENOMEM;
967 }
968
969 dcmd = &cmd->frame->dcmd;
970
971 memset(pd_sync, 0, pd_seq_map_sz);
972 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
973 dcmd->cmd = MFI_CMD_DCMD;
974 dcmd->cmd_status = 0xFF;
975 dcmd->sge_count = 1;
976 dcmd->timeout = 0;
977 dcmd->pad_0 = 0;
978 dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
979 dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
980 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
981 dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
982
983 if (pend) {
984 dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
985 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
986 instance->jbod_seq_cmd = cmd;
987 instance->instancet->issue_dcmd(instance, cmd);
988 return 0;
989 }
990
991 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
992
993 /* Below code is only for non pended DCMD */
994 if (instance->ctrl_context && !instance->mask_interrupts)
995 ret = megasas_issue_blocked_cmd(instance, cmd,
996 MFI_IO_TIMEOUT_SECS);
997 else
998 ret = megasas_issue_polled(instance, cmd);
999
1000 if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
1001 dev_warn(&instance->pdev->dev,
1002 "driver supports max %d JBOD, but FW reports %d\n",
1003 MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
1004 ret = -EINVAL;
1005 }
1006
1007 if (ret == DCMD_TIMEOUT && instance->ctrl_context)
1008 megaraid_sas_kill_hba(instance);
1009
1010 if (ret == DCMD_SUCCESS)
1011 instance->pd_seq_map_id++;
1012
1013 megasas_return_cmd(instance, cmd);
1014 return ret;
1015 }
1016
1017 /*
1018 * megasas_get_ld_map_info - Returns FW's ld_map structure
1019 * @instance: Adapter soft state
1020 * @pend: Pend the command or not
1021 * Issues an internal command (DCMD) to get the FW's controller PD
1022 * list structure. This information is mainly used to find out SYSTEM
1023 * supported by the FW.
1024 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
1025 * dcmd.mbox.b[0] - number of LDs being sync'd
1026 * dcmd.mbox.b[1] - 0 - complete command immediately.
1027 * - 1 - pend till config change
1028 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
1029 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
1030 * uses extended struct MR_FW_RAID_MAP_EXT
1031 */
1032 static int
1033 megasas_get_ld_map_info(struct megasas_instance *instance)
1034 {
1035 int ret = 0;
1036 struct megasas_cmd *cmd;
1037 struct megasas_dcmd_frame *dcmd;
1038 void *ci;
1039 dma_addr_t ci_h = 0;
1040 u32 size_map_info;
1041 struct fusion_context *fusion;
1042
1043 cmd = megasas_get_cmd(instance);
1044
1045 if (!cmd) {
1046 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
1047 return -ENOMEM;
1048 }
1049
1050 fusion = instance->ctrl_context;
1051
1052 if (!fusion) {
1053 megasas_return_cmd(instance, cmd);
1054 return -ENXIO;
1055 }
1056
1057 dcmd = &cmd->frame->dcmd;
1058
1059 size_map_info = fusion->current_map_sz;
1060
1061 ci = (void *) fusion->ld_map[(instance->map_id & 1)];
1062 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
1063
1064 if (!ci) {
1065 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
1066 megasas_return_cmd(instance, cmd);
1067 return -ENOMEM;
1068 }
1069
1070 memset(ci, 0, fusion->max_map_sz);
1071 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1072 dcmd->cmd = MFI_CMD_DCMD;
1073 dcmd->cmd_status = 0xFF;
1074 dcmd->sge_count = 1;
1075 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
1076 dcmd->timeout = 0;
1077 dcmd->pad_0 = 0;
1078 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1079 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1080 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1081 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1082
1083 if (instance->ctrl_context && !instance->mask_interrupts)
1084 ret = megasas_issue_blocked_cmd(instance, cmd,
1085 MFI_IO_TIMEOUT_SECS);
1086 else
1087 ret = megasas_issue_polled(instance, cmd);
1088
1089 if (ret == DCMD_TIMEOUT && instance->ctrl_context)
1090 megaraid_sas_kill_hba(instance);
1091
1092 megasas_return_cmd(instance, cmd);
1093
1094 return ret;
1095 }
1096
1097 u8
1098 megasas_get_map_info(struct megasas_instance *instance)
1099 {
1100 struct fusion_context *fusion = instance->ctrl_context;
1101
1102 fusion->fast_path_io = 0;
1103 if (!megasas_get_ld_map_info(instance)) {
1104 if (MR_ValidateMapInfo(instance)) {
1105 fusion->fast_path_io = 1;
1106 return 0;
1107 }
1108 }
1109 return 1;
1110 }
1111
1112 /*
1113 * megasas_sync_map_info - Returns FW's ld_map structure
1114 * @instance: Adapter soft state
1115 *
1116 * Issues an internal command (DCMD) to get the FW's controller PD
1117 * list structure. This information is mainly used to find out SYSTEM
1118 * supported by the FW.
1119 */
1120 int
1121 megasas_sync_map_info(struct megasas_instance *instance)
1122 {
1123 int i;
1124 struct megasas_cmd *cmd;
1125 struct megasas_dcmd_frame *dcmd;
1126 u16 num_lds;
1127 u32 size_sync_info;
1128 struct fusion_context *fusion;
1129 struct MR_LD_TARGET_SYNC *ci = NULL;
1130 struct MR_DRV_RAID_MAP_ALL *map;
1131 struct MR_LD_RAID *raid;
1132 struct MR_LD_TARGET_SYNC *ld_sync;
1133 dma_addr_t ci_h = 0;
1134 u32 size_map_info;
1135
1136 cmd = megasas_get_cmd(instance);
1137
1138 if (!cmd) {
1139 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
1140 return -ENOMEM;
1141 }
1142
1143 fusion = instance->ctrl_context;
1144
1145 if (!fusion) {
1146 megasas_return_cmd(instance, cmd);
1147 return 1;
1148 }
1149
1150 map = fusion->ld_drv_map[instance->map_id & 1];
1151
1152 num_lds = le16_to_cpu(map->raidMap.ldCount);
1153
1154 dcmd = &cmd->frame->dcmd;
1155
1156 size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
1157
1158 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1159
1160 ci = (struct MR_LD_TARGET_SYNC *)
1161 fusion->ld_map[(instance->map_id - 1) & 1];
1162 memset(ci, 0, fusion->max_map_sz);
1163
1164 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1165
1166 ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
1167
1168 for (i = 0; i < num_lds; i++, ld_sync++) {
1169 raid = MR_LdRaidGet(i, map);
1170 ld_sync->targetId = MR_GetLDTgtId(i, map);
1171 ld_sync->seqNum = raid->seqNum;
1172 }
1173
1174 size_map_info = fusion->current_map_sz;
1175
1176 dcmd->cmd = MFI_CMD_DCMD;
1177 dcmd->cmd_status = 0xFF;
1178 dcmd->sge_count = 1;
1179 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
1180 dcmd->timeout = 0;
1181 dcmd->pad_0 = 0;
1182 dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1183 dcmd->mbox.b[0] = num_lds;
1184 dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1185 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1186 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
1187 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
1188
1189 instance->map_update_cmd = cmd;
1190
1191 instance->instancet->issue_dcmd(instance, cmd);
1192
1193 return 0;
1194 }
1195
1196 /*
1197 * meagasas_display_intel_branding - Display branding string
1198 * @instance: per adapter object
1199 *
1200 * Return nothing.
1201 */
1202 static void
1203 megasas_display_intel_branding(struct megasas_instance *instance)
1204 {
1205 if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1206 return;
1207
1208 switch (instance->pdev->device) {
1209 case PCI_DEVICE_ID_LSI_INVADER:
1210 switch (instance->pdev->subsystem_device) {
1211 case MEGARAID_INTEL_RS3DC080_SSDID:
1212 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1213 instance->host->host_no,
1214 MEGARAID_INTEL_RS3DC080_BRANDING);
1215 break;
1216 case MEGARAID_INTEL_RS3DC040_SSDID:
1217 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1218 instance->host->host_no,
1219 MEGARAID_INTEL_RS3DC040_BRANDING);
1220 break;
1221 case MEGARAID_INTEL_RS3SC008_SSDID:
1222 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1223 instance->host->host_no,
1224 MEGARAID_INTEL_RS3SC008_BRANDING);
1225 break;
1226 case MEGARAID_INTEL_RS3MC044_SSDID:
1227 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1228 instance->host->host_no,
1229 MEGARAID_INTEL_RS3MC044_BRANDING);
1230 break;
1231 default:
1232 break;
1233 }
1234 break;
1235 case PCI_DEVICE_ID_LSI_FURY:
1236 switch (instance->pdev->subsystem_device) {
1237 case MEGARAID_INTEL_RS3WC080_SSDID:
1238 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1239 instance->host->host_no,
1240 MEGARAID_INTEL_RS3WC080_BRANDING);
1241 break;
1242 case MEGARAID_INTEL_RS3WC040_SSDID:
1243 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1244 instance->host->host_no,
1245 MEGARAID_INTEL_RS3WC040_BRANDING);
1246 break;
1247 default:
1248 break;
1249 }
1250 break;
1251 case PCI_DEVICE_ID_LSI_CUTLASS_52:
1252 case PCI_DEVICE_ID_LSI_CUTLASS_53:
1253 switch (instance->pdev->subsystem_device) {
1254 case MEGARAID_INTEL_RMS3BC160_SSDID:
1255 dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1256 instance->host->host_no,
1257 MEGARAID_INTEL_RMS3BC160_BRANDING);
1258 break;
1259 default:
1260 break;
1261 }
1262 break;
1263 default:
1264 break;
1265 }
1266 }
1267
1268 /**
1269 * megasas_allocate_raid_maps - Allocate memory for RAID maps
1270 * @instance: Adapter soft state
1271 *
1272 * return: if success: return 0
1273 * failed: return -ENOMEM
1274 */
1275 static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
1276 {
1277 struct fusion_context *fusion;
1278 int i = 0;
1279
1280 fusion = instance->ctrl_context;
1281
1282 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1283
1284 for (i = 0; i < 2; i++) {
1285 fusion->ld_map[i] = NULL;
1286
1287 fusion->ld_drv_map[i] = (void *)
1288 __get_free_pages(__GFP_ZERO | GFP_KERNEL,
1289 fusion->drv_map_pages);
1290
1291 if (!fusion->ld_drv_map[i]) {
1292 fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
1293
1294 if (!fusion->ld_drv_map[i]) {
1295 dev_err(&instance->pdev->dev,
1296 "Could not allocate memory for local map"
1297 " size requested: %d\n",
1298 fusion->drv_map_sz);
1299 goto ld_drv_map_alloc_fail;
1300 }
1301 }
1302 }
1303
1304 for (i = 0; i < 2; i++) {
1305 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1306 fusion->max_map_sz,
1307 &fusion->ld_map_phys[i],
1308 GFP_KERNEL);
1309 if (!fusion->ld_map[i]) {
1310 dev_err(&instance->pdev->dev,
1311 "Could not allocate memory for map info %s:%d\n",
1312 __func__, __LINE__);
1313 goto ld_map_alloc_fail;
1314 }
1315 }
1316
1317 return 0;
1318
1319 ld_map_alloc_fail:
1320 for (i = 0; i < 2; i++) {
1321 if (fusion->ld_map[i])
1322 dma_free_coherent(&instance->pdev->dev,
1323 fusion->max_map_sz,
1324 fusion->ld_map[i],
1325 fusion->ld_map_phys[i]);
1326 }
1327
1328 ld_drv_map_alloc_fail:
1329 for (i = 0; i < 2; i++) {
1330 if (fusion->ld_drv_map[i]) {
1331 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
1332 vfree(fusion->ld_drv_map[i]);
1333 else
1334 free_pages((ulong)fusion->ld_drv_map[i],
1335 fusion->drv_map_pages);
1336 }
1337 }
1338
1339 return -ENOMEM;
1340 }
1341
1342 /**
1343 * megasas_init_adapter_fusion - Initializes the FW
1344 * @instance: Adapter soft state
1345 *
1346 * This is the main function for initializing firmware.
1347 */
1348 u32
1349 megasas_init_adapter_fusion(struct megasas_instance *instance)
1350 {
1351 struct megasas_register_set __iomem *reg_set;
1352 struct fusion_context *fusion;
1353 u16 max_cmd;
1354 u32 scratch_pad_2;
1355 int i = 0, count;
1356
1357 fusion = instance->ctrl_context;
1358
1359 reg_set = instance->reg_set;
1360
1361 megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1362
1363 /*
1364 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1365 */
1366 instance->max_mfi_cmds =
1367 MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1368
1369 max_cmd = instance->max_fw_cmds;
1370
1371 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
1372
1373 fusion->request_alloc_sz =
1374 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * instance->max_mpt_cmds;
1375 fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
1376 *(fusion->reply_q_depth);
1377 fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1378 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1379 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1380
1381 scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
1382 /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1383 * Firmware support extended IO chain frame which is 4 times more than
1384 * legacy Firmware.
1385 * Legacy Firmware - Frame size is (8 * 128) = 1K
1386 * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
1387 */
1388 if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1389 instance->max_chain_frame_sz =
1390 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1391 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1392 else
1393 instance->max_chain_frame_sz =
1394 ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1395 MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1396
1397 if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1398 dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1399 instance->max_chain_frame_sz,
1400 MEGASAS_CHAIN_FRAME_SZ_MIN);
1401 instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1402 }
1403
1404 fusion->max_sge_in_main_msg =
1405 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1406 - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1407
1408 fusion->max_sge_in_chain =
1409 instance->max_chain_frame_sz
1410 / sizeof(union MPI2_SGE_IO_UNION);
1411
1412 instance->max_num_sge =
1413 rounddown_pow_of_two(fusion->max_sge_in_main_msg
1414 + fusion->max_sge_in_chain - 2);
1415
1416 /* Used for pass thru MFI frame (DCMD) */
1417 fusion->chain_offset_mfi_pthru =
1418 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1419
1420 fusion->chain_offset_io_request =
1421 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1422 sizeof(union MPI2_SGE_IO_UNION))/16;
1423
1424 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1425 for (i = 0 ; i < count; i++)
1426 fusion->last_reply_idx[i] = 0;
1427
1428 /*
1429 * For fusion adapters, 3 commands for IOCTL and 8 commands
1430 * for driver's internal DCMDs.
1431 */
1432 instance->max_scsi_cmds = instance->max_fw_cmds -
1433 (MEGASAS_FUSION_INTERNAL_CMDS +
1434 MEGASAS_FUSION_IOCTL_CMDS);
1435 sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1436
1437 /*
1438 * Allocate memory for descriptors
1439 * Create a pool of commands
1440 */
1441 if (megasas_alloc_cmds(instance))
1442 goto fail_alloc_mfi_cmds;
1443 if (megasas_alloc_cmds_fusion(instance))
1444 goto fail_alloc_cmds;
1445
1446 if (megasas_ioc_init_fusion(instance))
1447 goto fail_ioc_init;
1448
1449 megasas_display_intel_branding(instance);
1450 if (megasas_get_ctrl_info(instance)) {
1451 dev_err(&instance->pdev->dev,
1452 "Could not get controller info. Fail from %s %d\n",
1453 __func__, __LINE__);
1454 goto fail_ioc_init;
1455 }
1456
1457 instance->flag_ieee = 1;
1458 instance->r1_ldio_hint_default = MR_R1_LDIO_PIGGYBACK_DEFAULT;
1459 fusion->fast_path_io = 0;
1460
1461 if (megasas_allocate_raid_maps(instance))
1462 goto fail_ioc_init;
1463
1464 if (!megasas_get_map_info(instance))
1465 megasas_sync_map_info(instance);
1466
1467 return 0;
1468
1469 fail_ioc_init:
1470 megasas_free_cmds_fusion(instance);
1471 fail_alloc_cmds:
1472 megasas_free_cmds(instance);
1473 fail_alloc_mfi_cmds:
1474 return 1;
1475 }
1476
1477 /**
1478 * map_cmd_status - Maps FW cmd status to OS cmd status
1479 * @cmd : Pointer to cmd
1480 * @status : status of cmd returned by FW
1481 * @ext_status : ext status of cmd returned by FW
1482 */
1483
1484 void
1485 map_cmd_status(struct fusion_context *fusion,
1486 struct scsi_cmnd *scmd, u8 status, u8 ext_status,
1487 u32 data_length, u8 *sense)
1488 {
1489 u8 cmd_type;
1490 int resid;
1491
1492 cmd_type = megasas_cmd_type(scmd);
1493 switch (status) {
1494
1495 case MFI_STAT_OK:
1496 scmd->result = DID_OK << 16;
1497 break;
1498
1499 case MFI_STAT_SCSI_IO_FAILED:
1500 case MFI_STAT_LD_INIT_IN_PROGRESS:
1501 scmd->result = (DID_ERROR << 16) | ext_status;
1502 break;
1503
1504 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1505
1506 scmd->result = (DID_OK << 16) | ext_status;
1507 if (ext_status == SAM_STAT_CHECK_CONDITION) {
1508 memset(scmd->sense_buffer, 0,
1509 SCSI_SENSE_BUFFERSIZE);
1510 memcpy(scmd->sense_buffer, sense,
1511 SCSI_SENSE_BUFFERSIZE);
1512 scmd->result |= DRIVER_SENSE << 24;
1513 }
1514
1515 /*
1516 * If the IO request is partially completed, then MR FW will
1517 * update "io_request->DataLength" field with actual number of
1518 * bytes transferred.Driver will set residual bytes count in
1519 * SCSI command structure.
1520 */
1521 resid = (scsi_bufflen(scmd) - data_length);
1522 scsi_set_resid(scmd, resid);
1523
1524 if (resid &&
1525 ((cmd_type == READ_WRITE_LDIO) ||
1526 (cmd_type == READ_WRITE_SYSPDIO)))
1527 scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
1528 " requested/completed 0x%x/0x%x\n",
1529 status, scsi_bufflen(scmd), data_length);
1530 break;
1531
1532 case MFI_STAT_LD_OFFLINE:
1533 case MFI_STAT_DEVICE_NOT_FOUND:
1534 scmd->result = DID_BAD_TARGET << 16;
1535 break;
1536 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1537 scmd->result = DID_IMM_RETRY << 16;
1538 break;
1539 default:
1540 scmd->result = DID_ERROR << 16;
1541 break;
1542 }
1543 }
1544
1545 /**
1546 * megasas_is_prp_possible -
1547 * Checks if native NVMe PRPs can be built for the IO
1548 *
1549 * @instance: Adapter soft state
1550 * @scmd: SCSI command from the mid-layer
1551 * @sge_count: scatter gather element count.
1552 *
1553 * Returns: true: PRPs can be built
1554 * false: IEEE SGLs needs to be built
1555 */
1556 static bool
1557 megasas_is_prp_possible(struct megasas_instance *instance,
1558 struct scsi_cmnd *scmd, int sge_count)
1559 {
1560 struct fusion_context *fusion;
1561 int i;
1562 u32 data_length = 0;
1563 struct scatterlist *sg_scmd;
1564 bool build_prp = false;
1565 u32 mr_nvme_pg_size;
1566
1567 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1568 MR_DEFAULT_NVME_PAGE_SIZE);
1569 fusion = instance->ctrl_context;
1570 data_length = scsi_bufflen(scmd);
1571 sg_scmd = scsi_sglist(scmd);
1572
1573 /*
1574 * NVMe uses one PRP for each page (or part of a page)
1575 * look at the data length - if 4 pages or less then IEEE is OK
1576 * if > 5 pages then we need to build a native SGL
1577 * if > 4 and <= 5 pages, then check physical address of 1st SG entry
1578 * if this first size in the page is >= the residual beyond 4 pages
1579 * then use IEEE, otherwise use native SGL
1580 */
1581
1582 if (data_length > (mr_nvme_pg_size * 5)) {
1583 build_prp = true;
1584 } else if ((data_length > (mr_nvme_pg_size * 4)) &&
1585 (data_length <= (mr_nvme_pg_size * 5))) {
1586 /* check if 1st SG entry size is < residual beyond 4 pages */
1587 if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
1588 build_prp = true;
1589 }
1590
1591 /*
1592 * Below code detects gaps/holes in IO data buffers.
1593 * What does holes/gaps mean?
1594 * Any SGE except first one in a SGL starts at non NVME page size
1595 * aligned address OR Any SGE except last one in a SGL ends at
1596 * non NVME page size boundary.
1597 *
1598 * Driver has already informed block layer by setting boundary rules for
1599 * bio merging done at NVME page size boundary calling kernel API
1600 * blk_queue_virt_boundary inside slave_config.
1601 * Still there is possibility of IO coming with holes to driver because of
1602 * IO merging done by IO scheduler.
1603 *
1604 * With SCSI BLK MQ enabled, there will be no IO with holes as there is no
1605 * IO scheduling so no IO merging.
1606 *
1607 * With SCSI BLK MQ disabled, IO scheduler may attempt to merge IOs and
1608 * then sending IOs with holes.
1609 *
1610 * Though driver can request block layer to disable IO merging by calling-
1611 * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but
1612 * user may tune sysfs parameter- nomerges again to 0 or 1.
1613 *
1614 * If in future IO scheduling is enabled with SCSI BLK MQ,
1615 * this algorithm to detect holes will be required in driver
1616 * for SCSI BLK MQ enabled case as well.
1617 *
1618 *
1619 */
1620 scsi_for_each_sg(scmd, sg_scmd, sge_count, i) {
1621 if ((i != 0) && (i != (sge_count - 1))) {
1622 if (mega_mod64(sg_dma_len(sg_scmd), mr_nvme_pg_size) ||
1623 mega_mod64(sg_dma_address(sg_scmd),
1624 mr_nvme_pg_size)) {
1625 build_prp = false;
1626 atomic_inc(&instance->sge_holes_type1);
1627 break;
1628 }
1629 }
1630
1631 if ((sge_count > 1) && (i == 0)) {
1632 if ((mega_mod64((sg_dma_address(sg_scmd) +
1633 sg_dma_len(sg_scmd)),
1634 mr_nvme_pg_size))) {
1635 build_prp = false;
1636 atomic_inc(&instance->sge_holes_type2);
1637 break;
1638 }
1639 }
1640
1641 if ((sge_count > 1) && (i == (sge_count - 1))) {
1642 if (mega_mod64(sg_dma_address(sg_scmd),
1643 mr_nvme_pg_size)) {
1644 build_prp = false;
1645 atomic_inc(&instance->sge_holes_type3);
1646 break;
1647 }
1648 }
1649 }
1650
1651 return build_prp;
1652 }
1653
1654 /**
1655 * megasas_make_prp_nvme -
1656 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
1657 *
1658 * @instance: Adapter soft state
1659 * @scmd: SCSI command from the mid-layer
1660 * @sgl_ptr: SGL to be filled in
1661 * @cmd: Fusion command frame
1662 * @sge_count: scatter gather element count.
1663 *
1664 * Returns: true: PRPs are built
1665 * false: IEEE SGLs needs to be built
1666 */
1667 static bool
1668 megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
1669 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1670 struct megasas_cmd_fusion *cmd, int sge_count)
1671 {
1672 int sge_len, offset, num_prp_in_chain = 0;
1673 struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
1674 u64 *ptr_sgl;
1675 dma_addr_t ptr_sgl_phys;
1676 u64 sge_addr;
1677 u32 page_mask, page_mask_result;
1678 struct scatterlist *sg_scmd;
1679 u32 first_prp_len;
1680 bool build_prp = false;
1681 int data_len = scsi_bufflen(scmd);
1682 struct fusion_context *fusion;
1683 u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1684 MR_DEFAULT_NVME_PAGE_SIZE);
1685
1686 fusion = instance->ctrl_context;
1687
1688 build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
1689
1690 if (!build_prp)
1691 return false;
1692
1693 /*
1694 * Nvme has a very convoluted prp format. One prp is required
1695 * for each page or partial page. Driver need to split up OS sg_list
1696 * entries if it is longer than one page or cross a page
1697 * boundary. Driver also have to insert a PRP list pointer entry as
1698 * the last entry in each physical page of the PRP list.
1699 *
1700 * NOTE: The first PRP "entry" is actually placed in the first
1701 * SGL entry in the main message as IEEE 64 format. The 2nd
1702 * entry in the main message is the chain element, and the rest
1703 * of the PRP entries are built in the contiguous pcie buffer.
1704 */
1705 page_mask = mr_nvme_pg_size - 1;
1706 ptr_sgl = (u64 *)cmd->sg_frame;
1707 ptr_sgl_phys = cmd->sg_frame_phys_addr;
1708 memset(ptr_sgl, 0, instance->max_chain_frame_sz);
1709
1710 /* Build chain frame element which holds all prps except first*/
1711 main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
1712 ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
1713
1714 main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
1715 main_chain_element->NextChainOffset = 0;
1716 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1717 IEEE_SGE_FLAGS_SYSTEM_ADDR |
1718 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
1719
1720 /* Build first prp, sge need not to be page aligned*/
1721 ptr_first_sgl = sgl_ptr;
1722 sg_scmd = scsi_sglist(scmd);
1723 sge_addr = sg_dma_address(sg_scmd);
1724 sge_len = sg_dma_len(sg_scmd);
1725
1726 offset = (u32)(sge_addr & page_mask);
1727 first_prp_len = mr_nvme_pg_size - offset;
1728
1729 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
1730 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
1731
1732 data_len -= first_prp_len;
1733
1734 if (sge_len > first_prp_len) {
1735 sge_addr += first_prp_len;
1736 sge_len -= first_prp_len;
1737 } else if (sge_len == first_prp_len) {
1738 sg_scmd = sg_next(sg_scmd);
1739 sge_addr = sg_dma_address(sg_scmd);
1740 sge_len = sg_dma_len(sg_scmd);
1741 }
1742
1743 for (;;) {
1744 offset = (u32)(sge_addr & page_mask);
1745
1746 /* Put PRP pointer due to page boundary*/
1747 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
1748 if (unlikely(!page_mask_result)) {
1749 scmd_printk(KERN_NOTICE,
1750 scmd, "page boundary ptr_sgl: 0x%p\n",
1751 ptr_sgl);
1752 ptr_sgl_phys += 8;
1753 *ptr_sgl = cpu_to_le64(ptr_sgl_phys);
1754 ptr_sgl++;
1755 num_prp_in_chain++;
1756 }
1757
1758 *ptr_sgl = cpu_to_le64(sge_addr);
1759 ptr_sgl++;
1760 ptr_sgl_phys += 8;
1761 num_prp_in_chain++;
1762
1763 sge_addr += mr_nvme_pg_size;
1764 sge_len -= mr_nvme_pg_size;
1765 data_len -= mr_nvme_pg_size;
1766
1767 if (data_len <= 0)
1768 break;
1769
1770 if (sge_len > 0)
1771 continue;
1772
1773 sg_scmd = sg_next(sg_scmd);
1774 sge_addr = sg_dma_address(sg_scmd);
1775 sge_len = sg_dma_len(sg_scmd);
1776 }
1777
1778 main_chain_element->Length =
1779 cpu_to_le32(num_prp_in_chain * sizeof(u64));
1780
1781 atomic_inc(&instance->prp_sgl);
1782 return build_prp;
1783 }
1784
1785 /**
1786 * megasas_make_sgl_fusion - Prepares 32-bit SGL
1787 * @instance: Adapter soft state
1788 * @scp: SCSI command from the mid-layer
1789 * @sgl_ptr: SGL to be filled in
1790 * @cmd: cmd we are working on
1791 * @sge_count sge count
1792 *
1793 */
1794 static void
1795 megasas_make_sgl_fusion(struct megasas_instance *instance,
1796 struct scsi_cmnd *scp,
1797 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1798 struct megasas_cmd_fusion *cmd, int sge_count)
1799 {
1800 int i, sg_processed;
1801 struct scatterlist *os_sgl;
1802 struct fusion_context *fusion;
1803
1804 fusion = instance->ctrl_context;
1805
1806 if (fusion->adapter_type >= INVADER_SERIES) {
1807 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1808 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1809 sgl_ptr_end->Flags = 0;
1810 }
1811
1812 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1813 sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
1814 sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
1815 sgl_ptr->Flags = 0;
1816 if (fusion->adapter_type >= INVADER_SERIES)
1817 if (i == sge_count - 1)
1818 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1819 sgl_ptr++;
1820 sg_processed = i + 1;
1821
1822 if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
1823 (sge_count > fusion->max_sge_in_main_msg)) {
1824
1825 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1826 if (fusion->adapter_type >= INVADER_SERIES) {
1827 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1828 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1829 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1830 cmd->io_request->ChainOffset =
1831 fusion->
1832 chain_offset_io_request;
1833 else
1834 cmd->io_request->ChainOffset = 0;
1835 } else
1836 cmd->io_request->ChainOffset =
1837 fusion->chain_offset_io_request;
1838
1839 sg_chain = sgl_ptr;
1840 /* Prepare chain element */
1841 sg_chain->NextChainOffset = 0;
1842 if (fusion->adapter_type >= INVADER_SERIES)
1843 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1844 else
1845 sg_chain->Flags =
1846 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1847 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1848 sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
1849 sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
1850
1851 sgl_ptr =
1852 (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
1853 memset(sgl_ptr, 0, instance->max_chain_frame_sz);
1854 }
1855 }
1856 atomic_inc(&instance->ieee_sgl);
1857 }
1858
1859 /**
1860 * megasas_make_sgl - Build Scatter Gather List(SGLs)
1861 * @scp: SCSI command pointer
1862 * @instance: Soft instance of controller
1863 * @cmd: Fusion command pointer
1864 *
1865 * This function will build sgls based on device type.
1866 * For nvme drives, there is different way of building sgls in nvme native
1867 * format- PRPs(Physical Region Page).
1868 *
1869 * Returns the number of sg lists actually used, zero if the sg lists
1870 * is NULL, or -ENOMEM if the mapping failed
1871 */
1872 static
1873 int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
1874 struct megasas_cmd_fusion *cmd)
1875 {
1876 int sge_count;
1877 bool build_prp = false;
1878 struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
1879
1880 sge_count = scsi_dma_map(scp);
1881
1882 if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
1883 return sge_count;
1884
1885 sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
1886 if ((le16_to_cpu(cmd->io_request->IoFlags) &
1887 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
1888 (cmd->pd_interface == NVME_PD))
1889 build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
1890 cmd, sge_count);
1891
1892 if (!build_prp)
1893 megasas_make_sgl_fusion(instance, scp, sgl_chain64,
1894 cmd, sge_count);
1895
1896 return sge_count;
1897 }
1898
1899 /**
1900 * megasas_set_pd_lba - Sets PD LBA
1901 * @cdb: CDB
1902 * @cdb_len: cdb length
1903 * @start_blk: Start block of IO
1904 *
1905 * Used to set the PD LBA in CDB for FP IOs
1906 */
1907 void
1908 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1909 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1910 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1911 {
1912 struct MR_LD_RAID *raid;
1913 u16 ld;
1914 u64 start_blk = io_info->pdBlock;
1915 u8 *cdb = io_request->CDB.CDB32;
1916 u32 num_blocks = io_info->numBlocks;
1917 u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
1918
1919 /* Check if T10 PI (DIF) is enabled for this LD */
1920 ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
1921 raid = MR_LdRaidGet(ld, local_map_ptr);
1922 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
1923 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1924 cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
1925 cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
1926
1927 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1928 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
1929 else
1930 cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
1931 cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
1932
1933 /* LBA */
1934 cdb[12] = (u8)((start_blk >> 56) & 0xff);
1935 cdb[13] = (u8)((start_blk >> 48) & 0xff);
1936 cdb[14] = (u8)((start_blk >> 40) & 0xff);
1937 cdb[15] = (u8)((start_blk >> 32) & 0xff);
1938 cdb[16] = (u8)((start_blk >> 24) & 0xff);
1939 cdb[17] = (u8)((start_blk >> 16) & 0xff);
1940 cdb[18] = (u8)((start_blk >> 8) & 0xff);
1941 cdb[19] = (u8)(start_blk & 0xff);
1942
1943 /* Logical block reference tag */
1944 io_request->CDB.EEDP32.PrimaryReferenceTag =
1945 cpu_to_be32(ref_tag);
1946 io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
1947 io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
1948
1949 /* Transfer length */
1950 cdb[28] = (u8)((num_blocks >> 24) & 0xff);
1951 cdb[29] = (u8)((num_blocks >> 16) & 0xff);
1952 cdb[30] = (u8)((num_blocks >> 8) & 0xff);
1953 cdb[31] = (u8)(num_blocks & 0xff);
1954
1955 /* set SCSI IO EEDPFlags */
1956 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
1957 io_request->EEDPFlags = cpu_to_le16(
1958 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1959 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
1960 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
1961 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
1962 MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
1963 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
1964 } else {
1965 io_request->EEDPFlags = cpu_to_le16(
1966 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
1967 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
1968 }
1969 io_request->Control |= cpu_to_le32((0x4 << 26));
1970 io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
1971 } else {
1972 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
1973 if (((cdb_len == 12) || (cdb_len == 16)) &&
1974 (start_blk <= 0xffffffff)) {
1975 if (cdb_len == 16) {
1976 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
1977 flagvals = cdb[1];
1978 groupnum = cdb[14];
1979 control = cdb[15];
1980 } else {
1981 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
1982 flagvals = cdb[1];
1983 groupnum = cdb[10];
1984 control = cdb[11];
1985 }
1986
1987 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
1988
1989 cdb[0] = opcode;
1990 cdb[1] = flagvals;
1991 cdb[6] = groupnum;
1992 cdb[9] = control;
1993
1994 /* Transfer length */
1995 cdb[8] = (u8)(num_blocks & 0xff);
1996 cdb[7] = (u8)((num_blocks >> 8) & 0xff);
1997
1998 io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
1999 cdb_len = 10;
2000 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
2001 /* Convert to 16 byte CDB for large LBA's */
2002 switch (cdb_len) {
2003 case 6:
2004 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
2005 control = cdb[5];
2006 break;
2007 case 10:
2008 opcode =
2009 cdb[0] == READ_10 ? READ_16 : WRITE_16;
2010 flagvals = cdb[1];
2011 groupnum = cdb[6];
2012 control = cdb[9];
2013 break;
2014 case 12:
2015 opcode =
2016 cdb[0] == READ_12 ? READ_16 : WRITE_16;
2017 flagvals = cdb[1];
2018 groupnum = cdb[10];
2019 control = cdb[11];
2020 break;
2021 }
2022
2023 memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2024
2025 cdb[0] = opcode;
2026 cdb[1] = flagvals;
2027 cdb[14] = groupnum;
2028 cdb[15] = control;
2029
2030 /* Transfer length */
2031 cdb[13] = (u8)(num_blocks & 0xff);
2032 cdb[12] = (u8)((num_blocks >> 8) & 0xff);
2033 cdb[11] = (u8)((num_blocks >> 16) & 0xff);
2034 cdb[10] = (u8)((num_blocks >> 24) & 0xff);
2035
2036 io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
2037 cdb_len = 16;
2038 }
2039
2040 /* Normal case, just load LBA here */
2041 switch (cdb_len) {
2042 case 6:
2043 {
2044 u8 val = cdb[1] & 0xE0;
2045 cdb[3] = (u8)(start_blk & 0xff);
2046 cdb[2] = (u8)((start_blk >> 8) & 0xff);
2047 cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
2048 break;
2049 }
2050 case 10:
2051 cdb[5] = (u8)(start_blk & 0xff);
2052 cdb[4] = (u8)((start_blk >> 8) & 0xff);
2053 cdb[3] = (u8)((start_blk >> 16) & 0xff);
2054 cdb[2] = (u8)((start_blk >> 24) & 0xff);
2055 break;
2056 case 12:
2057 cdb[5] = (u8)(start_blk & 0xff);
2058 cdb[4] = (u8)((start_blk >> 8) & 0xff);
2059 cdb[3] = (u8)((start_blk >> 16) & 0xff);
2060 cdb[2] = (u8)((start_blk >> 24) & 0xff);
2061 break;
2062 case 16:
2063 cdb[9] = (u8)(start_blk & 0xff);
2064 cdb[8] = (u8)((start_blk >> 8) & 0xff);
2065 cdb[7] = (u8)((start_blk >> 16) & 0xff);
2066 cdb[6] = (u8)((start_blk >> 24) & 0xff);
2067 cdb[5] = (u8)((start_blk >> 32) & 0xff);
2068 cdb[4] = (u8)((start_blk >> 40) & 0xff);
2069 cdb[3] = (u8)((start_blk >> 48) & 0xff);
2070 cdb[2] = (u8)((start_blk >> 56) & 0xff);
2071 break;
2072 }
2073 }
2074 }
2075
2076 /**
2077 * megasas_stream_detect - stream detection on read and and write IOs
2078 * @instance: Adapter soft state
2079 * @cmd: Command to be prepared
2080 * @io_info: IO Request info
2081 *
2082 */
2083
2084 /** stream detection on read and and write IOs */
2085 static void megasas_stream_detect(struct megasas_instance *instance,
2086 struct megasas_cmd_fusion *cmd,
2087 struct IO_REQUEST_INFO *io_info)
2088 {
2089 struct fusion_context *fusion = instance->ctrl_context;
2090 u32 device_id = io_info->ldTgtId;
2091 struct LD_STREAM_DETECT *current_ld_sd
2092 = fusion->stream_detect_by_ld[device_id];
2093 u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
2094 u32 shifted_values, unshifted_values;
2095 u32 index_value_mask, shifted_values_mask;
2096 int i;
2097 bool is_read_ahead = false;
2098 struct STREAM_DETECT *current_sd;
2099 /* find possible stream */
2100 for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
2101 stream_num = (*track_stream >>
2102 (i * BITS_PER_INDEX_STREAM)) &
2103 STREAM_MASK;
2104 current_sd = &current_ld_sd->stream_track[stream_num];
2105 /* if we found a stream, update the raid
2106 * context and also update the mruBitMap
2107 */
2108 /* boundary condition */
2109 if ((current_sd->next_seq_lba) &&
2110 (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
2111 (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
2112 (current_sd->is_read == io_info->isRead)) {
2113
2114 if ((io_info->ldStartBlock != current_sd->next_seq_lba) &&
2115 ((!io_info->isRead) || (!is_read_ahead)))
2116 /*
2117 * Once the API availible we need to change this.
2118 * At this point we are not allowing any gap
2119 */
2120 continue;
2121
2122 SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
2123 current_sd->next_seq_lba =
2124 io_info->ldStartBlock + io_info->numBlocks;
2125 /*
2126 * update the mruBitMap LRU
2127 */
2128 shifted_values_mask =
2129 (1 << i * BITS_PER_INDEX_STREAM) - 1;
2130 shifted_values = ((*track_stream & shifted_values_mask)
2131 << BITS_PER_INDEX_STREAM);
2132 index_value_mask =
2133 STREAM_MASK << i * BITS_PER_INDEX_STREAM;
2134 unshifted_values =
2135 *track_stream & ~(shifted_values_mask |
2136 index_value_mask);
2137 *track_stream =
2138 unshifted_values | shifted_values | stream_num;
2139 return;
2140 }
2141 }
2142 /*
2143 * if we did not find any stream, create a new one
2144 * from the least recently used
2145 */
2146 stream_num = (*track_stream >>
2147 ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
2148 STREAM_MASK;
2149 current_sd = &current_ld_sd->stream_track[stream_num];
2150 current_sd->is_read = io_info->isRead;
2151 current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
2152 *track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
2153 return;
2154 }
2155
2156 /**
2157 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2158 * affinity (cpu of the controller) and raid_flags in the raid context
2159 * based on IO type.
2160 *
2161 * @praid_context: IO RAID context
2162 * @raid: LD raid map
2163 * @fp_possible: Is fast path possible?
2164 * @is_read: Is read IO?
2165 *
2166 */
2167 static void
2168 megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context,
2169 struct MR_LD_RAID *raid, bool fp_possible,
2170 u8 is_read, u32 scsi_buff_len)
2171 {
2172 u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2173 struct RAID_CONTEXT_G35 *rctx_g35;
2174
2175 rctx_g35 = &praid_context->raid_context_g35;
2176 if (fp_possible) {
2177 if (is_read) {
2178 if ((raid->cpuAffinity.pdRead.cpu0) &&
2179 (raid->cpuAffinity.pdRead.cpu1))
2180 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2181 else if (raid->cpuAffinity.pdRead.cpu1)
2182 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2183 } else {
2184 if ((raid->cpuAffinity.pdWrite.cpu0) &&
2185 (raid->cpuAffinity.pdWrite.cpu1))
2186 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2187 else if (raid->cpuAffinity.pdWrite.cpu1)
2188 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2189 /* Fast path cache by pass capable R0/R1 VD */
2190 if ((raid->level <= 1) &&
2191 (raid->capability.fp_cache_bypass_capable)) {
2192 rctx_g35->routing_flags |=
2193 (1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
2194 rctx_g35->raid_flags =
2195 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2196 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2197 }
2198 }
2199 } else {
2200 if (is_read) {
2201 if ((raid->cpuAffinity.ldRead.cpu0) &&
2202 (raid->cpuAffinity.ldRead.cpu1))
2203 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2204 else if (raid->cpuAffinity.ldRead.cpu1)
2205 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2206 } else {
2207 if ((raid->cpuAffinity.ldWrite.cpu0) &&
2208 (raid->cpuAffinity.ldWrite.cpu1))
2209 cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2210 else if (raid->cpuAffinity.ldWrite.cpu1)
2211 cpu_sel = MR_RAID_CTX_CPUSEL_1;
2212
2213 if (is_stream_detected(rctx_g35) &&
2214 ((raid->level == 5) || (raid->level == 6)) &&
2215 (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2216 (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
2217 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2218 }
2219 }
2220
2221 rctx_g35->routing_flags |=
2222 (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2223
2224 /* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2225 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
2226 * IO Subtype is not bitmap.
2227 */
2228 if ((raid->level == 1) && (!is_read)) {
2229 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2230 praid_context->raid_context_g35.raid_flags =
2231 (MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2232 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2233 }
2234 }
2235
2236 /**
2237 * megasas_build_ldio_fusion - Prepares IOs to devices
2238 * @instance: Adapter soft state
2239 * @scp: SCSI command
2240 * @cmd: Command to be prepared
2241 *
2242 * Prepares the io_request and chain elements (sg_frame) for IO
2243 * The IO can be for PD (Fast Path) or LD
2244 */
2245 void
2246 megasas_build_ldio_fusion(struct megasas_instance *instance,
2247 struct scsi_cmnd *scp,
2248 struct megasas_cmd_fusion *cmd)
2249 {
2250 bool fp_possible;
2251 u16 ld;
2252 u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
2253 u32 scsi_buff_len;
2254 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2255 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2256 struct IO_REQUEST_INFO io_info;
2257 struct fusion_context *fusion;
2258 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2259 u8 *raidLUN;
2260 unsigned long spinlock_flags;
2261 union RAID_CONTEXT_UNION *praid_context;
2262 struct MR_LD_RAID *raid = NULL;
2263 struct MR_PRIV_DEVICE *mrdev_priv;
2264
2265 device_id = MEGASAS_DEV_INDEX(scp);
2266
2267 fusion = instance->ctrl_context;
2268
2269 io_request = cmd->io_request;
2270 io_request->RaidContext.raid_context.virtual_disk_tgt_id =
2271 cpu_to_le16(device_id);
2272 io_request->RaidContext.raid_context.status = 0;
2273 io_request->RaidContext.raid_context.ex_status = 0;
2274
2275 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2276
2277 start_lba_lo = 0;
2278 start_lba_hi = 0;
2279 fp_possible = false;
2280
2281 /*
2282 * 6-byte READ(0x08) or WRITE(0x0A) cdb
2283 */
2284 if (scp->cmd_len == 6) {
2285 datalength = (u32) scp->cmnd[4];
2286 start_lba_lo = ((u32) scp->cmnd[1] << 16) |
2287 ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
2288
2289 start_lba_lo &= 0x1FFFFF;
2290 }
2291
2292 /*
2293 * 10-byte READ(0x28) or WRITE(0x2A) cdb
2294 */
2295 else if (scp->cmd_len == 10) {
2296 datalength = (u32) scp->cmnd[8] |
2297 ((u32) scp->cmnd[7] << 8);
2298 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2299 ((u32) scp->cmnd[3] << 16) |
2300 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2301 }
2302
2303 /*
2304 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
2305 */
2306 else if (scp->cmd_len == 12) {
2307 datalength = ((u32) scp->cmnd[6] << 24) |
2308 ((u32) scp->cmnd[7] << 16) |
2309 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2310 start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2311 ((u32) scp->cmnd[3] << 16) |
2312 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2313 }
2314
2315 /*
2316 * 16-byte READ(0x88) or WRITE(0x8A) cdb
2317 */
2318 else if (scp->cmd_len == 16) {
2319 datalength = ((u32) scp->cmnd[10] << 24) |
2320 ((u32) scp->cmnd[11] << 16) |
2321 ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
2322 start_lba_lo = ((u32) scp->cmnd[6] << 24) |
2323 ((u32) scp->cmnd[7] << 16) |
2324 ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2325
2326 start_lba_hi = ((u32) scp->cmnd[2] << 24) |
2327 ((u32) scp->cmnd[3] << 16) |
2328 ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2329 }
2330
2331 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
2332 io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
2333 io_info.numBlocks = datalength;
2334 io_info.ldTgtId = device_id;
2335 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2336 scsi_buff_len = scsi_bufflen(scp);
2337 io_request->DataLength = cpu_to_le32(scsi_buff_len);
2338
2339 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
2340 io_info.isRead = 1;
2341
2342 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2343 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2344
2345 if (ld < instance->fw_supported_vd_count)
2346 raid = MR_LdRaidGet(ld, local_map_ptr);
2347
2348 if (!raid || (!fusion->fast_path_io)) {
2349 io_request->RaidContext.raid_context.reg_lock_flags = 0;
2350 fp_possible = false;
2351 } else {
2352 if (MR_BuildRaidContext(instance, &io_info,
2353 &io_request->RaidContext.raid_context,
2354 local_map_ptr, &raidLUN))
2355 fp_possible = (io_info.fpOkForIo > 0) ? true : false;
2356 }
2357
2358 /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
2359 id by default, not CPU group id, otherwise all MSI-X queues won't
2360 be utilized */
2361 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
2362 raw_smp_processor_id() % instance->msix_vectors : 0;
2363
2364 praid_context = &io_request->RaidContext;
2365
2366 if (instance->is_ventura) {
2367 spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
2368 megasas_stream_detect(instance, cmd, &io_info);
2369 spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
2370 /* In ventura if stream detected for a read and it is read ahead
2371 * capable make this IO as LDIO
2372 */
2373 if (is_stream_detected(&io_request->RaidContext.raid_context_g35) &&
2374 io_info.isRead && io_info.ra_capable)
2375 fp_possible = false;
2376
2377 /* FP for Optimal raid level 1.
2378 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2379 * are built by the driver as LD I/Os.
2380 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2381 * (there is never a reason to process these as buffered writes)
2382 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2383 * with the SLD bit asserted.
2384 */
2385 if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2386 mrdev_priv = scp->device->hostdata;
2387
2388 if (atomic_inc_return(&instance->fw_outstanding) >
2389 (instance->host->can_queue)) {
2390 fp_possible = false;
2391 atomic_dec(&instance->fw_outstanding);
2392 } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
2393 (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) {
2394 fp_possible = false;
2395 atomic_dec(&instance->fw_outstanding);
2396 if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2397 atomic_set(&mrdev_priv->r1_ldio_hint,
2398 instance->r1_ldio_hint_default);
2399 }
2400 }
2401
2402 /* If raid is NULL, set CPU affinity to default CPU0 */
2403 if (raid)
2404 megasas_set_raidflag_cpu_affinity(praid_context,
2405 raid, fp_possible, io_info.isRead,
2406 scsi_buff_len);
2407 else
2408 praid_context->raid_context_g35.routing_flags |=
2409 (MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2410 }
2411
2412 if (fp_possible) {
2413 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
2414 local_map_ptr, start_lba_lo);
2415 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2416 cmd->request_desc->SCSIIO.RequestFlags =
2417 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2418 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2419 if (fusion->adapter_type == INVADER_SERIES) {
2420 if (io_request->RaidContext.raid_context.reg_lock_flags ==
2421 REGION_TYPE_UNUSED)
2422 cmd->request_desc->SCSIIO.RequestFlags =
2423 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2424 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2425 io_request->RaidContext.raid_context.type
2426 = MPI2_TYPE_CUDA;
2427 io_request->RaidContext.raid_context.nseg = 0x1;
2428 io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2429 io_request->RaidContext.raid_context.reg_lock_flags |=
2430 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
2431 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2432 } else if (instance->is_ventura) {
2433 io_request->RaidContext.raid_context_g35.nseg_type |=
2434 (1 << RAID_CONTEXT_NSEG_SHIFT);
2435 io_request->RaidContext.raid_context_g35.nseg_type |=
2436 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2437 io_request->RaidContext.raid_context_g35.routing_flags |=
2438 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2439 io_request->IoFlags |=
2440 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2441 }
2442 if (fusion->load_balance_info &&
2443 (fusion->load_balance_info[device_id].loadBalanceFlag) &&
2444 (io_info.isRead)) {
2445 io_info.devHandle =
2446 get_updated_dev_handle(instance,
2447 &fusion->load_balance_info[device_id],
2448 &io_info, local_map_ptr);
2449 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
2450 cmd->pd_r1_lb = io_info.pd_after_lb;
2451 if (instance->is_ventura)
2452 io_request->RaidContext.raid_context_g35.span_arm
2453 = io_info.span_arm;
2454 else
2455 io_request->RaidContext.raid_context.span_arm
2456 = io_info.span_arm;
2457
2458 } else
2459 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2460
2461 if (instance->is_ventura)
2462 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2463 else
2464 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2465
2466 if ((raidLUN[0] == 1) &&
2467 (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
2468 instance->dev_handle = !(instance->dev_handle);
2469 io_info.devHandle =
2470 local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
2471 }
2472
2473 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
2474 io_request->DevHandle = io_info.devHandle;
2475 cmd->pd_interface = io_info.pd_interface;
2476 /* populate the LUN field */
2477 memcpy(io_request->LUN, raidLUN, 8);
2478 } else {
2479 io_request->RaidContext.raid_context.timeout_value =
2480 cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
2481 cmd->request_desc->SCSIIO.RequestFlags =
2482 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2483 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2484 if (fusion->adapter_type == INVADER_SERIES) {
2485 if (io_info.do_fp_rlbypass ||
2486 (io_request->RaidContext.raid_context.reg_lock_flags
2487 == REGION_TYPE_UNUSED))
2488 cmd->request_desc->SCSIIO.RequestFlags =
2489 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2490 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2491 io_request->RaidContext.raid_context.type
2492 = MPI2_TYPE_CUDA;
2493 io_request->RaidContext.raid_context.reg_lock_flags |=
2494 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
2495 MR_RL_FLAGS_SEQ_NUM_ENABLE);
2496 io_request->RaidContext.raid_context.nseg = 0x1;
2497 } else if (instance->is_ventura) {
2498 io_request->RaidContext.raid_context_g35.routing_flags |=
2499 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2500 io_request->RaidContext.raid_context_g35.nseg_type |=
2501 (1 << RAID_CONTEXT_NSEG_SHIFT);
2502 io_request->RaidContext.raid_context_g35.nseg_type |=
2503 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2504 }
2505 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2506 io_request->DevHandle = cpu_to_le16(device_id);
2507
2508 } /* Not FP */
2509 }
2510
2511 /**
2512 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
2513 * @instance: Adapter soft state
2514 * @scp: SCSI command
2515 * @cmd: Command to be prepared
2516 *
2517 * Prepares the io_request frame for non-rw io cmds for vd.
2518 */
2519 static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2520 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
2521 {
2522 u32 device_id;
2523 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2524 u16 ld;
2525 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2526 struct fusion_context *fusion = instance->ctrl_context;
2527 u8 span, physArm;
2528 __le16 devHandle;
2529 u32 arRef, pd;
2530 struct MR_LD_RAID *raid;
2531 struct RAID_CONTEXT *pRAID_Context;
2532 u8 fp_possible = 1;
2533
2534 io_request = cmd->io_request;
2535 device_id = MEGASAS_DEV_INDEX(scmd);
2536 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2537 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
2538 /* get RAID_Context pointer */
2539 pRAID_Context = &io_request->RaidContext.raid_context;
2540 /* Check with FW team */
2541 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2542 pRAID_Context->reg_lock_row_lba = 0;
2543 pRAID_Context->reg_lock_length = 0;
2544
2545 if (fusion->fast_path_io && (
2546 device_id < instance->fw_supported_vd_count)) {
2547
2548 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2549 if (ld >= instance->fw_supported_vd_count)
2550 fp_possible = 0;
2551 else {
2552 raid = MR_LdRaidGet(ld, local_map_ptr);
2553 if (!(raid->capability.fpNonRWCapable))
2554 fp_possible = 0;
2555 }
2556 } else
2557 fp_possible = 0;
2558
2559 if (!fp_possible) {
2560 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2561 io_request->DevHandle = cpu_to_le16(device_id);
2562 io_request->LUN[1] = scmd->device->lun;
2563 pRAID_Context->timeout_value =
2564 cpu_to_le16 (scmd->request->timeout / HZ);
2565 cmd->request_desc->SCSIIO.RequestFlags =
2566 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2567 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2568 } else {
2569
2570 /* set RAID context values */
2571 pRAID_Context->config_seq_num = raid->seqNum;
2572 if (!instance->is_ventura)
2573 pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
2574 pRAID_Context->timeout_value =
2575 cpu_to_le16(raid->fpIoTimeoutForLd);
2576
2577 /* get the DevHandle for the PD (since this is
2578 fpNonRWCapable, this is a single disk RAID0) */
2579 span = physArm = 0;
2580 arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
2581 pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
2582 devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
2583
2584 /* build request descriptor */
2585 cmd->request_desc->SCSIIO.RequestFlags =
2586 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2587 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2588 cmd->request_desc->SCSIIO.DevHandle = devHandle;
2589
2590 /* populate the LUN field */
2591 memcpy(io_request->LUN, raid->LUN, 8);
2592
2593 /* build the raidScsiIO structure */
2594 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2595 io_request->DevHandle = devHandle;
2596 }
2597 }
2598
2599 /**
2600 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
2601 * @instance: Adapter soft state
2602 * @scp: SCSI command
2603 * @cmd: Command to be prepared
2604 * @fp_possible: parameter to detect fast path or firmware path io.
2605 *
2606 * Prepares the io_request frame for rw/non-rw io cmds for syspds
2607 */
2608 static void
2609 megasas_build_syspd_fusion(struct megasas_instance *instance,
2610 struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
2611 bool fp_possible)
2612 {
2613 u32 device_id;
2614 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2615 u16 pd_index = 0;
2616 u16 os_timeout_value;
2617 u16 timeout_limit;
2618 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2619 struct RAID_CONTEXT *pRAID_Context;
2620 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
2621 struct MR_PRIV_DEVICE *mr_device_priv_data;
2622 struct fusion_context *fusion = instance->ctrl_context;
2623 pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
2624
2625 device_id = MEGASAS_DEV_INDEX(scmd);
2626 pd_index = MEGASAS_PD_INDEX(scmd);
2627 os_timeout_value = scmd->request->timeout / HZ;
2628 mr_device_priv_data = scmd->device->hostdata;
2629 cmd->pd_interface = mr_device_priv_data->interface_type;
2630
2631 io_request = cmd->io_request;
2632 /* get RAID_Context pointer */
2633 pRAID_Context = &io_request->RaidContext.raid_context;
2634 pRAID_Context->reg_lock_flags = 0;
2635 pRAID_Context->reg_lock_row_lba = 0;
2636 pRAID_Context->reg_lock_length = 0;
2637 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
2638 io_request->LUN[1] = scmd->device->lun;
2639 pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
2640 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
2641
2642 /* If FW supports PD sequence number */
2643 if (instance->use_seqnum_jbod_fp &&
2644 instance->pd_list[pd_index].driveType == TYPE_DISK) {
2645 /* TgtId must be incremented by 255 as jbod seq number is index
2646 * below raid map
2647 */
2648 /* More than 256 PD/JBOD support for Ventura */
2649 if (instance->support_morethan256jbod)
2650 pRAID_Context->virtual_disk_tgt_id =
2651 pd_sync->seq[pd_index].pd_target_id;
2652 else
2653 pRAID_Context->virtual_disk_tgt_id =
2654 cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
2655 pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
2656 io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
2657 if (instance->is_ventura) {
2658 io_request->RaidContext.raid_context_g35.routing_flags |=
2659 (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2660 io_request->RaidContext.raid_context_g35.nseg_type |=
2661 (1 << RAID_CONTEXT_NSEG_SHIFT);
2662 io_request->RaidContext.raid_context_g35.nseg_type |=
2663 (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2664 } else {
2665 pRAID_Context->type = MPI2_TYPE_CUDA;
2666 pRAID_Context->nseg = 0x1;
2667 pRAID_Context->reg_lock_flags |=
2668 (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
2669 }
2670 } else if (fusion->fast_path_io) {
2671 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2672 pRAID_Context->config_seq_num = 0;
2673 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2674 io_request->DevHandle =
2675 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
2676 } else {
2677 /* Want to send all IO via FW path */
2678 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2679 pRAID_Context->config_seq_num = 0;
2680 io_request->DevHandle = cpu_to_le16(0xFFFF);
2681 }
2682
2683 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
2684 cmd->request_desc->SCSIIO.MSIxIndex =
2685 instance->msix_vectors ?
2686 (raw_smp_processor_id() % instance->msix_vectors) : 0;
2687
2688
2689 if (!fp_possible) {
2690 /* system pd firmware path */
2691 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2692 cmd->request_desc->SCSIIO.RequestFlags =
2693 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2694 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2695 pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
2696 pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2697 } else {
2698 /* system pd Fast Path */
2699 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2700 timeout_limit = (scmd->device->type == TYPE_DISK) ?
2701 255 : 0xFFFF;
2702 pRAID_Context->timeout_value =
2703 cpu_to_le16((os_timeout_value > timeout_limit) ?
2704 timeout_limit : os_timeout_value);
2705 if (fusion->adapter_type >= INVADER_SERIES)
2706 io_request->IoFlags |=
2707 cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2708
2709 cmd->request_desc->SCSIIO.RequestFlags =
2710 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
2711 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2712 }
2713 }
2714
2715 /**
2716 * megasas_build_io_fusion - Prepares IOs to devices
2717 * @instance: Adapter soft state
2718 * @scp: SCSI command
2719 * @cmd: Command to be prepared
2720 *
2721 * Invokes helper functions to prepare request frames
2722 * and sets flags appropriate for IO/Non-IO cmd
2723 */
2724 int
2725 megasas_build_io_fusion(struct megasas_instance *instance,
2726 struct scsi_cmnd *scp,
2727 struct megasas_cmd_fusion *cmd)
2728 {
2729 int sge_count;
2730 u8 cmd_type;
2731 struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
2732 struct MR_PRIV_DEVICE *mr_device_priv_data;
2733 mr_device_priv_data = scp->device->hostdata;
2734
2735 /* Zero out some fields so they don't get reused */
2736 memset(io_request->LUN, 0x0, 8);
2737 io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
2738 io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
2739 io_request->EEDPFlags = 0;
2740 io_request->Control = 0;
2741 io_request->EEDPBlockSize = 0;
2742 io_request->ChainOffset = 0;
2743 io_request->RaidContext.raid_context.raid_flags = 0;
2744 io_request->RaidContext.raid_context.type = 0;
2745 io_request->RaidContext.raid_context.nseg = 0;
2746
2747 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
2748 /*
2749 * Just the CDB length,rest of the Flags are zero
2750 * This will be modified for FP in build_ldio_fusion
2751 */
2752 io_request->IoFlags = cpu_to_le16(scp->cmd_len);
2753
2754 switch (cmd_type = megasas_cmd_type(scp)) {
2755 case READ_WRITE_LDIO:
2756 megasas_build_ldio_fusion(instance, scp, cmd);
2757 break;
2758 case NON_READ_WRITE_LDIO:
2759 megasas_build_ld_nonrw_fusion(instance, scp, cmd);
2760 break;
2761 case READ_WRITE_SYSPDIO:
2762 megasas_build_syspd_fusion(instance, scp, cmd, true);
2763 break;
2764 case NON_READ_WRITE_SYSPDIO:
2765 if (instance->secure_jbod_support ||
2766 mr_device_priv_data->is_tm_capable)
2767 megasas_build_syspd_fusion(instance, scp, cmd, false);
2768 else
2769 megasas_build_syspd_fusion(instance, scp, cmd, true);
2770 break;
2771 default:
2772 break;
2773 }
2774
2775 /*
2776 * Construct SGL
2777 */
2778
2779 sge_count = megasas_make_sgl(instance, scp, cmd);
2780
2781 if (sge_count > instance->max_num_sge || (sge_count < 0)) {
2782 dev_err(&instance->pdev->dev,
2783 "%s %d sge_count (%d) is out of range. Range is: 0-%d\n",
2784 __func__, __LINE__, sge_count, instance->max_num_sge);
2785 return 1;
2786 }
2787
2788 if (instance->is_ventura) {
2789 set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
2790 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
2791 cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
2792 } else {
2793 /* numSGE store lower 8 bit of sge_count.
2794 * numSGEExt store higher 8 bit of sge_count
2795 */
2796 io_request->RaidContext.raid_context.num_sge = sge_count;
2797 io_request->RaidContext.raid_context.num_sge_ext =
2798 (u8)(sge_count >> 8);
2799 }
2800
2801 io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
2802
2803 if (scp->sc_data_direction == PCI_DMA_TODEVICE)
2804 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
2805 else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
2806 io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
2807
2808 io_request->SGLOffset0 =
2809 offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
2810
2811 io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
2812 io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
2813
2814 cmd->scmd = scp;
2815 scp->SCp.ptr = (char *)cmd;
2816
2817 return 0;
2818 }
2819
2820 static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
2821 megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
2822 {
2823 u8 *p;
2824 struct fusion_context *fusion;
2825
2826 fusion = instance->ctrl_context;
2827 p = fusion->req_frames_desc +
2828 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
2829
2830 return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
2831 }
2832
2833
2834 /* megasas_prepate_secondRaid1_IO
2835 * It prepares the raid 1 second IO
2836 */
2837 void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
2838 struct megasas_cmd_fusion *cmd,
2839 struct megasas_cmd_fusion *r1_cmd)
2840 {
2841 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
2842 struct fusion_context *fusion;
2843 fusion = instance->ctrl_context;
2844 req_desc = cmd->request_desc;
2845 /* copy the io request frame as well as 8 SGEs data for r1 command*/
2846 memcpy(r1_cmd->io_request, cmd->io_request,
2847 (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
2848 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
2849 (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
2850 /*sense buffer is different for r1 command*/
2851 r1_cmd->io_request->SenseBufferLowAddress =
2852 cpu_to_le32(r1_cmd->sense_phys_addr);
2853 r1_cmd->scmd = cmd->scmd;
2854 req_desc2 = megasas_get_request_descriptor(instance,
2855 (r1_cmd->index - 1));
2856 req_desc2->Words = 0;
2857 r1_cmd->request_desc = req_desc2;
2858 req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
2859 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
2860 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
2861 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
2862 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
2863 cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
2864 cpu_to_le16(r1_cmd->index);
2865 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peer_smid =
2866 cpu_to_le16(cmd->index);
2867 /*MSIxIndex of both commands request descriptors should be same*/
2868 r1_cmd->request_desc->SCSIIO.MSIxIndex =
2869 cmd->request_desc->SCSIIO.MSIxIndex;
2870 /*span arm is different for r1 cmd*/
2871 r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
2872 cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
2873 }
2874
2875 /**
2876 * megasas_build_and_issue_cmd_fusion -Main routine for building and
2877 * issuing non IOCTL cmd
2878 * @instance: Adapter soft state
2879 * @scmd: pointer to scsi cmd from OS
2880 */
2881 static u32
2882 megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
2883 struct scsi_cmnd *scmd)
2884 {
2885 struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
2886 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2887 u32 index;
2888 struct fusion_context *fusion;
2889
2890 fusion = instance->ctrl_context;
2891
2892 if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
2893 instance->ldio_threshold &&
2894 (atomic_inc_return(&instance->ldio_outstanding) >
2895 instance->ldio_threshold)) {
2896 atomic_dec(&instance->ldio_outstanding);
2897 return SCSI_MLQUEUE_DEVICE_BUSY;
2898 }
2899
2900 if (atomic_inc_return(&instance->fw_outstanding) >
2901 instance->host->can_queue) {
2902 atomic_dec(&instance->fw_outstanding);
2903 return SCSI_MLQUEUE_HOST_BUSY;
2904 }
2905
2906 cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
2907
2908 if (!cmd) {
2909 atomic_dec(&instance->fw_outstanding);
2910 return SCSI_MLQUEUE_HOST_BUSY;
2911 }
2912
2913 index = cmd->index;
2914
2915 req_desc = megasas_get_request_descriptor(instance, index-1);
2916
2917 req_desc->Words = 0;
2918 cmd->request_desc = req_desc;
2919
2920 if (megasas_build_io_fusion(instance, scmd, cmd)) {
2921 megasas_return_cmd_fusion(instance, cmd);
2922 dev_err(&instance->pdev->dev, "Error building command\n");
2923 cmd->request_desc = NULL;
2924 atomic_dec(&instance->fw_outstanding);
2925 return SCSI_MLQUEUE_HOST_BUSY;
2926 }
2927
2928 req_desc = cmd->request_desc;
2929 req_desc->SCSIIO.SMID = cpu_to_le16(index);
2930
2931 if (cmd->io_request->ChainOffset != 0 &&
2932 cmd->io_request->ChainOffset != 0xF)
2933 dev_err(&instance->pdev->dev, "The chain offset value is not "
2934 "correct : %x\n", cmd->io_request->ChainOffset);
2935 /*
2936 * if it is raid 1/10 fp write capable.
2937 * try to get second command from pool and construct it.
2938 * From FW, it has confirmed that lba values of two PDs
2939 * corresponds to single R1/10 LD are always same
2940 *
2941 */
2942 /* driver side count always should be less than max_fw_cmds
2943 * to get new command
2944 */
2945 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2946 r1_cmd = megasas_get_cmd_fusion(instance,
2947 (scmd->request->tag + instance->max_fw_cmds));
2948 megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
2949 }
2950
2951
2952 /*
2953 * Issue the command to the FW
2954 */
2955
2956 megasas_fire_cmd_fusion(instance, req_desc);
2957
2958 if (r1_cmd)
2959 megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
2960
2961
2962 return 0;
2963 }
2964
2965 /**
2966 * megasas_complete_r1_command -
2967 * completes R1 FP write commands which has valid peer smid
2968 * @instance: Adapter soft state
2969 * @cmd_fusion: MPT command frame
2970 *
2971 */
2972 static inline void
2973 megasas_complete_r1_command(struct megasas_instance *instance,
2974 struct megasas_cmd_fusion *cmd)
2975 {
2976 u8 *sense, status, ex_status;
2977 u32 data_length;
2978 u16 peer_smid;
2979 struct fusion_context *fusion;
2980 struct megasas_cmd_fusion *r1_cmd = NULL;
2981 struct scsi_cmnd *scmd_local = NULL;
2982 struct RAID_CONTEXT_G35 *rctx_g35;
2983
2984 rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
2985 fusion = instance->ctrl_context;
2986 peer_smid = le16_to_cpu(rctx_g35->smid.peer_smid);
2987
2988 r1_cmd = fusion->cmd_list[peer_smid - 1];
2989 scmd_local = cmd->scmd;
2990 status = rctx_g35->status;
2991 ex_status = rctx_g35->ex_status;
2992 data_length = cmd->io_request->DataLength;
2993 sense = cmd->sense;
2994
2995 cmd->cmd_completed = true;
2996
2997 /* Check if peer command is completed or not*/
2998 if (r1_cmd->cmd_completed) {
2999 rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
3000 if (rctx_g35->status != MFI_STAT_OK) {
3001 status = rctx_g35->status;
3002 ex_status = rctx_g35->ex_status;
3003 data_length = r1_cmd->io_request->DataLength;
3004 sense = r1_cmd->sense;
3005 }
3006
3007 megasas_return_cmd_fusion(instance, r1_cmd);
3008 map_cmd_status(fusion, scmd_local, status, ex_status,
3009 le32_to_cpu(data_length), sense);
3010 if (instance->ldio_threshold &&
3011 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3012 atomic_dec(&instance->ldio_outstanding);
3013 scmd_local->SCp.ptr = NULL;
3014 megasas_return_cmd_fusion(instance, cmd);
3015 scsi_dma_unmap(scmd_local);
3016 scmd_local->scsi_done(scmd_local);
3017 }
3018 }
3019
3020 /**
3021 * complete_cmd_fusion - Completes command
3022 * @instance: Adapter soft state
3023 * Completes all commands that is in reply descriptor queue
3024 */
3025 int
3026 complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
3027 {
3028 union MPI2_REPLY_DESCRIPTORS_UNION *desc;
3029 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
3030 struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
3031 struct fusion_context *fusion;
3032 struct megasas_cmd *cmd_mfi;
3033 struct megasas_cmd_fusion *cmd_fusion;
3034 u16 smid, num_completed;
3035 u8 reply_descript_type, *sense, status, extStatus;
3036 u32 device_id, data_length;
3037 union desc_value d_val;
3038 struct LD_LOAD_BALANCE_INFO *lbinfo;
3039 int threshold_reply_count = 0;
3040 struct scsi_cmnd *scmd_local = NULL;
3041 struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
3042 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
3043
3044 fusion = instance->ctrl_context;
3045
3046 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3047 return IRQ_HANDLED;
3048
3049 desc = fusion->reply_frames_desc[MSIxIndex] +
3050 fusion->last_reply_idx[MSIxIndex];
3051
3052 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3053
3054 d_val.word = desc->Words;
3055
3056 reply_descript_type = reply_desc->ReplyFlags &
3057 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3058
3059 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3060 return IRQ_NONE;
3061
3062 num_completed = 0;
3063
3064 while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
3065 d_val.u.high != cpu_to_le32(UINT_MAX)) {
3066
3067 smid = le16_to_cpu(reply_desc->SMID);
3068 cmd_fusion = fusion->cmd_list[smid - 1];
3069 scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
3070 cmd_fusion->io_request;
3071
3072 scmd_local = cmd_fusion->scmd;
3073 status = scsi_io_req->RaidContext.raid_context.status;
3074 extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
3075 sense = cmd_fusion->sense;
3076 data_length = scsi_io_req->DataLength;
3077
3078 switch (scsi_io_req->Function) {
3079 case MPI2_FUNCTION_SCSI_TASK_MGMT:
3080 mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
3081 cmd_fusion->io_request;
3082 mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
3083 &mr_tm_req->TmRequest;
3084 dev_dbg(&instance->pdev->dev, "TM completion:"
3085 "type: 0x%x TaskMID: 0x%x\n",
3086 mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
3087 complete(&cmd_fusion->done);
3088 break;
3089 case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
3090 /* Update load balancing info */
3091 if (fusion->load_balance_info &&
3092 (cmd_fusion->scmd->SCp.Status &
3093 MEGASAS_LOAD_BALANCE_FLAG)) {
3094 device_id = MEGASAS_DEV_INDEX(scmd_local);
3095 lbinfo = &fusion->load_balance_info[device_id];
3096 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
3097 cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
3098 }
3099 //Fall thru and complete IO
3100 case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
3101 atomic_dec(&instance->fw_outstanding);
3102 if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
3103 map_cmd_status(fusion, scmd_local, status,
3104 extStatus, le32_to_cpu(data_length),
3105 sense);
3106 if (instance->ldio_threshold &&
3107 (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
3108 atomic_dec(&instance->ldio_outstanding);
3109 scmd_local->SCp.ptr = NULL;
3110 megasas_return_cmd_fusion(instance, cmd_fusion);
3111 scsi_dma_unmap(scmd_local);
3112 scmd_local->scsi_done(scmd_local);
3113 } else /* Optimal VD - R1 FP command completion. */
3114 megasas_complete_r1_command(instance, cmd_fusion);
3115 break;
3116 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
3117 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3118 /* Poll mode. Dummy free.
3119 * In case of Interrupt mode, caller has reverse check.
3120 */
3121 if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
3122 cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
3123 megasas_return_cmd(instance, cmd_mfi);
3124 } else
3125 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
3126 break;
3127 }
3128
3129 fusion->last_reply_idx[MSIxIndex]++;
3130 if (fusion->last_reply_idx[MSIxIndex] >=
3131 fusion->reply_q_depth)
3132 fusion->last_reply_idx[MSIxIndex] = 0;
3133
3134 desc->Words = cpu_to_le64(ULLONG_MAX);
3135 num_completed++;
3136 threshold_reply_count++;
3137
3138 /* Get the next reply descriptor */
3139 if (!fusion->last_reply_idx[MSIxIndex])
3140 desc = fusion->reply_frames_desc[MSIxIndex];
3141 else
3142 desc++;
3143
3144 reply_desc =
3145 (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3146
3147 d_val.word = desc->Words;
3148
3149 reply_descript_type = reply_desc->ReplyFlags &
3150 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3151
3152 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3153 break;
3154 /*
3155 * Write to reply post host index register after completing threshold
3156 * number of reply counts and still there are more replies in reply queue
3157 * pending to be completed
3158 */
3159 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
3160 if (instance->msix_combined)
3161 writel(((MSIxIndex & 0x7) << 24) |
3162 fusion->last_reply_idx[MSIxIndex],
3163 instance->reply_post_host_index_addr[MSIxIndex/8]);
3164 else
3165 writel((MSIxIndex << 24) |
3166 fusion->last_reply_idx[MSIxIndex],
3167 instance->reply_post_host_index_addr[0]);
3168 threshold_reply_count = 0;
3169 }
3170 }
3171
3172 if (!num_completed)
3173 return IRQ_NONE;
3174
3175 wmb();
3176 if (instance->msix_combined)
3177 writel(((MSIxIndex & 0x7) << 24) |
3178 fusion->last_reply_idx[MSIxIndex],
3179 instance->reply_post_host_index_addr[MSIxIndex/8]);
3180 else
3181 writel((MSIxIndex << 24) |
3182 fusion->last_reply_idx[MSIxIndex],
3183 instance->reply_post_host_index_addr[0]);
3184 megasas_check_and_restore_queue_depth(instance);
3185 return IRQ_HANDLED;
3186 }
3187
3188 /**
3189 * megasas_sync_irqs - Synchronizes all IRQs owned by adapter
3190 * @instance: Adapter soft state
3191 */
3192 void megasas_sync_irqs(unsigned long instance_addr)
3193 {
3194 u32 count, i;
3195 struct megasas_instance *instance =
3196 (struct megasas_instance *)instance_addr;
3197
3198 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3199
3200 for (i = 0; i < count; i++)
3201 synchronize_irq(pci_irq_vector(instance->pdev, i));
3202 }
3203
3204 /**
3205 * megasas_complete_cmd_dpc_fusion - Completes command
3206 * @instance: Adapter soft state
3207 *
3208 * Tasklet to complete cmds
3209 */
3210 void
3211 megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3212 {
3213 struct megasas_instance *instance =
3214 (struct megasas_instance *)instance_addr;
3215 unsigned long flags;
3216 u32 count, MSIxIndex;
3217
3218 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3219
3220 /* If we have already declared adapter dead, donot complete cmds */
3221 spin_lock_irqsave(&instance->hba_lock, flags);
3222 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
3223 spin_unlock_irqrestore(&instance->hba_lock, flags);
3224 return;
3225 }
3226 spin_unlock_irqrestore(&instance->hba_lock, flags);
3227
3228 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
3229 complete_cmd_fusion(instance, MSIxIndex);
3230 }
3231
3232 /**
3233 * megasas_isr_fusion - isr entry point
3234 */
3235 irqreturn_t megasas_isr_fusion(int irq, void *devp)
3236 {
3237 struct megasas_irq_context *irq_context = devp;
3238 struct megasas_instance *instance = irq_context->instance;
3239 u32 mfiStatus, fw_state, dma_state;
3240
3241 if (instance->mask_interrupts)
3242 return IRQ_NONE;
3243
3244 if (!instance->msix_vectors) {
3245 mfiStatus = instance->instancet->clear_intr(instance->reg_set);
3246 if (!mfiStatus)
3247 return IRQ_NONE;
3248 }
3249
3250 /* If we are resetting, bail */
3251 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
3252 instance->instancet->clear_intr(instance->reg_set);
3253 return IRQ_HANDLED;
3254 }
3255
3256 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
3257 instance->instancet->clear_intr(instance->reg_set);
3258 /* If we didn't complete any commands, check for FW fault */
3259 fw_state = instance->instancet->read_fw_status_reg(
3260 instance->reg_set) & MFI_STATE_MASK;
3261 dma_state = instance->instancet->read_fw_status_reg
3262 (instance->reg_set) & MFI_STATE_DMADONE;
3263 if (instance->crash_dump_drv_support &&
3264 instance->crash_dump_app_support) {
3265 /* Start collecting crash, if DMA bit is done */
3266 if ((fw_state == MFI_STATE_FAULT) && dma_state)
3267 schedule_work(&instance->crash_init);
3268 else if (fw_state == MFI_STATE_FAULT) {
3269 if (instance->unload == 0)
3270 schedule_work(&instance->work_init);
3271 }
3272 } else if (fw_state == MFI_STATE_FAULT) {
3273 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
3274 "for scsi%d\n", instance->host->host_no);
3275 if (instance->unload == 0)
3276 schedule_work(&instance->work_init);
3277 }
3278 }
3279
3280 return IRQ_HANDLED;
3281 }
3282
3283 /**
3284 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
3285 * @instance: Adapter soft state
3286 * mfi_cmd: megasas_cmd pointer
3287 *
3288 */
3289 void
3290 build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3291 struct megasas_cmd *mfi_cmd)
3292 {
3293 struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3294 struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
3295 struct megasas_cmd_fusion *cmd;
3296 struct fusion_context *fusion;
3297 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
3298
3299 fusion = instance->ctrl_context;
3300
3301 cmd = megasas_get_cmd_fusion(instance,
3302 instance->max_scsi_cmds + mfi_cmd->index);
3303
3304 /* Save the smid. To be used for returning the cmd */
3305 mfi_cmd->context.smid = cmd->index;
3306
3307 /*
3308 * For cmds where the flag is set, store the flag and check
3309 * on completion. For cmds with this flag, don't call
3310 * megasas_complete_cmd
3311 */
3312
3313 if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
3314 mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
3315
3316 io_req = cmd->io_request;
3317
3318 if (fusion->adapter_type >= INVADER_SERIES) {
3319 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
3320 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
3321 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
3322 sgl_ptr_end->Flags = 0;
3323 }
3324
3325 mpi25_ieee_chain =
3326 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
3327
3328 io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3329 io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
3330 SGL) / 4;
3331 io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
3332
3333 mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
3334
3335 mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3336 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3337
3338 mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
3339 }
3340
3341 /**
3342 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
3343 * @instance: Adapter soft state
3344 * @cmd: mfi cmd to build
3345 *
3346 */
3347 union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3348 build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3349 {
3350 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
3351 u16 index;
3352
3353 build_mpt_mfi_pass_thru(instance, cmd);
3354 index = cmd->context.smid;
3355
3356 req_desc = megasas_get_request_descriptor(instance, index - 1);
3357
3358 req_desc->Words = 0;
3359 req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3360 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3361
3362 req_desc->SCSIIO.SMID = cpu_to_le16(index);
3363
3364 return req_desc;
3365 }
3366
3367 /**
3368 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
3369 * @instance: Adapter soft state
3370 * @cmd: mfi cmd pointer
3371 *
3372 */
3373 void
3374 megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3375 struct megasas_cmd *cmd)
3376 {
3377 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3378
3379 req_desc = build_mpt_cmd(instance, cmd);
3380
3381 megasas_fire_cmd_fusion(instance, req_desc);
3382 return;
3383 }
3384
3385 /**
3386 * megasas_release_fusion - Reverses the FW initialization
3387 * @instance: Adapter soft state
3388 */
3389 void
3390 megasas_release_fusion(struct megasas_instance *instance)
3391 {
3392 megasas_free_cmds(instance);
3393 megasas_free_cmds_fusion(instance);
3394
3395 iounmap(instance->reg_set);
3396
3397 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
3398 }
3399
3400 /**
3401 * megasas_read_fw_status_reg_fusion - returns the current FW status value
3402 * @regs: MFI register set
3403 */
3404 static u32
3405 megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
3406 {
3407 return readl(&(regs)->outbound_scratch_pad);
3408 }
3409
3410 /**
3411 * megasas_alloc_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3412 * @instance: Controller's soft instance
3413 * return: Number of allocated host crash buffers
3414 */
3415 static void
3416 megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
3417 {
3418 unsigned int i;
3419
3420 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
3421 instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
3422 if (!instance->crash_buf[i]) {
3423 dev_info(&instance->pdev->dev, "Firmware crash dump "
3424 "memory allocation failed at index %d\n", i);
3425 break;
3426 }
3427 }
3428 instance->drv_buf_alloc = i;
3429 }
3430
3431 /**
3432 * megasas_free_host_crash_buffer - Host buffers for Crash dump collection from Firmware
3433 * @instance: Controller's soft instance
3434 */
3435 void
3436 megasas_free_host_crash_buffer(struct megasas_instance *instance)
3437 {
3438 unsigned int i;
3439 for (i = 0; i < instance->drv_buf_alloc; i++) {
3440 if (instance->crash_buf[i])
3441 vfree(instance->crash_buf[i]);
3442 }
3443 instance->drv_buf_index = 0;
3444 instance->drv_buf_alloc = 0;
3445 instance->fw_crash_state = UNAVAILABLE;
3446 instance->fw_crash_buffer_size = 0;
3447 }
3448
3449 /**
3450 * megasas_adp_reset_fusion - For controller reset
3451 * @regs: MFI register set
3452 */
3453 static int
3454 megasas_adp_reset_fusion(struct megasas_instance *instance,
3455 struct megasas_register_set __iomem *regs)
3456 {
3457 u32 host_diag, abs_state, retry;
3458
3459 /* Now try to reset the chip */
3460 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3461 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3462 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3463 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3464 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3465 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3466 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3467
3468 /* Check that the diag write enable (DRWE) bit is on */
3469 host_diag = readl(&instance->reg_set->fusion_host_diag);
3470 retry = 0;
3471 while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3472 msleep(100);
3473 host_diag = readl(&instance->reg_set->fusion_host_diag);
3474 if (retry++ == 100) {
3475 dev_warn(&instance->pdev->dev,
3476 "Host diag unlock failed from %s %d\n",
3477 __func__, __LINE__);
3478 break;
3479 }
3480 }
3481 if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3482 return -1;
3483
3484 /* Send chip reset command */
3485 writel(host_diag | HOST_DIAG_RESET_ADAPTER,
3486 &instance->reg_set->fusion_host_diag);
3487 msleep(3000);
3488
3489 /* Make sure reset adapter bit is cleared */
3490 host_diag = readl(&instance->reg_set->fusion_host_diag);
3491 retry = 0;
3492 while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3493 msleep(100);
3494 host_diag = readl(&instance->reg_set->fusion_host_diag);
3495 if (retry++ == 1000) {
3496 dev_warn(&instance->pdev->dev,
3497 "Diag reset adapter never cleared %s %d\n",
3498 __func__, __LINE__);
3499 break;
3500 }
3501 }
3502 if (host_diag & HOST_DIAG_RESET_ADAPTER)
3503 return -1;
3504
3505 abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
3506 & MFI_STATE_MASK;
3507 retry = 0;
3508
3509 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3510 msleep(100);
3511 abs_state = instance->instancet->
3512 read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
3513 }
3514 if (abs_state <= MFI_STATE_FW_INIT) {
3515 dev_warn(&instance->pdev->dev,
3516 "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
3517 abs_state, __func__, __LINE__);
3518 return -1;
3519 }
3520
3521 return 0;
3522 }
3523
3524 /**
3525 * megasas_check_reset_fusion - For controller reset check
3526 * @regs: MFI register set
3527 */
3528 static int
3529 megasas_check_reset_fusion(struct megasas_instance *instance,
3530 struct megasas_register_set __iomem *regs)
3531 {
3532 return 0;
3533 }
3534
3535 /* This function waits for outstanding commands on fusion to complete */
3536 int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
3537 int reason, int *convert)
3538 {
3539 int i, outstanding, retval = 0, hb_seconds_missed = 0;
3540 u32 fw_state;
3541
3542 for (i = 0; i < resetwaittime; i++) {
3543 /* Check if firmware is in fault state */
3544 fw_state = instance->instancet->read_fw_status_reg(
3545 instance->reg_set) & MFI_STATE_MASK;
3546 if (fw_state == MFI_STATE_FAULT) {
3547 dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
3548 " will reset adapter scsi%d.\n",
3549 instance->host->host_no);
3550 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3551 if (instance->requestorId && reason) {
3552 dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
3553 " state while polling during"
3554 " I/O timeout handling for %d\n",
3555 instance->host->host_no);
3556 *convert = 1;
3557 }
3558
3559 retval = 1;
3560 goto out;
3561 }
3562
3563 if (reason == MFI_IO_TIMEOUT_OCR) {
3564 dev_info(&instance->pdev->dev,
3565 "MFI IO is timed out, initiating OCR\n");
3566 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3567 retval = 1;
3568 goto out;
3569 }
3570
3571 /* If SR-IOV VF mode & heartbeat timeout, don't wait */
3572 if (instance->requestorId && !reason) {
3573 retval = 1;
3574 goto out;
3575 }
3576
3577 /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
3578 if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
3579 if (instance->hb_host_mem->HB.fwCounter !=
3580 instance->hb_host_mem->HB.driverCounter) {
3581 instance->hb_host_mem->HB.driverCounter =
3582 instance->hb_host_mem->HB.fwCounter;
3583 hb_seconds_missed = 0;
3584 } else {
3585 hb_seconds_missed++;
3586 if (hb_seconds_missed ==
3587 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
3588 dev_warn(&instance->pdev->dev, "SR-IOV:"
3589 " Heartbeat never completed "
3590 " while polling during I/O "
3591 " timeout handling for "
3592 "scsi%d.\n",
3593 instance->host->host_no);
3594 *convert = 1;
3595 retval = 1;
3596 goto out;
3597 }
3598 }
3599 }
3600
3601 megasas_complete_cmd_dpc_fusion((unsigned long)instance);
3602 outstanding = atomic_read(&instance->fw_outstanding);
3603 if (!outstanding)
3604 goto out;
3605
3606 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
3607 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
3608 "commands to complete for scsi%d\n", i,
3609 outstanding, instance->host->host_no);
3610 }
3611 msleep(1000);
3612 }
3613
3614 if (atomic_read(&instance->fw_outstanding)) {
3615 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
3616 "will reset adapter scsi%d.\n",
3617 instance->host->host_no);
3618 *convert = 1;
3619 retval = 1;
3620 }
3621 out:
3622 return retval;
3623 }
3624
3625 void megasas_reset_reply_desc(struct megasas_instance *instance)
3626 {
3627 int i, j, count;
3628 struct fusion_context *fusion;
3629 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3630
3631 fusion = instance->ctrl_context;
3632 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3633 for (i = 0 ; i < count ; i++) {
3634 fusion->last_reply_idx[i] = 0;
3635 reply_desc = fusion->reply_frames_desc[i];
3636 for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
3637 reply_desc->Words = cpu_to_le64(ULLONG_MAX);
3638 }
3639 }
3640
3641 /*
3642 * megasas_refire_mgmt_cmd : Re-fire management commands
3643 * @instance: Controller's soft instance
3644 */
3645 void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
3646 {
3647 int j;
3648 struct megasas_cmd_fusion *cmd_fusion;
3649 struct fusion_context *fusion;
3650 struct megasas_cmd *cmd_mfi;
3651 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3652 u16 smid;
3653 bool refire_cmd = 0;
3654
3655 fusion = instance->ctrl_context;
3656
3657 /* Re-fire management commands.
3658 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
3659 */
3660 for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
3661 cmd_fusion = fusion->cmd_list[j];
3662 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3663 smid = le16_to_cpu(cmd_mfi->context.smid);
3664
3665 if (!smid)
3666 continue;
3667
3668 /* Do not refire shutdown command */
3669 if (le32_to_cpu(cmd_mfi->frame->dcmd.opcode) ==
3670 MR_DCMD_CTRL_SHUTDOWN) {
3671 cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
3672 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
3673 continue;
3674 }
3675
3676 req_desc = megasas_get_request_descriptor
3677 (instance, smid - 1);
3678 refire_cmd = req_desc && ((cmd_mfi->frame->dcmd.opcode !=
3679 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
3680 (cmd_mfi->frame->dcmd.opcode !=
3681 cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO)))
3682 && !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
3683 if (refire_cmd)
3684 megasas_fire_cmd_fusion(instance, req_desc);
3685 else
3686 megasas_return_cmd(instance, cmd_mfi);
3687 }
3688 }
3689
3690 /*
3691 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
3692 * @instance: per adapter struct
3693 * @channel: the channel assigned by the OS
3694 * @id: the id assigned by the OS
3695 *
3696 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
3697 */
3698
3699 static int megasas_track_scsiio(struct megasas_instance *instance,
3700 int id, int channel)
3701 {
3702 int i, found = 0;
3703 struct megasas_cmd_fusion *cmd_fusion;
3704 struct fusion_context *fusion;
3705 fusion = instance->ctrl_context;
3706
3707 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
3708 cmd_fusion = fusion->cmd_list[i];
3709 if (cmd_fusion->scmd &&
3710 (cmd_fusion->scmd->device->id == id &&
3711 cmd_fusion->scmd->device->channel == channel)) {
3712 dev_info(&instance->pdev->dev,
3713 "SCSI commands pending to target"
3714 "channel %d id %d \tSMID: 0x%x\n",
3715 channel, id, cmd_fusion->index);
3716 scsi_print_command(cmd_fusion->scmd);
3717 found = 1;
3718 break;
3719 }
3720 }
3721
3722 return found ? FAILED : SUCCESS;
3723 }
3724
3725 /**
3726 * megasas_tm_response_code - translation of device response code
3727 * @ioc: per adapter object
3728 * @mpi_reply: MPI reply returned by firmware
3729 *
3730 * Return nothing.
3731 */
3732 static void
3733 megasas_tm_response_code(struct megasas_instance *instance,
3734 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
3735 {
3736 char *desc;
3737
3738 switch (mpi_reply->ResponseCode) {
3739 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
3740 desc = "task management request completed";
3741 break;
3742 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
3743 desc = "invalid frame";
3744 break;
3745 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
3746 desc = "task management request not supported";
3747 break;
3748 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
3749 desc = "task management request failed";
3750 break;
3751 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
3752 desc = "task management request succeeded";
3753 break;
3754 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
3755 desc = "invalid lun";
3756 break;
3757 case 0xA:
3758 desc = "overlapped tag attempted";
3759 break;
3760 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
3761 desc = "task queued, however not sent to target";
3762 break;
3763 default:
3764 desc = "unknown";
3765 break;
3766 }
3767 dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
3768 mpi_reply->ResponseCode, desc);
3769 dev_dbg(&instance->pdev->dev,
3770 "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
3771 " 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
3772 mpi_reply->TerminationCount, mpi_reply->DevHandle,
3773 mpi_reply->Function, mpi_reply->TaskType,
3774 mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
3775 }
3776
3777 /**
3778 * megasas_issue_tm - main routine for sending tm requests
3779 * @instance: per adapter struct
3780 * @device_handle: device handle
3781 * @channel: the channel assigned by the OS
3782 * @id: the id assigned by the OS
3783 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
3784 * @smid_task: smid assigned to the task
3785 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
3786 * Context: user
3787 *
3788 * MegaRaid use MPT interface for Task Magement request.
3789 * A generic API for sending task management requests to firmware.
3790 *
3791 * Return SUCCESS or FAILED.
3792 */
3793 static int
3794 megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
3795 uint channel, uint id, u16 smid_task, u8 type)
3796 {
3797 struct MR_TASK_MANAGE_REQUEST *mr_request;
3798 struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
3799 unsigned long timeleft;
3800 struct megasas_cmd_fusion *cmd_fusion;
3801 struct megasas_cmd *cmd_mfi;
3802 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3803 struct fusion_context *fusion = NULL;
3804 struct megasas_cmd_fusion *scsi_lookup;
3805 int rc;
3806 struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
3807
3808 fusion = instance->ctrl_context;
3809
3810 cmd_mfi = megasas_get_cmd(instance);
3811
3812 if (!cmd_mfi) {
3813 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
3814 __func__, __LINE__);
3815 return -ENOMEM;
3816 }
3817
3818 cmd_fusion = megasas_get_cmd_fusion(instance,
3819 instance->max_scsi_cmds + cmd_mfi->index);
3820
3821 /* Save the smid. To be used for returning the cmd */
3822 cmd_mfi->context.smid = cmd_fusion->index;
3823
3824 req_desc = megasas_get_request_descriptor(instance,
3825 (cmd_fusion->index - 1));
3826
3827 cmd_fusion->request_desc = req_desc;
3828 req_desc->Words = 0;
3829
3830 mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
3831 memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
3832 mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
3833 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3834 mpi_request->DevHandle = cpu_to_le16(device_handle);
3835 mpi_request->TaskType = type;
3836 mpi_request->TaskMID = cpu_to_le16(smid_task);
3837 mpi_request->LUN[1] = 0;
3838
3839
3840 req_desc = cmd_fusion->request_desc;
3841 req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
3842 req_desc->HighPriority.RequestFlags =
3843 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
3844 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3845 req_desc->HighPriority.MSIxIndex = 0;
3846 req_desc->HighPriority.LMID = 0;
3847 req_desc->HighPriority.Reserved1 = 0;
3848
3849 if (channel < MEGASAS_MAX_PD_CHANNELS)
3850 mr_request->tmReqFlags.isTMForPD = 1;
3851 else
3852 mr_request->tmReqFlags.isTMForLD = 1;
3853
3854 init_completion(&cmd_fusion->done);
3855 megasas_fire_cmd_fusion(instance, req_desc);
3856
3857 timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ);
3858
3859 if (!timeleft) {
3860 dev_err(&instance->pdev->dev,
3861 "task mgmt type 0x%x timed out\n", type);
3862 cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE;
3863 mutex_unlock(&instance->reset_mutex);
3864 rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
3865 mutex_lock(&instance->reset_mutex);
3866 return rc;
3867 }
3868
3869 mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
3870 megasas_tm_response_code(instance, mpi_reply);
3871
3872 megasas_return_cmd(instance, cmd_mfi);
3873 rc = SUCCESS;
3874 switch (type) {
3875 case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
3876 scsi_lookup = fusion->cmd_list[smid_task - 1];
3877
3878 if (scsi_lookup->scmd == NULL)
3879 break;
3880 else {
3881 instance->instancet->disable_intr(instance);
3882 megasas_sync_irqs((unsigned long)instance);
3883 instance->instancet->enable_intr(instance);
3884 if (scsi_lookup->scmd == NULL)
3885 break;
3886 }
3887 rc = FAILED;
3888 break;
3889
3890 case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
3891 if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
3892 break;
3893 instance->instancet->disable_intr(instance);
3894 megasas_sync_irqs((unsigned long)instance);
3895 rc = megasas_track_scsiio(instance, id, channel);
3896 instance->instancet->enable_intr(instance);
3897
3898 break;
3899 case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
3900 case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
3901 break;
3902 default:
3903 rc = FAILED;
3904 break;
3905 }
3906
3907 return rc;
3908
3909 }
3910
3911 /*
3912 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI
3913 * @instance: per adapter struct
3914 *
3915 * Return Non Zero index, if SMID found in outstanding commands
3916 */
3917 static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
3918 {
3919 int i, ret = 0;
3920 struct megasas_instance *instance;
3921 struct megasas_cmd_fusion *cmd_fusion;
3922 struct fusion_context *fusion;
3923
3924 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3925
3926 fusion = instance->ctrl_context;
3927
3928 for (i = 0; i < instance->max_scsi_cmds; i++) {
3929 cmd_fusion = fusion->cmd_list[i];
3930 if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
3931 scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
3932 " SMID: %d\n", cmd_fusion->index);
3933 ret = cmd_fusion->index;
3934 break;
3935 }
3936 }
3937
3938 return ret;
3939 }
3940
3941 /*
3942 * megasas_get_tm_devhandle - Get devhandle for TM request
3943 * @sdev- OS provided scsi device
3944 *
3945 * Returns- devhandle/targetID of SCSI device
3946 */
3947 static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
3948 {
3949 u16 pd_index = 0;
3950 u32 device_id;
3951 struct megasas_instance *instance;
3952 struct fusion_context *fusion;
3953 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3954 u16 devhandle = (u16)ULONG_MAX;
3955
3956 instance = (struct megasas_instance *)sdev->host->hostdata;
3957 fusion = instance->ctrl_context;
3958
3959 if (!MEGASAS_IS_LOGICAL(sdev)) {
3960 if (instance->use_seqnum_jbod_fp) {
3961 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
3962 + sdev->id;
3963 pd_sync = (void *)fusion->pd_seq_sync
3964 [(instance->pd_seq_map_id - 1) & 1];
3965 devhandle = pd_sync->seq[pd_index].devHandle;
3966 } else
3967 sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
3968 " without JBOD MAP support from %s %d\n", __func__, __LINE__);
3969 } else {
3970 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
3971 + sdev->id;
3972 devhandle = device_id;
3973 }
3974
3975 return devhandle;
3976 }
3977
3978 /*
3979 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
3980 * @scmd : pointer to scsi command object
3981 *
3982 * Return SUCCESS, if command aborted else FAILED
3983 */
3984
3985 int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
3986 {
3987 struct megasas_instance *instance;
3988 u16 smid, devhandle;
3989 struct fusion_context *fusion;
3990 int ret;
3991 struct MR_PRIV_DEVICE *mr_device_priv_data;
3992 mr_device_priv_data = scmd->device->hostdata;
3993
3994
3995 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3996 fusion = instance->ctrl_context;
3997
3998 scmd_printk(KERN_INFO, scmd, "task abort called for scmd(%p)\n", scmd);
3999 scsi_print_command(scmd);
4000
4001 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4002 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4003 "SCSI host:%d\n", instance->host->host_no);
4004 ret = FAILED;
4005 return ret;
4006 }
4007
4008 if (!mr_device_priv_data) {
4009 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
4010 "scmd(%p)\n", scmd);
4011 scmd->result = DID_NO_CONNECT << 16;
4012 ret = SUCCESS;
4013 goto out;
4014 }
4015
4016
4017 if (!mr_device_priv_data->is_tm_capable) {
4018 ret = FAILED;
4019 goto out;
4020 }
4021
4022 mutex_lock(&instance->reset_mutex);
4023
4024 smid = megasas_fusion_smid_lookup(scmd);
4025
4026 if (!smid) {
4027 ret = SUCCESS;
4028 scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
4029 " issued is not found in oustanding commands\n");
4030 mutex_unlock(&instance->reset_mutex);
4031 goto out;
4032 }
4033
4034 devhandle = megasas_get_tm_devhandle(scmd->device);
4035
4036 if (devhandle == (u16)ULONG_MAX) {
4037 ret = SUCCESS;
4038 sdev_printk(KERN_INFO, scmd->device,
4039 "task abort issued for invalid devhandle\n");
4040 mutex_unlock(&instance->reset_mutex);
4041 goto out;
4042 }
4043 sdev_printk(KERN_INFO, scmd->device,
4044 "attempting task abort! scmd(%p) tm_dev_handle 0x%x\n",
4045 scmd, devhandle);
4046
4047 mr_device_priv_data->tm_busy = 1;
4048 ret = megasas_issue_tm(instance, devhandle,
4049 scmd->device->channel, scmd->device->id, smid,
4050 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
4051 mr_device_priv_data->tm_busy = 0;
4052
4053 mutex_unlock(&instance->reset_mutex);
4054 out:
4055 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
4056 ((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4057
4058 return ret;
4059 }
4060
4061 /*
4062 * megasas_reset_target_fusion : target reset function for fusion adapters
4063 * scmd: SCSI command pointer
4064 *
4065 * Returns SUCCESS if all commands associated with target aborted else FAILED
4066 */
4067
4068 int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
4069 {
4070
4071 struct megasas_instance *instance;
4072 int ret = FAILED;
4073 u16 devhandle;
4074 struct fusion_context *fusion;
4075 struct MR_PRIV_DEVICE *mr_device_priv_data;
4076 mr_device_priv_data = scmd->device->hostdata;
4077
4078 instance = (struct megasas_instance *)scmd->device->host->hostdata;
4079 fusion = instance->ctrl_context;
4080
4081 sdev_printk(KERN_INFO, scmd->device,
4082 "target reset called for scmd(%p)\n", scmd);
4083
4084 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4085 dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4086 "SCSI host:%d\n", instance->host->host_no);
4087 ret = FAILED;
4088 return ret;
4089 }
4090
4091 if (!mr_device_priv_data) {
4092 sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
4093 "scmd(%p)\n", scmd);
4094 scmd->result = DID_NO_CONNECT << 16;
4095 ret = SUCCESS;
4096 goto out;
4097 }
4098
4099
4100 if (!mr_device_priv_data->is_tm_capable) {
4101 ret = FAILED;
4102 goto out;
4103 }
4104
4105 mutex_lock(&instance->reset_mutex);
4106 devhandle = megasas_get_tm_devhandle(scmd->device);
4107
4108 if (devhandle == (u16)ULONG_MAX) {
4109 ret = SUCCESS;
4110 sdev_printk(KERN_INFO, scmd->device,
4111 "target reset issued for invalid devhandle\n");
4112 mutex_unlock(&instance->reset_mutex);
4113 goto out;
4114 }
4115
4116 sdev_printk(KERN_INFO, scmd->device,
4117 "attempting target reset! scmd(%p) tm_dev_handle 0x%x\n",
4118 scmd, devhandle);
4119 mr_device_priv_data->tm_busy = 1;
4120 ret = megasas_issue_tm(instance, devhandle,
4121 scmd->device->channel, scmd->device->id, 0,
4122 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
4123 mr_device_priv_data->tm_busy = 0;
4124 mutex_unlock(&instance->reset_mutex);
4125 out:
4126 scmd_printk(KERN_NOTICE, scmd, "megasas: target reset %s!!\n",
4127 (ret == SUCCESS) ? "SUCCESS" : "FAILED");
4128
4129 return ret;
4130 }
4131
4132 /*SRIOV get other instance in cluster if any*/
4133 struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
4134 {
4135 int i;
4136
4137 for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
4138 if (megasas_mgmt_info.instance[i] &&
4139 (megasas_mgmt_info.instance[i] != instance) &&
4140 megasas_mgmt_info.instance[i]->requestorId &&
4141 megasas_mgmt_info.instance[i]->peerIsPresent &&
4142 (memcmp((megasas_mgmt_info.instance[i]->clusterId),
4143 instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
4144 return megasas_mgmt_info.instance[i];
4145 }
4146 return NULL;
4147 }
4148
4149 /* Check for a second path that is currently UP */
4150 int megasas_check_mpio_paths(struct megasas_instance *instance,
4151 struct scsi_cmnd *scmd)
4152 {
4153 struct megasas_instance *peer_instance = NULL;
4154 int retval = (DID_REQUEUE << 16);
4155
4156 if (instance->peerIsPresent) {
4157 peer_instance = megasas_get_peer_instance(instance);
4158 if ((peer_instance) &&
4159 (atomic_read(&peer_instance->adprecovery) ==
4160 MEGASAS_HBA_OPERATIONAL))
4161 retval = (DID_NO_CONNECT << 16);
4162 }
4163 return retval;
4164 }
4165
4166 /* Core fusion reset function */
4167 int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4168 {
4169 int retval = SUCCESS, i, j, convert = 0;
4170 struct megasas_instance *instance;
4171 struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
4172 struct fusion_context *fusion;
4173 u32 abs_state, status_reg, reset_adapter;
4174 u32 io_timeout_in_crash_mode = 0;
4175 struct scsi_cmnd *scmd_local = NULL;
4176 struct scsi_device *sdev;
4177
4178 instance = (struct megasas_instance *)shost->hostdata;
4179 fusion = instance->ctrl_context;
4180
4181 mutex_lock(&instance->reset_mutex);
4182
4183 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
4184 dev_warn(&instance->pdev->dev, "Hardware critical error, "
4185 "returning FAILED for scsi%d.\n",
4186 instance->host->host_no);
4187 mutex_unlock(&instance->reset_mutex);
4188 return FAILED;
4189 }
4190 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
4191 abs_state = status_reg & MFI_STATE_MASK;
4192
4193 /* IO timeout detected, forcibly put FW in FAULT state */
4194 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
4195 instance->crash_dump_app_support && reason) {
4196 dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
4197 "forcibly FAULT Firmware\n");
4198 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4199 status_reg = readl(&instance->reg_set->doorbell);
4200 writel(status_reg | MFI_STATE_FORCE_OCR,
4201 &instance->reg_set->doorbell);
4202 readl(&instance->reg_set->doorbell);
4203 mutex_unlock(&instance->reset_mutex);
4204 do {
4205 ssleep(3);
4206 io_timeout_in_crash_mode++;
4207 dev_dbg(&instance->pdev->dev, "waiting for [%d] "
4208 "seconds for crash dump collection and OCR "
4209 "to be done\n", (io_timeout_in_crash_mode * 3));
4210 } while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
4211 (io_timeout_in_crash_mode < 80));
4212
4213 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
4214 dev_info(&instance->pdev->dev, "OCR done for IO "
4215 "timeout case\n");
4216 retval = SUCCESS;
4217 } else {
4218 dev_info(&instance->pdev->dev, "Controller is not "
4219 "operational after 240 seconds wait for IO "
4220 "timeout case in FW crash dump mode\n do "
4221 "OCR/kill adapter\n");
4222 retval = megasas_reset_fusion(shost, 0);
4223 }
4224 return retval;
4225 }
4226
4227 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
4228 del_timer_sync(&instance->sriov_heartbeat_timer);
4229 set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4230 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
4231 instance->instancet->disable_intr(instance);
4232 megasas_sync_irqs((unsigned long)instance);
4233
4234 /* First try waiting for commands to complete */
4235 if (megasas_wait_for_outstanding_fusion(instance, reason,
4236 &convert)) {
4237 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4238 dev_warn(&instance->pdev->dev, "resetting fusion "
4239 "adapter scsi%d.\n", instance->host->host_no);
4240 if (convert)
4241 reason = 0;
4242
4243 if (megasas_dbg_lvl & OCR_LOGS)
4244 dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
4245
4246 /* Now return commands back to the OS */
4247 for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4248 cmd_fusion = fusion->cmd_list[i];
4249 /*check for extra commands issued by driver*/
4250 if (instance->is_ventura) {
4251 r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4252 megasas_return_cmd_fusion(instance, r1_cmd);
4253 }
4254 scmd_local = cmd_fusion->scmd;
4255 if (cmd_fusion->scmd) {
4256 if (megasas_dbg_lvl & OCR_LOGS) {
4257 sdev_printk(KERN_INFO,
4258 cmd_fusion->scmd->device, "SMID: 0x%x\n",
4259 cmd_fusion->index);
4260 scsi_print_command(cmd_fusion->scmd);
4261 }
4262
4263 scmd_local->result =
4264 megasas_check_mpio_paths(instance,
4265 scmd_local);
4266 if (instance->ldio_threshold &&
4267 megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
4268 atomic_dec(&instance->ldio_outstanding);
4269 megasas_return_cmd_fusion(instance, cmd_fusion);
4270 scsi_dma_unmap(scmd_local);
4271 scmd_local->scsi_done(scmd_local);
4272 }
4273 }
4274
4275 atomic_set(&instance->fw_outstanding, 0);
4276
4277 status_reg = instance->instancet->read_fw_status_reg(
4278 instance->reg_set);
4279 abs_state = status_reg & MFI_STATE_MASK;
4280 reset_adapter = status_reg & MFI_RESET_ADAPTER;
4281 if (instance->disableOnlineCtrlReset ||
4282 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
4283 /* Reset not supported, kill adapter */
4284 dev_warn(&instance->pdev->dev, "Reset not supported"
4285 ", killing adapter scsi%d.\n",
4286 instance->host->host_no);
4287 megaraid_sas_kill_hba(instance);
4288 instance->skip_heartbeat_timer_del = 1;
4289 retval = FAILED;
4290 goto out;
4291 }
4292
4293 /* Let SR-IOV VF & PF sync up if there was a HB failure */
4294 if (instance->requestorId && !reason) {
4295 msleep(MEGASAS_OCR_SETTLE_TIME_VF);
4296 goto transition_to_ready;
4297 }
4298
4299 /* Now try to reset the chip */
4300 for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
4301
4302 if (instance->instancet->adp_reset
4303 (instance, instance->reg_set))
4304 continue;
4305 transition_to_ready:
4306 /* Wait for FW to become ready */
4307 if (megasas_transition_to_ready(instance, 1)) {
4308 dev_warn(&instance->pdev->dev,
4309 "Failed to transition controller to ready for "
4310 "scsi%d.\n", instance->host->host_no);
4311 if (instance->requestorId && !reason)
4312 goto fail_kill_adapter;
4313 else
4314 continue;
4315 }
4316 megasas_reset_reply_desc(instance);
4317 megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
4318
4319 if (megasas_ioc_init_fusion(instance)) {
4320 if (instance->requestorId && !reason)
4321 goto fail_kill_adapter;
4322 else
4323 continue;
4324 }
4325
4326 megasas_refire_mgmt_cmd(instance);
4327
4328 if (megasas_get_ctrl_info(instance)) {
4329 dev_info(&instance->pdev->dev,
4330 "Failed from %s %d\n",
4331 __func__, __LINE__);
4332 megaraid_sas_kill_hba(instance);
4333 retval = FAILED;
4334 goto out;
4335 }
4336 /* Reset load balance info */
4337 if (fusion->load_balance_info)
4338 memset(fusion->load_balance_info, 0,
4339 (sizeof(struct LD_LOAD_BALANCE_INFO) *
4340 MAX_LOGICAL_DRIVES_EXT));
4341
4342 if (!megasas_get_map_info(instance))
4343 megasas_sync_map_info(instance);
4344
4345 megasas_setup_jbod_map(instance);
4346
4347 shost_for_each_device(sdev, shost)
4348 megasas_set_dynamic_target_properties(sdev);
4349
4350 /* reset stream detection array */
4351 if (instance->is_ventura) {
4352 for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4353 memset(fusion->stream_detect_by_ld[j],
4354 0, sizeof(struct LD_STREAM_DETECT));
4355 fusion->stream_detect_by_ld[j]->mru_bit_map
4356 = MR_STREAM_BITMAP;
4357 }
4358 }
4359
4360 clear_bit(MEGASAS_FUSION_IN_RESET,
4361 &instance->reset_flags);
4362 instance->instancet->enable_intr(instance);
4363 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
4364
4365 dev_info(&instance->pdev->dev, "Interrupts are enabled and"
4366 " controller is OPERATIONAL for scsi:%d\n",
4367 instance->host->host_no);
4368
4369 /* Restart SR-IOV heartbeat */
4370 if (instance->requestorId) {
4371 if (!megasas_sriov_start_heartbeat(instance, 0))
4372 megasas_start_timer(instance,
4373 &instance->sriov_heartbeat_timer,
4374 megasas_sriov_heartbeat_handler,
4375 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
4376 else
4377 instance->skip_heartbeat_timer_del = 1;
4378 }
4379
4380 if (instance->crash_dump_drv_support &&
4381 instance->crash_dump_app_support)
4382 megasas_set_crash_dump_params(instance,
4383 MR_CRASH_BUF_TURN_ON);
4384 else
4385 megasas_set_crash_dump_params(instance,
4386 MR_CRASH_BUF_TURN_OFF);
4387
4388 retval = SUCCESS;
4389
4390 /* Adapter reset completed successfully */
4391 dev_warn(&instance->pdev->dev,
4392 "Reset successful for scsi%d.\n",
4393 instance->host->host_no);
4394
4395 goto out;
4396 }
4397 fail_kill_adapter:
4398 /* Reset failed, kill the adapter */
4399 dev_warn(&instance->pdev->dev, "Reset failed, killing "
4400 "adapter scsi%d.\n", instance->host->host_no);
4401 megaraid_sas_kill_hba(instance);
4402 instance->skip_heartbeat_timer_del = 1;
4403 retval = FAILED;
4404 } else {
4405 /* For VF: Restart HB timer if we didn't OCR */
4406 if (instance->requestorId) {
4407 megasas_start_timer(instance,
4408 &instance->sriov_heartbeat_timer,
4409 megasas_sriov_heartbeat_handler,
4410 MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
4411 }
4412 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4413 instance->instancet->enable_intr(instance);
4414 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
4415 }
4416 out:
4417 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4418 mutex_unlock(&instance->reset_mutex);
4419 return retval;
4420 }
4421
4422 /* Fusion Crash dump collection work queue */
4423 void megasas_fusion_crash_dump_wq(struct work_struct *work)
4424 {
4425 struct megasas_instance *instance =
4426 container_of(work, struct megasas_instance, crash_init);
4427 u32 status_reg;
4428 u8 partial_copy = 0;
4429
4430
4431 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
4432
4433 /*
4434 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
4435 * to host crash buffers
4436 */
4437 if (instance->drv_buf_index == 0) {
4438 /* Buffer is already allocated for old Crash dump.
4439 * Do OCR and do not wait for crash dump collection
4440 */
4441 if (instance->drv_buf_alloc) {
4442 dev_info(&instance->pdev->dev, "earlier crash dump is "
4443 "not yet copied by application, ignoring this "
4444 "crash dump and initiating OCR\n");
4445 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
4446 writel(status_reg,
4447 &instance->reg_set->outbound_scratch_pad);
4448 readl(&instance->reg_set->outbound_scratch_pad);
4449 return;
4450 }
4451 megasas_alloc_host_crash_buffer(instance);
4452 dev_info(&instance->pdev->dev, "Number of host crash buffers "
4453 "allocated: %d\n", instance->drv_buf_alloc);
4454 }
4455
4456 /*
4457 * Driver has allocated max buffers, which can be allocated
4458 * and FW has more crash dump data, then driver will
4459 * ignore the data.
4460 */
4461 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
4462 dev_info(&instance->pdev->dev, "Driver is done copying "
4463 "the buffer: %d\n", instance->drv_buf_alloc);
4464 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
4465 partial_copy = 1;
4466 } else {
4467 memcpy(instance->crash_buf[instance->drv_buf_index],
4468 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
4469 instance->drv_buf_index++;
4470 status_reg &= ~MFI_STATE_DMADONE;
4471 }
4472
4473 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
4474 dev_info(&instance->pdev->dev, "Crash Dump is available,number "
4475 "of copied buffers: %d\n", instance->drv_buf_index);
4476 instance->fw_crash_buffer_size = instance->drv_buf_index;
4477 instance->fw_crash_state = AVAILABLE;
4478 instance->drv_buf_index = 0;
4479 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
4480 readl(&instance->reg_set->outbound_scratch_pad);
4481 if (!partial_copy)
4482 megasas_reset_fusion(instance->host, 0);
4483 } else {
4484 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
4485 readl(&instance->reg_set->outbound_scratch_pad);
4486 }
4487 }
4488
4489
4490 /* Fusion OCR work queue */
4491 void megasas_fusion_ocr_wq(struct work_struct *work)
4492 {
4493 struct megasas_instance *instance =
4494 container_of(work, struct megasas_instance, work_init);
4495
4496 megasas_reset_fusion(instance->host, 0);
4497 }
4498
4499 /* Allocate fusion context */
4500 int
4501 megasas_alloc_fusion_context(struct megasas_instance *instance)
4502 {
4503 struct fusion_context *fusion;
4504
4505 instance->ctrl_context_pages = get_order(sizeof(struct fusion_context));
4506 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4507 instance->ctrl_context_pages);
4508 if (!instance->ctrl_context) {
4509 /* fall back to using vmalloc for fusion_context */
4510 instance->ctrl_context = vzalloc(sizeof(struct fusion_context));
4511 if (!instance->ctrl_context) {
4512 dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__);
4513 return -ENOMEM;
4514 }
4515 }
4516
4517 fusion = instance->ctrl_context;
4518
4519 fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
4520 sizeof(struct LD_LOAD_BALANCE_INFO));
4521 fusion->load_balance_info =
4522 (struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
4523 fusion->load_balance_info_pages);
4524 if (!fusion->load_balance_info) {
4525 fusion->load_balance_info = vzalloc(MAX_LOGICAL_DRIVES_EXT *
4526 sizeof(struct LD_LOAD_BALANCE_INFO));
4527 if (!fusion->load_balance_info)
4528 dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
4529 "continuing without Load Balance support\n");
4530 }
4531
4532 return 0;
4533 }
4534
4535 void
4536 megasas_free_fusion_context(struct megasas_instance *instance)
4537 {
4538 struct fusion_context *fusion = instance->ctrl_context;
4539
4540 if (fusion) {
4541 if (fusion->load_balance_info) {
4542 if (is_vmalloc_addr(fusion->load_balance_info))
4543 vfree(fusion->load_balance_info);
4544 else
4545 free_pages((ulong)fusion->load_balance_info,
4546 fusion->load_balance_info_pages);
4547 }
4548
4549 if (is_vmalloc_addr(fusion))
4550 vfree(fusion);
4551 else
4552 free_pages((ulong)fusion,
4553 instance->ctrl_context_pages);
4554 }
4555 }
4556
4557 struct megasas_instance_template megasas_instance_template_fusion = {
4558 .enable_intr = megasas_enable_intr_fusion,
4559 .disable_intr = megasas_disable_intr_fusion,
4560 .clear_intr = megasas_clear_intr_fusion,
4561 .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
4562 .adp_reset = megasas_adp_reset_fusion,
4563 .check_reset = megasas_check_reset_fusion,
4564 .service_isr = megasas_isr_fusion,
4565 .tasklet = megasas_complete_cmd_dpc_fusion,
4566 .init_adapter = megasas_init_adapter_fusion,
4567 .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
4568 .issue_dcmd = megasas_issue_dcmd_fusion,
4569 };