]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/scsi/mpt3sas/mpt3sas_base.c
Merge branches 'for-4.11/upstream-fixes', 'for-4.12/accutouch', 'for-4.12/cp2112...
[mirror_ubuntu-artful-kernel.git] / drivers / scsi / mpt3sas / mpt3sas_base.c
CommitLineData
f92363d1
SR
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
a4ffce0d 6 * Copyright (C) 2012-2014 LSI Corporation
a03bd153
SR
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
f92363d1
SR
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
f92363d1
SR
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/io.h>
59#include <linux/time.h>
23409bd4 60#include <linux/ktime.h>
f92363d1
SR
61#include <linux/kthread.h>
62#include <linux/aer.h>
63
64
65#include "mpt3sas_base.h"
66
67static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
68
69
70#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
71
72 /* maximum controller queue depth */
73#define MAX_HBA_QUEUE_DEPTH 30000
74#define MAX_CHAIN_DEPTH 100000
75static int max_queue_depth = -1;
76module_param(max_queue_depth, int, 0);
77MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
78
79static int max_sgl_entries = -1;
80module_param(max_sgl_entries, int, 0);
81MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
82
83static int msix_disable = -1;
84module_param(msix_disable, int, 0);
85MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
86
64038301
SPS
87static int smp_affinity_enable = 1;
88module_param(smp_affinity_enable, int, S_IRUGO);
89MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disbale Default: enable(1)");
90
fb77bb53 91static int max_msix_vectors = -1;
9c500060
SR
92module_param(max_msix_vectors, int, 0);
93MODULE_PARM_DESC(max_msix_vectors,
fb77bb53 94 " max msix vectors");
f92363d1
SR
95
96static int mpt3sas_fwfault_debug;
97MODULE_PARM_DESC(mpt3sas_fwfault_debug,
98 " enable detection of firmware fault and halt firmware - (default=0)");
99
9b05c91a 100static int
98c56ad3 101_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
f92363d1
SR
102
103/**
104 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
105 *
106 */
107static int
108_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
109{
110 int ret = param_set_int(val, kp);
111 struct MPT3SAS_ADAPTER *ioc;
112
113 if (ret)
114 return ret;
115
08c4d550 116 /* global ioc spinlock to protect controller list on list operations */
f92363d1 117 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
08c4d550 118 spin_lock(&gioc_lock);
f92363d1
SR
119 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
120 ioc->fwfault_debug = mpt3sas_fwfault_debug;
08c4d550 121 spin_unlock(&gioc_lock);
f92363d1
SR
122 return 0;
123}
124module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
125 param_get_int, &mpt3sas_fwfault_debug, 0644);
126
127/**
128 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
129 * @arg: input argument, used to derive ioc
130 *
131 * Return 0 if controller is removed from pci subsystem.
132 * Return -1 for other case.
133 */
134static int mpt3sas_remove_dead_ioc_func(void *arg)
135{
136 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
137 struct pci_dev *pdev;
138
139 if ((ioc == NULL))
140 return -1;
141
142 pdev = ioc->pdev;
143 if ((pdev == NULL))
144 return -1;
64cdb418 145 pci_stop_and_remove_bus_device_locked(pdev);
f92363d1
SR
146 return 0;
147}
148
149/**
150 * _base_fault_reset_work - workq handling ioc fault conditions
151 * @work: input argument, used to derive ioc
152 * Context: sleep.
153 *
154 * Return nothing.
155 */
156static void
157_base_fault_reset_work(struct work_struct *work)
158{
159 struct MPT3SAS_ADAPTER *ioc =
160 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
161 unsigned long flags;
162 u32 doorbell;
163 int rc;
164 struct task_struct *p;
165
166
167 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
16e179bd 168 if (ioc->shost_recovery || ioc->pci_error_recovery)
f92363d1
SR
169 goto rearm_timer;
170 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
171
172 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
173 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
174 pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
175 ioc->name);
176
16e179bd
SR
177 /* It may be possible that EEH recovery can resolve some of
178 * pci bus failure issues rather removing the dead ioc function
179 * by considering controller is in a non-operational state. So
180 * here priority is given to the EEH recovery. If it doesn't
181 * not resolve this issue, mpt3sas driver will consider this
182 * controller to non-operational state and remove the dead ioc
183 * function.
184 */
185 if (ioc->non_operational_loop++ < 5) {
186 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
187 flags);
188 goto rearm_timer;
189 }
190
f92363d1
SR
191 /*
192 * Call _scsih_flush_pending_cmds callback so that we flush all
193 * pending commands back to OS. This call is required to aovid
194 * deadlock at block layer. Dead IOC will fail to do diag reset,
195 * and this call is safe since dead ioc will never return any
196 * command back from HW.
197 */
198 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
199 /*
200 * Set remove_host flag early since kernel thread will
201 * take some time to execute.
202 */
203 ioc->remove_host = 1;
204 /*Remove the Dead Host */
205 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
c84b06a4 206 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
f92363d1
SR
207 if (IS_ERR(p))
208 pr_err(MPT3SAS_FMT
209 "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
210 ioc->name, __func__);
211 else
212 pr_err(MPT3SAS_FMT
213 "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
214 ioc->name, __func__);
215 return; /* don't rearm timer */
216 }
217
16e179bd
SR
218 ioc->non_operational_loop = 0;
219
f92363d1 220 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
98c56ad3 221 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1
SR
222 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
223 __func__, (rc == 0) ? "success" : "failed");
224 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
225 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
226 mpt3sas_base_fault_info(ioc, doorbell &
227 MPI2_DOORBELL_DATA_MASK);
228 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
229 MPI2_IOC_STATE_OPERATIONAL)
230 return; /* don't rearm timer */
231 }
232
233 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
234 rearm_timer:
235 if (ioc->fault_reset_work_q)
236 queue_delayed_work(ioc->fault_reset_work_q,
237 &ioc->fault_reset_work,
238 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
239 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
240}
241
242/**
243 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
244 * @ioc: per adapter object
245 * Context: sleep.
246 *
247 * Return nothing.
248 */
249void
250mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
251{
252 unsigned long flags;
253
254 if (ioc->fault_reset_work_q)
255 return;
256
257 /* initialize fault polling */
258
259 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
260 snprintf(ioc->fault_reset_work_q_name,
c84b06a4
SR
261 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
262 ioc->driver_name, ioc->id);
f92363d1
SR
263 ioc->fault_reset_work_q =
264 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
265 if (!ioc->fault_reset_work_q) {
266 pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
267 ioc->name, __func__, __LINE__);
268 return;
269 }
270 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
271 if (ioc->fault_reset_work_q)
272 queue_delayed_work(ioc->fault_reset_work_q,
273 &ioc->fault_reset_work,
274 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
275 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
276}
277
278/**
279 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
280 * @ioc: per adapter object
281 * Context: sleep.
282 *
283 * Return nothing.
284 */
285void
286mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
287{
288 unsigned long flags;
289 struct workqueue_struct *wq;
290
291 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
292 wq = ioc->fault_reset_work_q;
293 ioc->fault_reset_work_q = NULL;
294 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
295 if (wq) {
4dc06fd8 296 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
f92363d1
SR
297 flush_workqueue(wq);
298 destroy_workqueue(wq);
299 }
300}
301
302/**
303 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
304 * @ioc: per adapter object
305 * @fault_code: fault code
306 *
307 * Return nothing.
308 */
309void
310mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
311{
312 pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
313 ioc->name, fault_code);
314}
315
316/**
317 * mpt3sas_halt_firmware - halt's mpt controller firmware
318 * @ioc: per adapter object
319 *
320 * For debugging timeout related issues. Writing 0xCOFFEE00
321 * to the doorbell register will halt controller firmware. With
322 * the purpose to stop both driver and firmware, the enduser can
323 * obtain a ring buffer from controller UART.
324 */
325void
326mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
327{
328 u32 doorbell;
329
330 if (!ioc->fwfault_debug)
331 return;
332
333 dump_stack();
334
335 doorbell = readl(&ioc->chip->Doorbell);
336 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
337 mpt3sas_base_fault_info(ioc , doorbell);
338 else {
339 writel(0xC0FFEE00, &ioc->chip->Doorbell);
340 pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
341 ioc->name);
342 }
343
344 if (ioc->fwfault_debug == 2)
345 for (;;)
346 ;
347 else
348 panic("panic in %s\n", __func__);
349}
350
f92363d1
SR
351/**
352 * _base_sas_ioc_info - verbose translation of the ioc status
353 * @ioc: per adapter object
354 * @mpi_reply: reply mf payload returned from firmware
355 * @request_hdr: request mf
356 *
357 * Return nothing.
358 */
359static void
360_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
361 MPI2RequestHeader_t *request_hdr)
362{
363 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
364 MPI2_IOCSTATUS_MASK;
365 char *desc = NULL;
366 u16 frame_sz;
367 char *func_str = NULL;
368
369 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
370 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
371 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
372 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
373 return;
374
375 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
376 return;
377
378 switch (ioc_status) {
379
380/****************************************************************************
381* Common IOCStatus values for all replies
382****************************************************************************/
383
384 case MPI2_IOCSTATUS_INVALID_FUNCTION:
385 desc = "invalid function";
386 break;
387 case MPI2_IOCSTATUS_BUSY:
388 desc = "busy";
389 break;
390 case MPI2_IOCSTATUS_INVALID_SGL:
391 desc = "invalid sgl";
392 break;
393 case MPI2_IOCSTATUS_INTERNAL_ERROR:
394 desc = "internal error";
395 break;
396 case MPI2_IOCSTATUS_INVALID_VPID:
397 desc = "invalid vpid";
398 break;
399 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
400 desc = "insufficient resources";
401 break;
b130b0d5
SS
402 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
403 desc = "insufficient power";
404 break;
f92363d1
SR
405 case MPI2_IOCSTATUS_INVALID_FIELD:
406 desc = "invalid field";
407 break;
408 case MPI2_IOCSTATUS_INVALID_STATE:
409 desc = "invalid state";
410 break;
411 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
412 desc = "op state not supported";
413 break;
414
415/****************************************************************************
416* Config IOCStatus values
417****************************************************************************/
418
419 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
420 desc = "config invalid action";
421 break;
422 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
423 desc = "config invalid type";
424 break;
425 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
426 desc = "config invalid page";
427 break;
428 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
429 desc = "config invalid data";
430 break;
431 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
432 desc = "config no defaults";
433 break;
434 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
435 desc = "config cant commit";
436 break;
437
438/****************************************************************************
439* SCSI IO Reply
440****************************************************************************/
441
442 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
443 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
444 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
445 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
446 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
447 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
448 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
449 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
450 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
451 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
452 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
453 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
454 break;
455
456/****************************************************************************
457* For use by SCSI Initiator and SCSI Target end-to-end data protection
458****************************************************************************/
459
460 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
461 desc = "eedp guard error";
462 break;
463 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
464 desc = "eedp ref tag error";
465 break;
466 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
467 desc = "eedp app tag error";
468 break;
469
470/****************************************************************************
471* SCSI Target values
472****************************************************************************/
473
474 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
475 desc = "target invalid io index";
476 break;
477 case MPI2_IOCSTATUS_TARGET_ABORTED:
478 desc = "target aborted";
479 break;
480 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
481 desc = "target no conn retryable";
482 break;
483 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
484 desc = "target no connection";
485 break;
486 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
487 desc = "target xfer count mismatch";
488 break;
489 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
490 desc = "target data offset error";
491 break;
492 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
493 desc = "target too much write data";
494 break;
495 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
496 desc = "target iu too short";
497 break;
498 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
499 desc = "target ack nak timeout";
500 break;
501 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
502 desc = "target nak received";
503 break;
504
505/****************************************************************************
506* Serial Attached SCSI values
507****************************************************************************/
508
509 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
510 desc = "smp request failed";
511 break;
512 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
513 desc = "smp data overrun";
514 break;
515
516/****************************************************************************
517* Diagnostic Buffer Post / Diagnostic Release values
518****************************************************************************/
519
520 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
521 desc = "diagnostic released";
522 break;
523 default:
524 break;
525 }
526
527 if (!desc)
528 return;
529
530 switch (request_hdr->Function) {
531 case MPI2_FUNCTION_CONFIG:
532 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
533 func_str = "config_page";
534 break;
535 case MPI2_FUNCTION_SCSI_TASK_MGMT:
536 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
537 func_str = "task_mgmt";
538 break;
539 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
540 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
541 func_str = "sas_iounit_ctl";
542 break;
543 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
544 frame_sz = sizeof(Mpi2SepRequest_t);
545 func_str = "enclosure";
546 break;
547 case MPI2_FUNCTION_IOC_INIT:
548 frame_sz = sizeof(Mpi2IOCInitRequest_t);
549 func_str = "ioc_init";
550 break;
551 case MPI2_FUNCTION_PORT_ENABLE:
552 frame_sz = sizeof(Mpi2PortEnableRequest_t);
553 func_str = "port_enable";
554 break;
555 case MPI2_FUNCTION_SMP_PASSTHROUGH:
556 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
557 func_str = "smp_passthru";
558 break;
559 default:
560 frame_sz = 32;
561 func_str = "unknown";
562 break;
563 }
564
565 pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
566 ioc->name, desc, ioc_status, request_hdr, func_str);
567
568 _debug_dump_mf(request_hdr, frame_sz/4);
569}
570
571/**
572 * _base_display_event_data - verbose translation of firmware asyn events
573 * @ioc: per adapter object
574 * @mpi_reply: reply mf payload returned from firmware
575 *
576 * Return nothing.
577 */
578static void
579_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
580 Mpi2EventNotificationReply_t *mpi_reply)
581{
582 char *desc = NULL;
583 u16 event;
584
585 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
586 return;
587
588 event = le16_to_cpu(mpi_reply->Event);
589
590 switch (event) {
591 case MPI2_EVENT_LOG_DATA:
592 desc = "Log Data";
593 break;
594 case MPI2_EVENT_STATE_CHANGE:
595 desc = "Status Change";
596 break;
597 case MPI2_EVENT_HARD_RESET_RECEIVED:
598 desc = "Hard Reset Received";
599 break;
600 case MPI2_EVENT_EVENT_CHANGE:
601 desc = "Event Change";
602 break;
603 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
604 desc = "Device Status Change";
605 break;
606 case MPI2_EVENT_IR_OPERATION_STATUS:
7786ab6a
SR
607 if (!ioc->hide_ir_msg)
608 desc = "IR Operation Status";
f92363d1
SR
609 break;
610 case MPI2_EVENT_SAS_DISCOVERY:
611 {
612 Mpi2EventDataSasDiscovery_t *event_data =
613 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
614 pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
615 (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
616 "start" : "stop");
617 if (event_data->DiscoveryStatus)
618 pr_info("discovery_status(0x%08x)",
619 le32_to_cpu(event_data->DiscoveryStatus));
620 pr_info("\n");
621 return;
622 }
623 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
624 desc = "SAS Broadcast Primitive";
625 break;
626 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
627 desc = "SAS Init Device Status Change";
628 break;
629 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
630 desc = "SAS Init Table Overflow";
631 break;
632 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
633 desc = "SAS Topology Change List";
634 break;
635 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
636 desc = "SAS Enclosure Device Status Change";
637 break;
638 case MPI2_EVENT_IR_VOLUME:
7786ab6a
SR
639 if (!ioc->hide_ir_msg)
640 desc = "IR Volume";
f92363d1
SR
641 break;
642 case MPI2_EVENT_IR_PHYSICAL_DISK:
7786ab6a
SR
643 if (!ioc->hide_ir_msg)
644 desc = "IR Physical Disk";
f92363d1
SR
645 break;
646 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7786ab6a
SR
647 if (!ioc->hide_ir_msg)
648 desc = "IR Configuration Change List";
f92363d1
SR
649 break;
650 case MPI2_EVENT_LOG_ENTRY_ADDED:
7786ab6a
SR
651 if (!ioc->hide_ir_msg)
652 desc = "Log Entry Added";
f92363d1 653 break;
2d8ce8c9
SR
654 case MPI2_EVENT_TEMP_THRESHOLD:
655 desc = "Temperature Threshold";
656 break;
a470a51c
C
657 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
658 desc = "Active cable exception";
659 break;
f92363d1
SR
660 }
661
662 if (!desc)
663 return;
664
665 pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
666}
f92363d1
SR
667
668/**
669 * _base_sas_log_info - verbose translation of firmware log info
670 * @ioc: per adapter object
671 * @log_info: log info
672 *
673 * Return nothing.
674 */
675static void
676_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
677{
678 union loginfo_type {
679 u32 loginfo;
680 struct {
681 u32 subcode:16;
682 u32 code:8;
683 u32 originator:4;
684 u32 bus_type:4;
685 } dw;
686 };
687 union loginfo_type sas_loginfo;
688 char *originator_str = NULL;
689
690 sas_loginfo.loginfo = log_info;
691 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
692 return;
693
694 /* each nexus loss loginfo */
695 if (log_info == 0x31170000)
696 return;
697
698 /* eat the loginfos associated with task aborts */
699 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
700 0x31140000 || log_info == 0x31130000))
701 return;
702
703 switch (sas_loginfo.dw.originator) {
704 case 0:
705 originator_str = "IOP";
706 break;
707 case 1:
708 originator_str = "PL";
709 break;
710 case 2:
7786ab6a
SR
711 if (!ioc->hide_ir_msg)
712 originator_str = "IR";
713 else
714 originator_str = "WarpDrive";
f92363d1
SR
715 break;
716 }
717
718 pr_warn(MPT3SAS_FMT
719 "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
720 ioc->name, log_info,
721 originator_str, sas_loginfo.dw.code,
722 sas_loginfo.dw.subcode);
723}
724
725/**
726 * _base_display_reply_info -
727 * @ioc: per adapter object
728 * @smid: system request message index
729 * @msix_index: MSIX table index supplied by the OS
730 * @reply: reply message frame(lower 32bit addr)
731 *
732 * Return nothing.
733 */
734static void
735_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
736 u32 reply)
737{
738 MPI2DefaultReply_t *mpi_reply;
739 u16 ioc_status;
740 u32 loginfo = 0;
741
742 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
743 if (unlikely(!mpi_reply)) {
744 pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
745 ioc->name, __FILE__, __LINE__, __func__);
746 return;
747 }
748 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
af009411 749
f92363d1
SR
750 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
751 (ioc->logging_level & MPT_DEBUG_REPLY)) {
752 _base_sas_ioc_info(ioc , mpi_reply,
753 mpt3sas_base_get_msg_frame(ioc, smid));
754 }
af009411 755
f92363d1
SR
756 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
757 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
758 _base_sas_log_info(ioc, loginfo);
759 }
760
761 if (ioc_status || loginfo) {
762 ioc_status &= MPI2_IOCSTATUS_MASK;
763 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
764 }
765}
766
767/**
768 * mpt3sas_base_done - base internal command completion routine
769 * @ioc: per adapter object
770 * @smid: system request message index
771 * @msix_index: MSIX table index supplied by the OS
772 * @reply: reply message frame(lower 32bit addr)
773 *
774 * Return 1 meaning mf should be freed from _base_interrupt
775 * 0 means the mf is freed from this function.
776 */
777u8
778mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
779 u32 reply)
780{
781 MPI2DefaultReply_t *mpi_reply;
782
783 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
784 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
fd0331b3 785 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
f92363d1
SR
786
787 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
788 return 1;
789
790 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
791 if (mpi_reply) {
792 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
793 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
794 }
795 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
796
797 complete(&ioc->base_cmds.done);
798 return 1;
799}
800
801/**
802 * _base_async_event - main callback handler for firmware asyn events
803 * @ioc: per adapter object
804 * @msix_index: MSIX table index supplied by the OS
805 * @reply: reply message frame(lower 32bit addr)
806 *
807 * Return 1 meaning mf should be freed from _base_interrupt
808 * 0 means the mf is freed from this function.
809 */
810static u8
811_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
812{
813 Mpi2EventNotificationReply_t *mpi_reply;
814 Mpi2EventAckRequest_t *ack_request;
815 u16 smid;
fd0331b3 816 struct _event_ack_list *delayed_event_ack;
f92363d1
SR
817
818 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
819 if (!mpi_reply)
820 return 1;
821 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
822 return 1;
af009411 823
f92363d1 824 _base_display_event_data(ioc, mpi_reply);
af009411 825
f92363d1
SR
826 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
827 goto out;
828 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
829 if (!smid) {
fd0331b3
SS
830 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
831 GFP_ATOMIC);
832 if (!delayed_event_ack)
833 goto out;
834 INIT_LIST_HEAD(&delayed_event_ack->list);
835 delayed_event_ack->Event = mpi_reply->Event;
836 delayed_event_ack->EventContext = mpi_reply->EventContext;
837 list_add_tail(&delayed_event_ack->list,
838 &ioc->delayed_event_ack_list);
839 dewtprintk(ioc, pr_info(MPT3SAS_FMT
840 "DELAYED: EVENT ACK: event (0x%04x)\n",
841 ioc->name, le16_to_cpu(mpi_reply->Event)));
f92363d1
SR
842 goto out;
843 }
844
845 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
846 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
847 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
848 ack_request->Event = mpi_reply->Event;
849 ack_request->EventContext = mpi_reply->EventContext;
850 ack_request->VF_ID = 0; /* TODO */
851 ack_request->VP_ID = 0;
81c16f83 852 ioc->put_smid_default(ioc, smid);
f92363d1
SR
853
854 out:
855
856 /* scsih callback handler */
857 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
858
859 /* ctl callback handler */
860 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
861
862 return 1;
863}
864
865/**
866 * _base_get_cb_idx - obtain the callback index
867 * @ioc: per adapter object
868 * @smid: system request message index
869 *
870 * Return callback index.
871 */
872static u8
873_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
874{
875 int i;
876 u8 cb_idx;
877
878 if (smid < ioc->hi_priority_smid) {
879 i = smid - 1;
880 cb_idx = ioc->scsi_lookup[i].cb_idx;
881 } else if (smid < ioc->internal_smid) {
882 i = smid - ioc->hi_priority_smid;
883 cb_idx = ioc->hpr_lookup[i].cb_idx;
884 } else if (smid <= ioc->hba_queue_depth) {
885 i = smid - ioc->internal_smid;
886 cb_idx = ioc->internal_lookup[i].cb_idx;
887 } else
888 cb_idx = 0xFF;
889 return cb_idx;
890}
891
892/**
893 * _base_mask_interrupts - disable interrupts
894 * @ioc: per adapter object
895 *
896 * Disabling ResetIRQ, Reply and Doorbell Interrupts
897 *
898 * Return nothing.
899 */
900static void
901_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
902{
903 u32 him_register;
904
905 ioc->mask_interrupts = 1;
906 him_register = readl(&ioc->chip->HostInterruptMask);
907 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
908 writel(him_register, &ioc->chip->HostInterruptMask);
909 readl(&ioc->chip->HostInterruptMask);
910}
911
912/**
913 * _base_unmask_interrupts - enable interrupts
914 * @ioc: per adapter object
915 *
916 * Enabling only Reply Interrupts
917 *
918 * Return nothing.
919 */
920static void
921_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
922{
923 u32 him_register;
924
925 him_register = readl(&ioc->chip->HostInterruptMask);
926 him_register &= ~MPI2_HIM_RIM;
927 writel(him_register, &ioc->chip->HostInterruptMask);
928 ioc->mask_interrupts = 0;
929}
930
931union reply_descriptor {
932 u64 word;
933 struct {
934 u32 low;
935 u32 high;
936 } u;
937};
938
939/**
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: interrupt cookie == pointer to the adapter_reply_queue that
 *	was registered for this vector in _base_request_irq()
 *
 * Drains the reply descriptor post queue for this MSI-X vector: dispatches
 * each completed descriptor to the registered callback, recycles address
 * reply frames onto the reply free queue, and finally writes the updated
 * reply post host index back to the IOC.
 *
 * Return IRQ_HANDLED if at least one descriptor was processed,
 * else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* ->busy acts as a 0/1 guard: only one pass through this handler
	 * per reply queue at a time; a concurrent invocation bails out.
	 */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	/* UNUSED type means the IOC has posted nothing new here */
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* an all-ones descriptor is one we already consumed and
		 * marked with ULLONG_MAX below - stop draining
		 */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
			/* success path: no reply frame, just complete smid */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* reject frame addresses outside the reply pool */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid == 0: unsolicited async event */
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling: hand the reply frame
			 * back to the IOC for reuse
			 */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				/* frame address must be visible before the
				 * index write reaches the IOC
				 */
				wmb();
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* mark descriptor consumed (see all-ones check above) */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds > ioc->hba_queue_depth/3) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index  & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			completed_cmds = 1;
		}
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;	/* wrapped to start */
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	/* order descriptor consumption before the host index update */
	wmb();
	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}
1111
1112/**
1113 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1114 * @ioc: per adapter object
1115 *
1116 */
1117static inline int
1118_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1119{
1120 return (ioc->facts.IOCCapabilities &
1121 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1122}
1123
1124/**
5f0dfb7a 1125 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
f92363d1 1126 * @ioc: per adapter object
5f0dfb7a 1127 * Context: non ISR conext
f92363d1 1128 *
5f0dfb7a 1129 * Called when a Task Management request has completed.
f92363d1
SR
1130 *
1131 * Return nothing.
1132 */
1133void
5f0dfb7a 1134mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
1135{
1136 struct adapter_reply_queue *reply_q;
1137
1138 /* If MSIX capability is turned off
1139 * then multi-queues are not enabled
1140 */
1141 if (!_base_is_controller_msix_enabled(ioc))
1142 return;
1143
1144 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5f0dfb7a
C
1145 if (ioc->shost_recovery || ioc->remove_host ||
1146 ioc->pci_error_recovery)
f92363d1
SR
1147 return;
1148 /* TMs are on msix_index == 0 */
1149 if (reply_q->msix_index == 0)
1150 continue;
5f0dfb7a 1151 synchronize_irq(reply_q->vector);
f92363d1
SR
1152 }
1153}
1154
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index previously returned by
 *	mpt3sas_base_register_callback_handler()
 *
 * Clears the slot so _base_interrupt() no longer dispatches to it.
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
1166
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function invoked from _base_interrupt() on completion
 *
 * Scans the callback table from the highest index downwards for a free
 * slot and installs @cb_func there.
 *
 * Returns the slot index the callback was stored at.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	/* NOTE(review): the loop terminates at cb_idx == 0 without testing
	 * that slot, so if every slot 1..MPT_MAX_CALLBACKS-1 is taken,
	 * mpt_callbacks[0] is silently overwritten - callers are presumably
	 * expected never to exhaust the table; confirm against users.
	 */
	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
1185
1186/**
1187 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
1188 *
1189 * Return nothing.
1190 */
1191void
1192mpt3sas_base_initialize_callback_handler(void)
1193{
1194 u8 cb_idx;
1195
1196 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1197 mpt3sas_base_release_callback_handler(cb_idx);
1198}
1199
1200
1201/**
1202 * _base_build_zero_len_sge - build zero length sg entry
1203 * @ioc: per adapter object
1204 * @paddr: virtual address for SGE
1205 *
1206 * Create a zero length scatter gather entry to insure the IOCs hardware has
1207 * something to use if the target device goes brain dead and tries
1208 * to send data even when none is asked for.
1209 *
1210 * Return nothing.
1211 */
1212static void
1213_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1214{
1215 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1216 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1217 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1218 MPI2_SGE_FLAGS_SHIFT);
1219 ioc->base_add_sg_single(paddr, flags_length, -1);
1220}
1221
1222/**
1223 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
1224 * @paddr: virtual address for SGE
1225 * @flags_length: SGE flags and data transfer length
1226 * @dma_addr: Physical address
1227 *
1228 * Return nothing.
1229 */
1230static void
1231_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1232{
1233 Mpi2SGESimple32_t *sgel = paddr;
1234
1235 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1236 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1237 sgel->FlagsLength = cpu_to_le32(flags_length);
1238 sgel->Address = cpu_to_le32(dma_addr);
1239}
1240
1241
1242/**
1243 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
1244 * @paddr: virtual address for SGE
1245 * @flags_length: SGE flags and data transfer length
1246 * @dma_addr: Physical address
1247 *
1248 * Return nothing.
1249 */
1250static void
1251_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1252{
1253 Mpi2SGESimple64_t *sgel = paddr;
1254
1255 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1256 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1257 sgel->FlagsLength = cpu_to_le32(flags_length);
1258 sgel->Address = cpu_to_le64(dma_addr);
1259}
1260
1261/**
1262 * _base_get_chain_buffer_tracker - obtain chain tracker
1263 * @ioc: per adapter object
1264 * @smid: smid associated to an IO request
1265 *
1266 * Returns chain tracker(from ioc->free_chain_list)
1267 */
1268static struct chain_tracker *
1269_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1270{
1271 struct chain_tracker *chain_req;
1272 unsigned long flags;
1273
1274 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1275 if (list_empty(&ioc->free_chain_list)) {
1276 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1277 dfailprintk(ioc, pr_warn(MPT3SAS_FMT
1278 "chain buffers not available\n", ioc->name));
1279 return NULL;
1280 }
1281 chain_req = list_entry(ioc->free_chain_list.next,
1282 struct chain_tracker, tracker_list);
1283 list_del_init(&chain_req->tracker_list);
1284 list_add_tail(&chain_req->tracker_list,
1285 &ioc->scsi_lookup[smid - 1].chain_list);
1286 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1287 return chain_req;
1288}
1289
1290
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Builds an MPI2-format SG list for a request with up to one outbound
 * (WRITE) and one inbound (READ) buffer. With both directions present
 * the WRITE SGE is placed first and the READ SGE terminates the list;
 * with neither, a zero length SGE is substituted.
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last: carries last-element/end-of-list bits */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}
1348
1349/* IEEE format sgls */
1350
1351/**
1352 * _base_add_sg_single_ieee - add sg element for IEEE format
1353 * @paddr: virtual address for SGE
1354 * @flags: SGE flags
1355 * @chain_offset: number of 128 byte elements from start of segment
1356 * @length: data transfer length
1357 * @dma_addr: Physical address
1358 *
1359 * Return nothing.
1360 */
1361static void
1362_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
1363 dma_addr_t dma_addr)
1364{
1365 Mpi25IeeeSgeChain64_t *sgel = paddr;
1366
1367 sgel->Flags = flags;
1368 sgel->NextChainOffset = chain_offset;
1369 sgel->Length = cpu_to_le32(length);
1370 sgel->Address = cpu_to_le64(dma_addr);
1371}
1372
1373/**
1374 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
1375 * @ioc: per adapter object
1376 * @paddr: virtual address for SGE
1377 *
1378 * Create a zero length scatter gather entry to insure the IOCs hardware has
1379 * something to use if the target device goes brain dead and tries
1380 * to send data even when none is asked for.
1381 *
1382 * Return nothing.
1383 */
1384static void
1385_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1386{
1387 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
1388 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
1389 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
b130b0d5 1390
f92363d1
SR
1391 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
1392}
1393
471ef9d4
SR
/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler. SGEs that do
 * not fit in the main message frame are continued in chain buffers
 * obtained from _base_get_chain_buffer_tracker().
 *
 * Returns 0 success, anything else error (-ENOMEM when the scatterlist
 * cannot be DMA-mapped, -1 when no chain buffer is available).
 */
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags: three precomputed variants - plain
	 * simple element, last element of a segment, and final element
	 * of the whole list
	 */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		 "pci_map_sg failed: request for %d bytes!\n",
		 scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	/* everything fits in the main frame - no chaining needed */
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in 4-byte words from the start of the frame */
	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		/* chain_offset == 0 means this is the final segment */
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			/* account for the next chain element itself */
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment; final SGE gets the end-of-list flags */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}
1536
f92363d1
SR
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler. IEEE SGEs that
 * do not fit in the main message frame are continued in chain buffers
 * obtained from _base_get_chain_buffer_tracker().
 *
 * Returns 0 success, anything else error (-ENOMEM when the scatterlist
 * cannot be DMA-mapped, -1 when no chain buffer is available).
 */
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		 "pci_map_sg failed: request for %d bytes!\n",
		 scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	/* number of IEEE SGEs that fit in the main message frame */
	sges_in_segment = (ioc->request_sz -
	   offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in units of IEEE SGE size; the last main-frame
	 * slot is reserved for the chain element itself
	 */
	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
	    (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

	/* fill in main message segment when there is a chain following;
	 * stop at 1 to leave room for the chain element
	 */
	while (sges_in_segment > 1) {
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the pointers */
	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		/* chain_offset == 0 means this is the final segment */
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			/* account for the next chain element itself */
			chain_length += ioc->sge_size_ieee;
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);

		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);


 fill_in_last_segment:

	/* fill the last segment; final SGE gets the end-of-list flag */
	while (sges_left > 0) {
		if (sges_left == 1)
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}

	return 0;
}
1662
/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * IEEE-format counterpart of _base_build_sg(): up to one outbound
 * (WRITE) and one inbound (READ) buffer; with both present the WRITE
 * SGE comes first and the READ SGE ends the list; with neither, a zero
 * length SGE is substituted.
 *
 * Return nothing.
 */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last: carries the end-of-list flag */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	}
}
1714
1715#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
1716
/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Tries 64-bit streaming DMA first (when dma_addr_t is wide enough and
 * the platform requires it), falling back to 32-bit. On success, selects
 * the matching SGE helper (_base_add_sg_single_64/_32) and SGE size, and
 * records the chosen width in ioc->dma_mask.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	u64 consistent_dma_mask;

	/* ioc->dma_mask non-zero selects a 64-bit coherent mask; see also
	 * _base_change_consistent_dma_mask()
	 */
	if (ioc->dma_mask)
		consistent_dma_mask = DMA_BIT_MASK(64);
	else
		consistent_dma_mask = DMA_BIT_MASK(32);

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			ioc->dma_mask = 64;
			goto out;
		}
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		ioc->dma_mask = 32;
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
f92363d1 1764
9b05c91a
SR
1765static int
1766_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
1767 struct pci_dev *pdev)
1768{
1769 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1770 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1771 return -ENODEV;
1772 }
f92363d1
SR
1773 return 0;
1774}
1775
/**
 * _base_check_enable_msix - checks MSIX capabable.
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSIX, and set number
 * of available msix vectors (ioc->msix_vector_count).
 *
 * Returns 0 when MSI-X can be used, -EINVAL otherwise.
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* Check whether controller SAS2008 B0 controller,
	 * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
			ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */
	/* NUMA_IO not supported for older controllers */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		/* Table Size field of the MSI-X Message Control register:
		 * low 11 bits encode (vector count - 1)
		 */
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"msix is supported, vector_count(%d)\n",
		ioc->name, ioc->msix_vector_count));
	return 0;
}
1823
/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Walks ioc->reply_queue_list, dropping the affinity hint (when SMP
 * affinity is enabled), releasing the irq, and freeing each
 * reply_queue removed from the list.
 */
static void
_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		if (smp_affinity_enable) {
			/* clear the hint before the vector goes away */
			irq_set_affinity_hint(reply_q->vector, NULL);
			free_cpumask_var(reply_q->affinity_hint);
		}
		free_irq(reply_q->vector, reply_q);
		kfree(reply_q);
	}
}
1848
/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 * @vector: irq vector
 *
 * Allocates an adapter_reply_queue for this vector, registers
 * _base_interrupt() on it, and inserts the reply_queue into
 * ioc->reply_queue_list.
 *
 * Returns 0 on success, -ENOMEM or -EBUSY on failure.
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
{
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;
	reply_q->vector = vector;

	if (smp_affinity_enable) {
		if (!zalloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) {
			kfree(reply_q);
			return -ENOMEM;
		}
	}

	atomic_set(&reply_q->busy, 0);
	/* irq name carries the msix index only when MSI-X is active */
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
	    reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		    reply_q->name, vector);
		/* affinity_hint is NULL when not allocated (kzalloc above),
		 * so this free is safe either way
		 */
		free_cpumask_var(reply_q->affinity_hint);
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}
1901
/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * Distributes the online CPUs as evenly as possible over the available
 * MSI-X reply queues via ioc->cpu_msix_table, and (when SMP affinity is
 * enabled) publishes the resulting per-queue CPU mask as an irq
 * affinity hint.
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	cpu = cpumask_first(cpu_online_mask);

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {

		/* base share, plus one extra CPU for the first
		 * (nr_cpus % nr_msix) queues so the remainder is spread
		 */
		unsigned int i, group = nr_cpus / nr_msix;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = index;
			if (smp_affinity_enable)
				cpumask_or(reply_q->affinity_hint,
				   reply_q->affinity_hint, get_cpu_mask(cpu));
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		if (smp_affinity_enable)
			if (irq_set_affinity_hint(reply_q->vector,
					   reply_q->affinity_hint))
				dinitprintk(ioc, pr_info(MPT3SAS_FMT
				 "Err setting affinity hint to irq vector %d\n",
				 ioc->name, reply_q->vector));
		index++;
	}
}
1956
1957/**
1958 * _base_disable_msix - disables msix
1959 * @ioc: per adapter object
1960 *
1961 */
1962static void
1963_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
1964{
1965 if (!ioc->msix_enable)
1966 return;
1967 pci_disable_msix(ioc->pdev);
1968 ioc->msix_enable = 0;
1969}
1970
/**
 * _base_enable_msix - enables msix, failback to io_apic
 * @ioc: per adapter object
 *
 * Sizes the reply queue count from CPU count, firmware capability and
 * the max_msix_vectors module parameter, enables that many MSI-X
 * vectors and requests an irq for each. Any failure along the way
 * falls back to a single legacy IO-APIC interrupt.
 *
 * Returns 0 on success, otherwise the _base_request_irq() error from
 * the IO-APIC fallback path.
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	struct msix_entry *entries, *a;
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	/* no point in more queues than CPUs or firmware vectors */
	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
	    ioc->msix_vector_count);

	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
	  ioc->cpu_count, max_msix_vectors);

	/* without RDPQ support, default (-1) caps at 8 vectors */
	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors > 0) {
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
			ioc->reply_queue_count);
		ioc->msix_vector_count = ioc->reply_queue_count;
	} else if (local_max_msix_vectors == 0)
		goto try_ioapic;	/* module param disabled MSI-X */

	/* not enough vectors to give every CPU its own queue - affinity
	 * hints would be misleading, so turn them off globally
	 */
	if (ioc->msix_vector_count < ioc->cpu_count)
		smp_affinity_enable = 0;

	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
	    GFP_KERNEL);
	if (!entries) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"kcalloc failed @ at %s:%d/%s() !!!\n",
			ioc->name, __FILE__, __LINE__, __func__));
		goto try_ioapic;
	}

	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
		a->entry = i;

	r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
	if (r) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_enable_msix_exact failed (r=%d) !!!\n",
			ioc->name, r));
		kfree(entries);
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
		r = _base_request_irq(ioc, i, a->vector);
		if (r) {
			/* unwind every irq already requested */
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			kfree(entries);
			goto try_ioapic;
		}
	}

	kfree(entries);
	return 0;

/* failback to io_apic interrupt routing */
 try_ioapic:

	ioc->reply_queue_count = 1;
	r = _base_request_irq(ioc, 0, ioc->pdev->irq);

	return r;
}
2058
/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 *
 * Tears down in reverse order of mpt3sas_base_map_resources(): IRQs first,
 * then MSI-X, then the combined-reply-queue index table, the MMIO mapping
 * and finally the PCI regions/device itself.  Each step is guarded so this
 * is safe to call from a partially failed map_resources().
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
		ioc->name, __func__));

	_base_free_irq(ioc);
	_base_disable_msix(ioc);

	if (ioc->combined_reply_queue) {
		kfree(ioc->replyPostRegisterIndex);
		ioc->replyPostRegisterIndex = NULL;
	}

	/* chip_phys doubles as the "mapping exists" flag */
	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}
2090
/**
 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
 * @ioc: per adapter object
 *
 * Enables the PCI device, claims its BARs, configures DMA masking, maps
 * the first memory BAR as the chip register window, reads IOC facts,
 * enables interrupts (MSI-X with IO-APIC fallback) and, where supported,
 * sets up the combined reply queue / warpdrive reply-post index tables.
 * Any failure unwinds through mpt3sas_base_unmap_resources().
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	u64 chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
			ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}


	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
			ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);


	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
		    ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/* find the first I/O-port BAR and the first memory BAR; the memory
	 * BAR is mapped as the chip register window */
	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	     (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = (u64)ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
			" or resource not found\n", ioc->name);
		r = -EINVAL;
		goto out_fail;
	}

	_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_fail;

	/* latch the RDPQ decision once, before the first queue allocation */
	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	/* Use the Combined reply queue feature only for SAS3 C0 & higher
	 * revision HBAs and also only when reply queue count is greater than 8
	 */
	if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
		/* Determine the Supplemental Reply Post Host Index Registers
		 * Address. Supplemental Reply Post Host Index Registers
		 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
		 * each register is at offset bytes of
		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
		 */
		ioc->replyPostRegisterIndex = kcalloc(
		     ioc->combined_reply_index_count,
		     sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->replyPostRegisterIndex) {
			dfailprintk(ioc, printk(MPT3SAS_FMT
			"allocation for reply Post Register Index failed!!!\n",
			   ioc->name));
			r = -ENOMEM;
			goto out_fail;
		}

		for (i = 0; i < ioc->combined_reply_index_count; i++) {
			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
			     ((u8 *)&ioc->chip->Doorbell +
			     MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
			     (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
		}
	} else
		ioc->combined_reply_queue = 0;

	if (ioc->is_warpdrive) {
		/* warpdrive uses per-queue reply-post registers hung off the
		 * Doorbell at 0x4000 */
		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
		    &ioc->chip->ReplyPostHostIndex;

		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
			ioc->reply_post_host_index[i] =
			(resource_size_t __iomem *)
			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
			* 4)));
	}

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"), reply_q->vector);

	pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
	    ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

 out_fail:
	mpt3sas_base_unmap_resources(ioc);
	return r;
}
2240
2241/**
2242 * mpt3sas_base_get_msg_frame - obtain request mf pointer
2243 * @ioc: per adapter object
2244 * @smid: system request message index(smid zero is invalid)
2245 *
2246 * Returns virt pointer to message frame.
2247 */
2248void *
2249mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2250{
2251 return (void *)(ioc->request + (smid * ioc->request_sz));
2252}
2253
2254/**
2255 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
2256 * @ioc: per adapter object
2257 * @smid: system request message index
2258 *
2259 * Returns virt pointer to sense buffer.
2260 */
2261void *
2262mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2263{
2264 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
2265}
2266
2267/**
2268 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
2269 * @ioc: per adapter object
2270 * @smid: system request message index
2271 *
2272 * Returns phys pointer to the low 32bit address of the sense buffer.
2273 */
2274__le32
2275mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2276{
2277 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
2278 SCSI_SENSE_BUFFERSIZE));
2279}
2280
2281/**
2282 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2283 * @ioc: per adapter object
2284 * @phys_addr: lower 32 physical addr of the reply
2285 *
2286 * Converts 32bit lower physical addr into a virt address.
2287 */
2288void *
2289mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
2290{
2291 if (!phys_addr)
2292 return NULL;
2293 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
2294}
2295
/**
 * _base_get_msix_index - MSI-X index for the CPU issuing the request
 * @ioc: per adapter object
 *
 * Looks up the per-CPU MSI-X reply queue table so completions land on a
 * queue associated with the submitting CPU.  raw_smp_processor_id() is
 * fine here: an occasional migration only costs locality, not
 * correctness.
 */
static inline u8
_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
{
	return ioc->cpu_msix_table[raw_smp_processor_id()];
}
2301
f92363d1
SR
2302/**
2303 * mpt3sas_base_get_smid - obtain a free smid from internal queue
2304 * @ioc: per adapter object
2305 * @cb_idx: callback index
2306 *
2307 * Returns smid (zero is invalid)
2308 */
2309u16
2310mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2311{
2312 unsigned long flags;
2313 struct request_tracker *request;
2314 u16 smid;
2315
2316 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2317 if (list_empty(&ioc->internal_free_list)) {
2318 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2319 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2320 ioc->name, __func__);
2321 return 0;
2322 }
2323
2324 request = list_entry(ioc->internal_free_list.next,
2325 struct request_tracker, tracker_list);
2326 request->cb_idx = cb_idx;
2327 smid = request->smid;
2328 list_del(&request->tracker_list);
2329 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2330 return smid;
2331}
2332
2333/**
2334 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
2335 * @ioc: per adapter object
2336 * @cb_idx: callback index
2337 * @scmd: pointer to scsi command object
2338 *
2339 * Returns smid (zero is invalid)
2340 */
2341u16
2342mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
2343 struct scsi_cmnd *scmd)
2344{
2345 unsigned long flags;
2346 struct scsiio_tracker *request;
2347 u16 smid;
2348
2349 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2350 if (list_empty(&ioc->free_list)) {
2351 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2352 pr_err(MPT3SAS_FMT "%s: smid not available\n",
2353 ioc->name, __func__);
2354 return 0;
2355 }
2356
2357 request = list_entry(ioc->free_list.next,
2358 struct scsiio_tracker, tracker_list);
2359 request->scmd = scmd;
2360 request->cb_idx = cb_idx;
2361 smid = request->smid;
03d1fb3a 2362 request->msix_io = _base_get_msix_index(ioc);
f92363d1
SR
2363 list_del(&request->tracker_list);
2364 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2365 return smid;
2366}
2367
2368/**
2369 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
2370 * @ioc: per adapter object
2371 * @cb_idx: callback index
2372 *
2373 * Returns smid (zero is invalid)
2374 */
2375u16
2376mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
2377{
2378 unsigned long flags;
2379 struct request_tracker *request;
2380 u16 smid;
2381
2382 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
2383 if (list_empty(&ioc->hpr_free_list)) {
2384 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2385 return 0;
2386 }
2387
2388 request = list_entry(ioc->hpr_free_list.next,
2389 struct request_tracker, tracker_list);
2390 request->cb_idx = cb_idx;
2391 smid = request->smid;
2392 list_del(&request->tracker_list);
2393 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
2394 return smid;
2395}
2396
/**
 * mpt3sas_base_free_smid - put smid back on free_list
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * The smid ranges partition into three pools: [1, hi_priority_smid) is
 * SCSI IO, [hi_priority_smid, internal_smid) is hi-priority, and
 * [internal_smid, hba_queue_depth] is internal.  The SCSI IO branch
 * additionally releases chained SG trackers, drops the lock early and
 * participates in the host-reset drain handshake.
 *
 * Return nothing.
 */
void
mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	unsigned long flags;
	int i;
	struct chain_tracker *chain_req, *next;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (smid < ioc->hi_priority_smid) {
		/* scsiio queue */
		i = smid - 1;
		/* return any chain frames this IO borrowed */
		if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
			list_for_each_entry_safe(chain_req, next,
			    &ioc->scsi_lookup[i].chain_list, tracker_list) {
				list_del_init(&chain_req->tracker_list);
				list_add(&chain_req->tracker_list,
				    &ioc->free_chain_list);
			}
		}
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].scmd = NULL;
		ioc->scsi_lookup[i].direct_io = 0;
		list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

		/*
		 * See _wait_for_commands_to_complete() call with regards
		 * to this code.
		 */
		if (ioc->shost_recovery && ioc->pending_io_count) {
			/* last outstanding IO wakes the reset waiter */
			if (ioc->pending_io_count == 1)
				wake_up(&ioc->reset_wq);
			ioc->pending_io_count--;
		}
		return;
	} else if (smid < ioc->internal_smid) {
		/* hi-priority */
		i = smid - ioc->hi_priority_smid;
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
	} else if (smid <= ioc->hba_queue_depth) {
		/* internal queue */
		i = smid - ioc->internal_smid;
		ioc->internal_lookup[i].cb_idx = 0xFF;
		list_add(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
}
2453
/**
 * _base_writeq - 64 bit write to MMIO
 * @ioc: per adapter object
 * @b: data payload
 * @addr: address in MMIO space
 * @writeq_lock: spin lock
 *
 * Glue for handling an atomic 64 bit word to MMIO. This special handling
 * takes care of 32 bit environments where it's not guaranteed to send the
 * entire word in one transfer: the two 32-bit halves are written under
 * @writeq_lock so no other descriptor post can interleave between them.
 *
 * NOTE(review): writeq()/writel() already perform the CPU-to-LE byte
 * swap, so the explicit cpu_to_le64() here looks like a double swap on
 * big-endian builds — confirm against the firmware descriptor layout
 * before touching it.
 */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	writeq(cpu_to_le64(b), addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	unsigned long flags;
	__u64 data_out = cpu_to_le64(b);

	spin_lock_irqsave(writeq_lock, flags);
	writel((u32)(data_out), addr);
	writel((u32)(data_out >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
#endif
2484
f92363d1 2485/**
81c16f83 2486 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
f92363d1
SR
2487 * @ioc: per adapter object
2488 * @smid: system request message index
2489 * @handle: device handle
2490 *
2491 * Return nothing.
2492 */
81c16f83
SPS
2493static void
2494_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
f92363d1
SR
2495{
2496 Mpi2RequestDescriptorUnion_t descriptor;
2497 u64 *request = (u64 *)&descriptor;
2498
2499
2500 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2501 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2502 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2503 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2504 descriptor.SCSIIO.LMID = 0;
2505 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2506 &ioc->scsi_lookup_lock);
2507}
2508
2509/**
81c16f83 2510 * _base_put_smid_fast_path - send fast path request to firmware
f92363d1
SR
2511 * @ioc: per adapter object
2512 * @smid: system request message index
2513 * @handle: device handle
2514 *
2515 * Return nothing.
2516 */
81c16f83
SPS
2517static void
2518_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
f92363d1
SR
2519 u16 handle)
2520{
2521 Mpi2RequestDescriptorUnion_t descriptor;
2522 u64 *request = (u64 *)&descriptor;
2523
2524 descriptor.SCSIIO.RequestFlags =
2525 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2526 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
2527 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
2528 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
2529 descriptor.SCSIIO.LMID = 0;
2530 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2531 &ioc->scsi_lookup_lock);
2532}
2533
2534/**
81c16f83 2535 * _base_put_smid_hi_priority - send Task Management request to firmware
f92363d1
SR
2536 * @ioc: per adapter object
2537 * @smid: system request message index
03d1fb3a 2538 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
f92363d1
SR
2539 * Return nothing.
2540 */
81c16f83
SPS
2541static void
2542_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
03d1fb3a 2543 u16 msix_task)
f92363d1
SR
2544{
2545 Mpi2RequestDescriptorUnion_t descriptor;
2546 u64 *request = (u64 *)&descriptor;
2547
2548 descriptor.HighPriority.RequestFlags =
2549 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
03d1fb3a 2550 descriptor.HighPriority.MSIxIndex = msix_task;
f92363d1
SR
2551 descriptor.HighPriority.SMID = cpu_to_le16(smid);
2552 descriptor.HighPriority.LMID = 0;
2553 descriptor.HighPriority.Reserved1 = 0;
2554 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2555 &ioc->scsi_lookup_lock);
2556}
2557
2558/**
81c16f83 2559 * _base_put_smid_default - Default, primarily used for config pages
f92363d1
SR
2560 * @ioc: per adapter object
2561 * @smid: system request message index
2562 *
2563 * Return nothing.
2564 */
81c16f83
SPS
2565static void
2566_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
f92363d1
SR
2567{
2568 Mpi2RequestDescriptorUnion_t descriptor;
2569 u64 *request = (u64 *)&descriptor;
2570
2571 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2572 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
2573 descriptor.Default.SMID = cpu_to_le16(smid);
2574 descriptor.Default.LMID = 0;
2575 descriptor.Default.DescriptorTypeDependent = 0;
2576 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
2577 &ioc->scsi_lookup_lock);
2578}
2579
81c16f83
SPS
2580/**
2581* _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
2582* Atomic Request Descriptor
2583* @ioc: per adapter object
2584* @smid: system request message index
2585* @handle: device handle, unused in this function, for function type match
2586*
2587* Return nothing.
2588*/
2589static void
2590_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2591 u16 handle)
2592{
2593 Mpi26AtomicRequestDescriptor_t descriptor;
2594 u32 *request = (u32 *)&descriptor;
2595
2596 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
2597 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2598 descriptor.SMID = cpu_to_le16(smid);
2599
2600 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2601}
2602
2603/**
2604 * _base_put_smid_fast_path_atomic - send fast path request to firmware
2605 * using Atomic Request Descriptor
2606 * @ioc: per adapter object
2607 * @smid: system request message index
2608 * @handle: device handle, unused in this function, for function type match
2609 * Return nothing
2610 */
2611static void
2612_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2613 u16 handle)
2614{
2615 Mpi26AtomicRequestDescriptor_t descriptor;
2616 u32 *request = (u32 *)&descriptor;
2617
2618 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
2619 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2620 descriptor.SMID = cpu_to_le16(smid);
2621
2622 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2623}
2624
2625/**
2626 * _base_put_smid_hi_priority_atomic - send Task Management request to
2627 * firmware using Atomic Request Descriptor
2628 * @ioc: per adapter object
2629 * @smid: system request message index
2630 * @msix_task: msix_task will be same as msix of IO incase of task abort else 0
2631 *
2632 * Return nothing.
2633 */
2634static void
2635_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2636 u16 msix_task)
2637{
2638 Mpi26AtomicRequestDescriptor_t descriptor;
2639 u32 *request = (u32 *)&descriptor;
2640
2641 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
2642 descriptor.MSIxIndex = msix_task;
2643 descriptor.SMID = cpu_to_le16(smid);
2644
2645 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2646}
2647
2648/**
2649 * _base_put_smid_default - Default, primarily used for config pages
2650 * use Atomic Request Descriptor
2651 * @ioc: per adapter object
2652 * @smid: system request message index
2653 *
2654 * Return nothing.
2655 */
2656static void
2657_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
2658{
2659 Mpi26AtomicRequestDescriptor_t descriptor;
2660 u32 *request = (u32 *)&descriptor;
2661
2662 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2663 descriptor.MSIxIndex = _base_get_msix_index(ioc);
2664 descriptor.SMID = cpu_to_le16(smid);
2665
2666 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
2667}
2668
1117b31a 2669/**
989e43c7 2670 * _base_display_OEMs_branding - Display branding string
1117b31a
SR
2671 * @ioc: per adapter object
2672 *
2673 * Return nothing.
2674 */
2675static void
989e43c7 2676_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
1117b31a
SR
2677{
2678 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
2679 return;
2680
989e43c7
SR
2681 switch (ioc->pdev->subsystem_vendor) {
2682 case PCI_VENDOR_ID_INTEL:
2683 switch (ioc->pdev->device) {
2684 case MPI2_MFGPAGE_DEVID_SAS2008:
2685 switch (ioc->pdev->subsystem_device) {
2686 case MPT2SAS_INTEL_RMS2LL080_SSDID:
2687 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2688 MPT2SAS_INTEL_RMS2LL080_BRANDING);
2689 break;
2690 case MPT2SAS_INTEL_RMS2LL040_SSDID:
2691 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2692 MPT2SAS_INTEL_RMS2LL040_BRANDING);
2693 break;
2694 case MPT2SAS_INTEL_SSD910_SSDID:
2695 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2696 MPT2SAS_INTEL_SSD910_BRANDING);
2697 break;
2698 default:
2699 pr_info(MPT3SAS_FMT
2700 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2701 ioc->name, ioc->pdev->subsystem_device);
2702 break;
2703 }
2704 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2705 switch (ioc->pdev->subsystem_device) {
2706 case MPT2SAS_INTEL_RS25GB008_SSDID:
2707 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2708 MPT2SAS_INTEL_RS25GB008_BRANDING);
2709 break;
2710 case MPT2SAS_INTEL_RMS25JB080_SSDID:
2711 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2712 MPT2SAS_INTEL_RMS25JB080_BRANDING);
2713 break;
2714 case MPT2SAS_INTEL_RMS25JB040_SSDID:
2715 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2716 MPT2SAS_INTEL_RMS25JB040_BRANDING);
2717 break;
2718 case MPT2SAS_INTEL_RMS25KB080_SSDID:
2719 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2720 MPT2SAS_INTEL_RMS25KB080_BRANDING);
2721 break;
2722 case MPT2SAS_INTEL_RMS25KB040_SSDID:
2723 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2724 MPT2SAS_INTEL_RMS25KB040_BRANDING);
2725 break;
2726 case MPT2SAS_INTEL_RMS25LB040_SSDID:
2727 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2728 MPT2SAS_INTEL_RMS25LB040_BRANDING);
2729 break;
2730 case MPT2SAS_INTEL_RMS25LB080_SSDID:
2731 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2732 MPT2SAS_INTEL_RMS25LB080_BRANDING);
2733 break;
2734 default:
2735 pr_info(MPT3SAS_FMT
2736 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2737 ioc->name, ioc->pdev->subsystem_device);
2738 break;
2739 }
2740 case MPI25_MFGPAGE_DEVID_SAS3008:
2741 switch (ioc->pdev->subsystem_device) {
2742 case MPT3SAS_INTEL_RMS3JC080_SSDID:
2743 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2744 MPT3SAS_INTEL_RMS3JC080_BRANDING);
2745 break;
2746
2747 case MPT3SAS_INTEL_RS3GC008_SSDID:
2748 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2749 MPT3SAS_INTEL_RS3GC008_BRANDING);
2750 break;
2751 case MPT3SAS_INTEL_RS3FC044_SSDID:
2752 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2753 MPT3SAS_INTEL_RS3FC044_BRANDING);
2754 break;
2755 case MPT3SAS_INTEL_RS3UC080_SSDID:
2756 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2757 MPT3SAS_INTEL_RS3UC080_BRANDING);
2758 break;
2759 default:
2760 pr_info(MPT3SAS_FMT
2761 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2762 ioc->name, ioc->pdev->subsystem_device);
2763 break;
2764 }
1117b31a
SR
2765 break;
2766 default:
2767 pr_info(MPT3SAS_FMT
989e43c7
SR
2768 "Intel(R) Controller: Subsystem ID: 0x%X\n",
2769 ioc->name, ioc->pdev->subsystem_device);
1117b31a
SR
2770 break;
2771 }
2772 break;
989e43c7
SR
2773 case PCI_VENDOR_ID_DELL:
2774 switch (ioc->pdev->device) {
2775 case MPI2_MFGPAGE_DEVID_SAS2008:
2776 switch (ioc->pdev->subsystem_device) {
2777 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
2778 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2779 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
2780 break;
2781 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
2782 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2783 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
2784 break;
2785 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
2786 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2787 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
2788 break;
2789 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
2790 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2791 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
2792 break;
2793 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
2794 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2795 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
2796 break;
2797 case MPT2SAS_DELL_PERC_H200_SSDID:
2798 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2799 MPT2SAS_DELL_PERC_H200_BRANDING);
2800 break;
2801 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
2802 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2803 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
2804 break;
2805 default:
2806 pr_info(MPT3SAS_FMT
2807 "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
2808 ioc->name, ioc->pdev->subsystem_device);
2809 break;
2810 }
2811 break;
2812 case MPI25_MFGPAGE_DEVID_SAS3008:
2813 switch (ioc->pdev->subsystem_device) {
2814 case MPT3SAS_DELL_12G_HBA_SSDID:
2815 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2816 MPT3SAS_DELL_12G_HBA_BRANDING);
2817 break;
2818 default:
2819 pr_info(MPT3SAS_FMT
2820 "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
2821 ioc->name, ioc->pdev->subsystem_device);
2822 break;
2823 }
fb84dfc4
SR
2824 break;
2825 default:
2826 pr_info(MPT3SAS_FMT
989e43c7 2827 "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
fb84dfc4
SR
2828 ioc->pdev->subsystem_device);
2829 break;
2830 }
2831 break;
989e43c7
SR
2832 case PCI_VENDOR_ID_CISCO:
2833 switch (ioc->pdev->device) {
2834 case MPI25_MFGPAGE_DEVID_SAS3008:
2835 switch (ioc->pdev->subsystem_device) {
2836 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
2837 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2838 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
2839 break;
2840 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
2841 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2842 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
2843 break;
2844 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2845 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2846 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
2847 break;
2848 default:
2849 pr_info(MPT3SAS_FMT
2850 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2851 ioc->name, ioc->pdev->subsystem_device);
2852 break;
2853 }
d8eb4a47 2854 break;
989e43c7
SR
2855 case MPI25_MFGPAGE_DEVID_SAS3108_1:
2856 switch (ioc->pdev->subsystem_device) {
2857 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
2858 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
d8eb4a47 2859 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
989e43c7
SR
2860 break;
2861 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
2862 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2863 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
2864 );
2865 break;
2866 default:
2867 pr_info(MPT3SAS_FMT
2868 "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
2869 ioc->name, ioc->pdev->subsystem_device);
2870 break;
2871 }
38e4141e
SR
2872 break;
2873 default:
2874 pr_info(MPT3SAS_FMT
989e43c7
SR
2875 "Cisco SAS HBA: Subsystem ID: 0x%X\n",
2876 ioc->name, ioc->pdev->subsystem_device);
38e4141e
SR
2877 break;
2878 }
2879 break;
989e43c7
SR
2880 case MPT2SAS_HP_3PAR_SSVID:
2881 switch (ioc->pdev->device) {
2882 case MPI2_MFGPAGE_DEVID_SAS2004:
2883 switch (ioc->pdev->subsystem_device) {
2884 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
2885 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2886 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
2887 break;
2888 default:
2889 pr_info(MPT3SAS_FMT
2890 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2891 ioc->name, ioc->pdev->subsystem_device);
2892 break;
2893 }
2894 case MPI2_MFGPAGE_DEVID_SAS2308_2:
2895 switch (ioc->pdev->subsystem_device) {
2896 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
2897 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2898 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
2899 break;
2900 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
2901 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2902 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
2903 break;
2904 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
2905 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2906 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
2907 break;
2908 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
2909 pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2910 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
2911 break;
2912 default:
2913 pr_info(MPT3SAS_FMT
2914 "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
2915 ioc->name, ioc->pdev->subsystem_device);
2916 break;
2917 }
d8eb4a47
SR
2918 default:
2919 pr_info(MPT3SAS_FMT
989e43c7
SR
2920 "HP SAS HBA: Subsystem ID: 0x%X\n",
2921 ioc->name, ioc->pdev->subsystem_device);
d8eb4a47
SR
2922 break;
2923 }
38e4141e 2924 default:
38e4141e
SR
2925 break;
2926 }
2927}
fb84dfc4 2928
f92363d1
SR
2929/**
2930 * _base_display_ioc_capabilities - Disply IOC's capabilities.
2931 * @ioc: per adapter object
2932 *
2933 * Return nothing.
2934 */
2935static void
2936_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
2937{
2938 int i = 0;
2939 char desc[16];
2940 u32 iounit_pg1_flags;
2941 u32 bios_version;
2942
2943 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2944 strncpy(desc, ioc->manu_pg0.ChipName, 16);
2945 pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
2946 "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
2947 ioc->name, desc,
2948 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2949 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2950 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2951 ioc->facts.FWVersion.Word & 0x000000FF,
2952 ioc->pdev->revision,
2953 (bios_version & 0xFF000000) >> 24,
2954 (bios_version & 0x00FF0000) >> 16,
2955 (bios_version & 0x0000FF00) >> 8,
2956 bios_version & 0x000000FF);
2957
989e43c7 2958 _base_display_OEMs_branding(ioc);
1117b31a 2959
f92363d1
SR
2960 pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
2961
2962 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
2963 pr_info("Initiator");
2964 i++;
2965 }
2966
2967 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
2968 pr_info("%sTarget", i ? "," : "");
2969 i++;
2970 }
2971
2972 i = 0;
2973 pr_info("), ");
2974 pr_info("Capabilities=(");
2975
7786ab6a
SR
2976 if (!ioc->hide_ir_msg) {
2977 if (ioc->facts.IOCCapabilities &
f92363d1
SR
2978 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
2979 pr_info("Raid");
2980 i++;
7786ab6a 2981 }
f92363d1
SR
2982 }
2983
2984 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
2985 pr_info("%sTLR", i ? "," : "");
2986 i++;
2987 }
2988
2989 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
2990 pr_info("%sMulticast", i ? "," : "");
2991 i++;
2992 }
2993
2994 if (ioc->facts.IOCCapabilities &
2995 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
2996 pr_info("%sBIDI Target", i ? "," : "");
2997 i++;
2998 }
2999
3000 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
3001 pr_info("%sEEDP", i ? "," : "");
3002 i++;
3003 }
3004
3005 if (ioc->facts.IOCCapabilities &
3006 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
3007 pr_info("%sSnapshot Buffer", i ? "," : "");
3008 i++;
3009 }
3010
3011 if (ioc->facts.IOCCapabilities &
3012 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
3013 pr_info("%sDiag Trace Buffer", i ? "," : "");
3014 i++;
3015 }
3016
3017 if (ioc->facts.IOCCapabilities &
3018 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
3019 pr_info("%sDiag Extended Buffer", i ? "," : "");
3020 i++;
3021 }
3022
3023 if (ioc->facts.IOCCapabilities &
3024 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
3025 pr_info("%sTask Set Full", i ? "," : "");
3026 i++;
3027 }
3028
3029 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3030 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
3031 pr_info("%sNCQ", i ? "," : "");
3032 i++;
3033 }
3034
3035 pr_info(")\n");
3036}
3037
3038/**
3039 * mpt3sas_base_update_missing_delay - change the missing delay timers
3040 * @ioc: per adapter object
3041 * @device_missing_delay: amount of time till device is reported missing
3042 * @io_missing_delay: interval IO is returned when there is a missing device
3043 *
3044 * Return nothing.
3045 *
3046 * Passed on the command line, this function will modify the device missing
3047 * delay, as well as the io missing delay. This should be called at driver
3048 * load time.
3049 */
3050void
3051mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
3052 u16 device_missing_delay, u8 io_missing_delay)
3053{
3054 u16 dmd, dmd_new, dmd_orignal;
3055 u8 io_missing_delay_original;
3056 u16 sz;
3057 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
3058 Mpi2ConfigReply_t mpi_reply;
3059 u8 num_phys = 0;
3060 u16 ioc_status;
3061
3062 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
3063 if (!num_phys)
3064 return;
3065
3066 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
3067 sizeof(Mpi2SasIOUnit1PhyData_t));
3068 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
3069 if (!sas_iounit_pg1) {
3070 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3071 ioc->name, __FILE__, __LINE__, __func__);
3072 goto out;
3073 }
3074 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
3075 sas_iounit_pg1, sz))) {
3076 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3077 ioc->name, __FILE__, __LINE__, __func__);
3078 goto out;
3079 }
3080 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3081 MPI2_IOCSTATUS_MASK;
3082 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
3083 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
3084 ioc->name, __FILE__, __LINE__, __func__);
3085 goto out;
3086 }
3087
3088 /* device missing delay */
3089 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
3090 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
3091 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
3092 else
3093 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
3094 dmd_orignal = dmd;
3095 if (device_missing_delay > 0x7F) {
3096 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
3097 device_missing_delay;
3098 dmd = dmd / 16;
3099 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
3100 } else
3101 dmd = device_missing_delay;
3102 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
3103
3104 /* io missing delay */
3105 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
3106 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
3107
3108 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
3109 sz)) {
3110 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
3111 dmd_new = (dmd &
3112 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
3113 else
3114 dmd_new =
3115 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
3116 pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
3117 ioc->name, dmd_orignal, dmd_new);
3118 pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
3119 ioc->name, io_missing_delay_original,
3120 io_missing_delay);
3121 ioc->device_missing_delay = dmd_new;
3122 ioc->io_missing_delay = io_missing_delay;
3123 }
3124
3125out:
3126 kfree(sas_iounit_pg1);
3127}
/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 *
 * Reads the config pages the driver caches for later use and applies
 * one-time fix-ups (EEDP tag mode, task-set-full handling).
 *
 * Return nothing.
 */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	/* manufacturing page 10 is only fetched for IR (RAID) firmware */
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		/* clear the low two mode bits, then select mode 1 */
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}

	/* cache BIOS, IOC and IO unit pages consulted at run time */
	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

	/* remember how many temperature sensors IO unit page 8 reports */
	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}
3185
/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 *
 * Return nothing.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	struct reply_post_struct *rps;

	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* contiguous request/hi-priority/internal frame pool */
	if (ioc->request) {
		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"request_pool(0x%p): free\n",
			ioc->name, ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		if (ioc->sense_dma_pool)
			pci_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"sense_pool(0x%p): free\n",
			ioc->name, ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		if (ioc->reply_dma_pool)
			pci_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_pool(0x%p): free\n",
			ioc->name, ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		if (ioc->reply_free_dma_pool)
			pci_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
			"reply_free_pool(0x%p): free\n",
			ioc->name, ioc->reply_free));
		ioc->reply_free = NULL;
	}

	/* one reply post queue, or one per MSI-X vector when the RDPQ
	 * array is enabled (same loop shape as the allocation path)
	 */
	if (ioc->reply_post) {
		do {
			rps = &ioc->reply_post[i];
			if (rps->reply_post_free) {
				pci_pool_free(
				    ioc->reply_post_free_dma_pool,
				    rps->reply_post_free,
				    rps->reply_post_free_dma);
				dexitprintk(ioc, pr_info(MPT3SAS_FMT
				    "reply_post_free_pool(0x%p): free\n",
				    ioc->name, rps->reply_post_free));
				rps->reply_post_free = NULL;
			}
		} while (ioc->rdpq_array_enable &&
			   (++i < ioc->reply_queue_count));

		if (ioc->reply_post_free_dma_pool)
			pci_pool_destroy(ioc->reply_post_free_dma_pool);
		kfree(ioc->reply_post);
	}

	/* NOTE(review): ioc->config_page is freed but not set to NULL here,
	 * unlike the other pointers above — verify callers never re-enter.
	 */
	if (ioc->config_page) {
		dexitprintk(ioc, pr_info(MPT3SAS_FMT
		    "config_page(0x%p): free\n", ioc->name,
		    ioc->config_page));
		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	if (ioc->scsi_lookup) {
		free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
		ioc->scsi_lookup = NULL;
	}
	kfree(ioc->hpr_lookup);
	kfree(ioc->internal_lookup);
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->chain_depth; i++) {
			if (ioc->chain_lookup[i].chain_buffer)
				pci_pool_free(ioc->chain_dma_pool,
				    ioc->chain_lookup[i].chain_buffer,
				    ioc->chain_lookup[i].chain_buffer_dma);
		}
		if (ioc->chain_dma_pool)
			pci_pool_destroy(ioc->chain_dma_pool);
		free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
		ioc->chain_lookup = NULL;
	}
}
3291
/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Sizes and allocates every DMA pool and lookup table the driver needs:
 * the contiguous request/hi-priority/internal frame pool, chain buffers,
 * sense buffers, reply frames, the reply free queue, the reply descriptor
 * post queue(s) and the config page buffer.  If the big request pool
 * allocation fails, the queue depth is reduced and the whole sizing is
 * retried (see the retry_allocation label).
 *
 * Returns 0 success, anything else error
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	struct mpt3sas_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz;
	u32 retry_sz;
	u16 max_request_credit;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));


	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1)
		sg_tablesize = max_sgl_entries;
	else {
		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
			sg_tablesize = MPT2SAS_SG_DEPTH;
		else
			sg_tablesize = MPT3SAS_SG_DEPTH;
	}

	/* clamp the (possibly user supplied) value into the supported range */
	if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
		sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
	else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
		sg_tablesize = min_t(unsigned short, sg_tablesize,
		    SG_MAX_SEGMENTS);
		pr_warn(MPT3SAS_FMT
		    "sg_tablesize(%u) is bigger than kernel"
		    " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
		    sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
	}
	ioc->shost->sg_tablesize = sg_tablesize;

	/* reserve a slice of the credits for internal commands */
	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
		(facts->RequestCredit / 4));
	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
				INTERNAL_SCSIIO_CMDS_COUNT)) {
			pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
			Credits, it has just %d number of credits\n",
			    ioc->name, facts->RequestCredit);
			return -ENOMEM;
		}
		ioc->internal_depth = 10;
	}

	ioc->hi_priority_depth = ioc->internal_depth - (5);
	/* command line tunables for max controller queue depth */
	if (max_queue_depth != -1 && max_queue_depth != 0) {
		max_request_credit = min_t(u16, max_queue_depth +
		    ioc->internal_depth, facts->RequestCredit);
		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
			max_request_credit = MAX_HBA_QUEUE_DEPTH;
	} else
		max_request_credit = min_t(u16, facts->RequestCredit,
		    MAX_HBA_QUEUE_DEPTH);

	/* Firmware maintains additional facts->HighPriorityCredit number of
	 * credits for HiPriprity Request messages, so hba queue depth will be
	 * sum of max_request_credit and high priority queue depth.
	 */
	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;

	/* request frame size */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* chain segment size */
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		if (facts->IOCMaxChainSegmentSize)
			ioc->chain_segment_sz =
					facts->IOCMaxChainSegmentSize *
					MAX_CHAIN_ELEMT_SZ;
		else
		/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
					MAX_CHAIN_ELEMT_SZ;
	} else
		ioc->chain_segment_sz = ioc->request_sz;

	/* calculate the max scatter element size */
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

	/* re-entered with a smaller hba_queue_depth if the request pool
	 * allocation below fails
	 */
 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->chain_segment_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

	/*
	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	/* if firmware cannot chain that deep, shrink sg_tablesize instead */
	if (chains_needed_per_io > facts->MaxChainDepth) {
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		* chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;

	/* reply free queue sizing - taking into account for 64 FW events */
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

	/* calculate reply descriptor post queue depth */
	ioc->reply_post_queue_depth = ioc->hba_queue_depth +
				ioc->reply_free_queue_depth +  1 ;
	/* align the reply post queue on the next 16 count boundary */
	if (ioc->reply_post_queue_depth % 16)
		ioc->reply_post_queue_depth += 16 -
		(ioc->reply_post_queue_depth % 16);

	/* if the post queue would exceed what firmware supports, derive the
	 * other depths backwards from the firmware maximum
	 */
	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		ioc->reply_post_queue_depth =
				facts->MaxReplyDescriptorPostQueueDepth -
		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
				((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io));

	/* reply post queue, 16 byte align */
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);

	sz = reply_post_free_sz;
	/* without the RDPQ array, all MSI-X queues share one allocation */
	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
		sz *= ioc->reply_queue_count;

	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
	    (ioc->reply_queue_count):1,
	    sizeof(struct reply_post_struct), GFP_KERNEL);

	if (!ioc->reply_post) {
		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool) {
		pr_err(MPT3SAS_FMT
		 "reply_post_free pool: pci_pool_create failed\n",
		 ioc->name);
		goto out;
	}
	i = 0;
	do {
		ioc->reply_post[i].reply_post_free =
		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
		    GFP_KERNEL,
		    &ioc->reply_post[i].reply_post_free_dma);
		if (!ioc->reply_post[i].reply_post_free) {
			pr_err(MPT3SAS_FMT
			"reply_post_free pool: pci_pool_alloc failed\n",
			ioc->name);
			goto out;
		}
		memset(ioc->reply_post[i].reply_post_free, 0, sz);
		dinitprintk(ioc, pr_info(MPT3SAS_FMT
		    "reply post free pool (0x%p): depth(%d),"
		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
		    ioc->reply_post[i].reply_post_free,
		    ioc->reply_post_queue_depth, 8, sz/1024));
		dinitprintk(ioc, pr_info(MPT3SAS_FMT
		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
		    (unsigned long long)
		    ioc->reply_post[i].reply_post_free_dma));
		total_sz += sz;
	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));

	/* presumably re-validates/raises the consistent DMA mask now that
	 * the critical low allocations are done — see
	 * _base_change_consistent_dma_mask (defined elsewhere in this file)
	 */
	if (ioc->dma_mask == 64) {
		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
			pr_warn(MPT3SAS_FMT
			    "no suitable consistent DMA mask for %s\n",
			    ioc->name, pci_name(ioc->pdev));
			goto out;
		}
	}

	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"scsi host: can_queue depth (%d)\n",
		ioc->name, ioc->shost->can_queue));


	/* contiguous pool for request and chains, 16 byte align, one extra "
	 * "frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
	if (!ioc->request) {
		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
			goto out;
		/* shrink the queue depth and retry the whole sizing pass */
		retry_sz = 64;
		ioc->hba_queue_depth -= retry_sz;
		_base_release_memory_pools(ioc);
		goto retry_allocation;
	}

	if (retry_sz)
		pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
		    "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
		    "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);

	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);

	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
		(ioc->hba_queue_depth * ioc->request_sz)/1024));

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
	    ioc->name, (unsigned long long) ioc->request_dma));
	total_sz += sz;

	/* per-SMID tracker table for outstanding SCSI IO requests */
	sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
	ioc->scsi_lookup_pages = get_order(sz);
	ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
	    GFP_KERNEL, ioc->scsi_lookup_pages);
	if (!ioc->scsi_lookup) {
		pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
			ioc->name, (int)sz);
		goto out;
	}

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
		ioc->name, ioc->request, ioc->scsiio_depth));

	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
	sz = ioc->chain_depth * sizeof(struct chain_tracker);
	ioc->chain_pages = get_order(sz);
	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
	    GFP_KERNEL, ioc->chain_pages);
	if (!ioc->chain_lookup) {
		pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
			ioc->name);
		goto out;
	}
	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool) {
		pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
			ioc->name);
		goto out;
	}
	/* a partial chain allocation is tolerated: the achieved count
	 * simply becomes the new chain_depth
	 */
	for (i = 0; i < ioc->chain_depth; i++) {
		ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
		    ioc->chain_dma_pool , GFP_KERNEL,
		    &ioc->chain_lookup[i].chain_buffer_dma);
		if (!ioc->chain_lookup[i].chain_buffer) {
			ioc->chain_depth = i;
			goto chain_done;
		}
		total_sz += ioc->chain_segment_sz;
	}
 chain_done:
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
		((ioc->chain_depth *  ioc->chain_segment_sz))/1024));

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"hi_priority(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->hi_priority,
		ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
		    ioc->name);
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"internal(0x%p): depth(%d), start smid(%d)\n",
		ioc->name, ioc->internal,
		ioc->internal_depth, ioc->internal_smid));

	/* sense buffers, 4 byte align */
	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->sense_dma_pool) {
		pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
	    &ioc->sense_dma);
	if (!ioc->sense) {
		pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
	    SCSI_SENSE_BUFFERSIZE, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->sense_dma));
	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
	    0);
	if (!ioc->reply_dma_pool) {
		pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
		    ioc->name);
		goto out;
	}
	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply) {
		pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
		    ioc->name);
		goto out;
	}
	/* bounds used elsewhere to validate reply frame addresses */
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->reply,
		ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
	    ioc->name, (unsigned long long)ioc->reply_dma));
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
	    ioc->pdev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
	    &ioc->reply_free_dma);
	if (!ioc->reply_free) {
		pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply_free_dma (0x%llx)\n",
		ioc->name, (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;

	/* single DMA buffer reused for config page transactions */
	ioc->config_page_sz = 512;
	ioc->config_page = pci_alloc_consistent(ioc->pdev,
	    ioc->config_page_sz, &ioc->config_page_dma);
	if (!ioc->config_page) {
		pr_err(MPT3SAS_FMT
			"config page: pci_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"config page(0x%p): size(%d)\n",
		ioc->name, ioc->config_page, ioc->config_page_sz));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
		ioc->name, (unsigned long long)ioc->config_page_dma));
	total_sz += ioc->config_page_sz;

	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
	    ioc->name, total_sz/1024);
	pr_info(MPT3SAS_FMT
		"Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
	    ioc->name, ioc->shost->sg_tablesize);
	return 0;

	/* partially allocated resources are freed by the caller via
	 * _base_release_memory_pools
	 */
 out:
	return -ENOMEM;
}
3746
3747/**
3748 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
3749 * @ioc: Pointer to MPT_ADAPTER structure
3750 * @cooked: Request raw or cooked IOC state
3751 *
3752 * Returns all IOC Doorbell register bits if cooked==0, else just the
3753 * Doorbell bits in MPI_IOC_STATE_MASK.
3754 */
3755u32
3756mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3757{
3758 u32 s, sc;
3759
3760 s = readl(&ioc->chip->Doorbell);
3761 sc = s & MPI2_IOC_STATE_MASK;
3762 return cooked ? sc : s;
3763}
3764
3765/**
3766 * _base_wait_on_iocstate - waiting on a particular ioc state
3767 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3768 * @timeout: timeout in second
f92363d1
SR
3769 *
3770 * Returns 0 for success, non-zero for failure.
3771 */
3772static int
98c56ad3 3773_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
f92363d1
SR
3774{
3775 u32 count, cntdn;
3776 u32 current_state;
3777
3778 count = 0;
98c56ad3 3779 cntdn = 1000 * timeout;
f92363d1
SR
3780 do {
3781 current_state = mpt3sas_base_get_iocstate(ioc, 1);
3782 if (current_state == ioc_state)
3783 return 0;
3784 if (count && current_state == MPI2_IOC_STATE_FAULT)
3785 break;
98c56ad3
CO
3786
3787 usleep_range(1000, 1500);
f92363d1
SR
3788 count++;
3789 } while (--cntdn);
3790
3791 return current_state;
3792}
3793
3794/**
3795 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
3796 * a write to the doorbell)
3797 * @ioc: per adapter object
3798 * @timeout: timeout in second
f92363d1
SR
3799 *
3800 * Returns 0 for success, non-zero for failure.
3801 *
3802 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3803 */
4dc8c808 3804static int
98c56ad3 3805_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
4dc8c808 3806
f92363d1 3807static int
98c56ad3 3808_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
3809{
3810 u32 cntdn, count;
3811 u32 int_status;
3812
3813 count = 0;
98c56ad3 3814 cntdn = 1000 * timeout;
f92363d1
SR
3815 do {
3816 int_status = readl(&ioc->chip->HostInterruptStatus);
3817 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3818 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3819 "%s: successful count(%d), timeout(%d)\n",
3820 ioc->name, __func__, count, timeout));
3821 return 0;
3822 }
98c56ad3
CO
3823
3824 usleep_range(1000, 1500);
3825 count++;
3826 } while (--cntdn);
3827
3828 pr_err(MPT3SAS_FMT
3829 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3830 ioc->name, __func__, count, int_status);
3831 return -EFAULT;
3832}
3833
3834static int
3835_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
3836{
3837 u32 cntdn, count;
3838 u32 int_status;
3839
3840 count = 0;
3841 cntdn = 2000 * timeout;
3842 do {
3843 int_status = readl(&ioc->chip->HostInterruptStatus);
3844 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3845 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3846 "%s: successful count(%d), timeout(%d)\n",
3847 ioc->name, __func__, count, timeout));
3848 return 0;
3849 }
3850
3851 udelay(500);
f92363d1
SR
3852 count++;
3853 } while (--cntdn);
3854
3855 pr_err(MPT3SAS_FMT
3856 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3857 ioc->name, __func__, count, int_status);
3858 return -EFAULT;
98c56ad3 3859
f92363d1
SR
3860}
3861
3862/**
3863 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3864 * @ioc: per adapter object
3865 * @timeout: timeout in second
f92363d1
SR
3866 *
3867 * Returns 0 for success, non-zero for failure.
3868 *
3869 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
3870 * doorbell.
3871 */
3872static int
98c56ad3 3873_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
3874{
3875 u32 cntdn, count;
3876 u32 int_status;
3877 u32 doorbell;
3878
3879 count = 0;
98c56ad3 3880 cntdn = 1000 * timeout;
f92363d1
SR
3881 do {
3882 int_status = readl(&ioc->chip->HostInterruptStatus);
3883 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
3884 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3885 "%s: successful count(%d), timeout(%d)\n",
3886 ioc->name, __func__, count, timeout));
3887 return 0;
3888 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3889 doorbell = readl(&ioc->chip->Doorbell);
3890 if ((doorbell & MPI2_IOC_STATE_MASK) ==
3891 MPI2_IOC_STATE_FAULT) {
3892 mpt3sas_base_fault_info(ioc , doorbell);
3893 return -EFAULT;
3894 }
3895 } else if (int_status == 0xFFFFFFFF)
3896 goto out;
3897
98c56ad3 3898 usleep_range(1000, 1500);
f92363d1
SR
3899 count++;
3900 } while (--cntdn);
3901
3902 out:
3903 pr_err(MPT3SAS_FMT
3904 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3905 ioc->name, __func__, count, int_status);
3906 return -EFAULT;
3907}
3908
3909/**
3910 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3911 * @ioc: per adapter object
3912 * @timeout: timeout in second
f92363d1
SR
3913 *
3914 * Returns 0 for success, non-zero for failure.
3915 *
3916 */
3917static int
98c56ad3 3918_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
3919{
3920 u32 cntdn, count;
3921 u32 doorbell_reg;
3922
3923 count = 0;
98c56ad3 3924 cntdn = 1000 * timeout;
f92363d1
SR
3925 do {
3926 doorbell_reg = readl(&ioc->chip->Doorbell);
3927 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
3928 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3929 "%s: successful count(%d), timeout(%d)\n",
3930 ioc->name, __func__, count, timeout));
3931 return 0;
3932 }
98c56ad3
CO
3933
3934 usleep_range(1000, 1500);
f92363d1
SR
3935 count++;
3936 } while (--cntdn);
3937
3938 pr_err(MPT3SAS_FMT
3939 "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
3940 ioc->name, __func__, count, doorbell_reg);
3941 return -EFAULT;
3942}
3943
3944/**
3945 * _base_send_ioc_reset - send doorbell reset
3946 * @ioc: per adapter object
3947 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3948 * @timeout: timeout in second
f92363d1
SR
3949 *
3950 * Returns 0 for success, non-zero for failure.
3951 */
3952static int
98c56ad3 3953_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
f92363d1
SR
3954{
3955 u32 ioc_state;
3956 int r = 0;
3957
3958 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
3959 pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
3960 ioc->name, __func__);
3961 return -EFAULT;
3962 }
3963
3964 if (!(ioc->facts.IOCCapabilities &
3965 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
3966 return -EFAULT;
3967
3968 pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
3969
3970 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3971 &ioc->chip->Doorbell);
98c56ad3 3972 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
f92363d1
SR
3973 r = -EFAULT;
3974 goto out;
3975 }
98c56ad3 3976 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
f92363d1
SR
3977 if (ioc_state) {
3978 pr_err(MPT3SAS_FMT
3979 "%s: failed going to ready state (ioc_state=0x%x)\n",
3980 ioc->name, __func__, ioc_state);
3981 r = -EFAULT;
3982 goto out;
3983 }
3984 out:
3985 pr_info(MPT3SAS_FMT "message unit reset: %s\n",
3986 ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3987 return r;
3988}
3989
/**
 * _base_handshake_req_reply_wait - send request thru doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in second
 *
 * Implements the MPI doorbell handshake: the request is clocked out to the
 * IOC 32 bits at a time through the Doorbell register, then the reply is
 * clocked back in 16 bits at a time.  Each transfer step is paced by
 * waiting for the corresponding doorbell ack/interrupt bit.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		pr_err(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* clear pending doorbell interrupts from previous state changes */
	if (readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* send message to ioc: function code plus request length in dwords */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	/* IOC raises the doorbell interrupt to accept the handshake */
	if ((_base_spin_on_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake ack failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* send message 32-bits at a time; each dword must be acked before
	 * the next one is written
	 */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5)))
			failed = 1;
	}

	if (failed) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake sending request failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* now wait for the reply; caller-supplied timeout applies here only */
	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* read the first two 16-bits, it gives the total length of the reply */
	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	/* remaining reply words; MsgLength is in dwords, i counts 16-bit
	 * words, hence MsgLength * 2
	 */
	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((_base_wait_for_doorbell_int(ioc, 5))) {
			pr_err(MPT3SAS_FMT
				"doorbell handshake int failed (line=%d)\n",
				ioc->name, __LINE__);
			return -EFAULT;
		}
		/* the IOC always clocks out the full reply; words beyond the
		 * caller's buffer are read and discarded
		 */
		if (i >= reply_bytes/2) /* overflow case */
			readl(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	/* final int + doorbell-released wait closes out the handshake; a
	 * still-busy doorbell is only logged, not treated as an error
	 */
	_base_wait_for_doorbell_int(ioc, 5);
	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}
	return 0;
}
4110
/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform low-level
 * operations, such as resets on the PHYs of the IO Unit, also allows the host
 * to obtain the IOC assigned device handles for a device if it has other
 * identifying information about the device, in addition allows the host to
 * remove IOC resources associated with the device.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasIoUnitControlReply_t *mpi_reply,
	Mpi2SasIoUnitControlRequest_t *mpi_request)
{
	u16 smid;
	u32 ioc_state;
	bool issue_reset = false;
	int rc;
	void *request;
	u16 wait_state_count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* base_cmds is a single-slot internal command channel; serialize */
	mutex_lock(&ioc->base_cmds.mutex);

	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* the request can only be serviced while the IOC is OPERATIONAL;
	 * poll once a second, give up after ~10 seconds
	 */
	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
		    "%s: waiting for operational state(count=%d)\n",
		    ioc->name, __func__, wait_state_count);
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
	/* flag PHY resets so other paths can tell a link reset is in flight */
	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
		ioc->ioc_link_reset_in_progress = 1;
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done,
	    msecs_to_jiffies(10000));
	/* clear the in-progress flag even on timeout */
	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
	    ioc->ioc_link_reset_in_progress)
		ioc->ioc_link_reset_in_progress = 0;
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
		/* on timeout, escalate to a host reset unless a reset is
		 * already responsible for aborting this command
		 */
		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
			issue_reset = true;
		goto issue_host_reset;
	}
	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->base_cmds.reply,
		    sizeof(Mpi2SasIoUnitControlReply_t));
	else
		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	goto out;

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	rc = -EFAULT;
 out:
	mutex_unlock(&ioc->base_cmds.mutex);
	return rc;
}
4216
4217/**
4218 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
4219 * @ioc: per adapter object
4220 * @mpi_reply: the reply payload from FW
4221 * @mpi_request: the request payload sent to FW
4222 *
4223 * The SCSI Enclosure Processor request message causes the IOC to
4224 * communicate with SES devices to control LED status signals.
4225 *
4226 * Returns 0 for success, non-zero for failure.
4227 */
4228int
4229mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4230 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
4231{
4232 u16 smid;
4233 u32 ioc_state;
eb44552b 4234 bool issue_reset = false;
f92363d1
SR
4235 int rc;
4236 void *request;
4237 u16 wait_state_count;
4238
4239 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4240 __func__));
4241
4242 mutex_lock(&ioc->base_cmds.mutex);
4243
4244 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
4245 pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
4246 ioc->name, __func__);
4247 rc = -EAGAIN;
4248 goto out;
4249 }
4250
4251 wait_state_count = 0;
4252 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4253 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4254 if (wait_state_count++ == 10) {
4255 pr_err(MPT3SAS_FMT
4256 "%s: failed due to ioc not operational\n",
4257 ioc->name, __func__);
4258 rc = -EFAULT;
4259 goto out;
4260 }
4261 ssleep(1);
4262 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4263 pr_info(MPT3SAS_FMT
4264 "%s: waiting for operational state(count=%d)\n",
4265 ioc->name,
4266 __func__, wait_state_count);
4267 }
4268
4269 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4270 if (!smid) {
4271 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4272 ioc->name, __func__);
4273 rc = -EAGAIN;
4274 goto out;
4275 }
4276
4277 rc = 0;
4278 ioc->base_cmds.status = MPT3_CMD_PENDING;
4279 request = mpt3sas_base_get_msg_frame(ioc, smid);
4280 ioc->base_cmds.smid = smid;
4281 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
4282 init_completion(&ioc->base_cmds.done);
81c16f83 4283 ioc->put_smid_default(ioc, smid);
8bbb1cf6 4284 wait_for_completion_timeout(&ioc->base_cmds.done,
f92363d1
SR
4285 msecs_to_jiffies(10000));
4286 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4287 pr_err(MPT3SAS_FMT "%s: timeout\n",
4288 ioc->name, __func__);
4289 _debug_dump_mf(mpi_request,
4290 sizeof(Mpi2SepRequest_t)/4);
4291 if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
eb44552b 4292 issue_reset = false;
f92363d1
SR
4293 goto issue_host_reset;
4294 }
4295 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
4296 memcpy(mpi_reply, ioc->base_cmds.reply,
4297 sizeof(Mpi2SepReply_t));
4298 else
4299 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
4300 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4301 goto out;
4302
4303 issue_host_reset:
4304 if (issue_reset)
98c56ad3 4305 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1
SR
4306 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4307 rc = -EFAULT;
4308 out:
4309 mutex_unlock(&ioc->base_cmds.mutex);
4310 return rc;
4311}
4312
4313/**
4314 * _base_get_port_facts - obtain port facts reply and save in ioc
4315 * @ioc: per adapter object
f92363d1
SR
4316 *
4317 * Returns 0 for success, non-zero for failure.
4318 */
4319static int
98c56ad3 4320_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
f92363d1
SR
4321{
4322 Mpi2PortFactsRequest_t mpi_request;
4323 Mpi2PortFactsReply_t mpi_reply;
4324 struct mpt3sas_port_facts *pfacts;
4325 int mpi_reply_sz, mpi_request_sz, r;
4326
4327 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4328 __func__));
4329
4330 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
4331 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
4332 memset(&mpi_request, 0, mpi_request_sz);
4333 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4334 mpi_request.PortNumber = port;
4335 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
98c56ad3 4336 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
f92363d1
SR
4337
4338 if (r != 0) {
4339 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4340 ioc->name, __func__, r);
4341 return r;
4342 }
4343
4344 pfacts = &ioc->pfacts[port];
4345 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
4346 pfacts->PortNumber = mpi_reply.PortNumber;
4347 pfacts->VP_ID = mpi_reply.VP_ID;
4348 pfacts->VF_ID = mpi_reply.VF_ID;
4349 pfacts->MaxPostedCmdBuffers =
4350 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
4351
4352 return 0;
4353}
4354
/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 ioc_state;
	int rc;

	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* no register access while PCI error recovery is in progress */
	if (ioc->pci_error_recovery) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
		    "%s: host in pci error recovery\n", ioc->name, __func__));
		return -EFAULT;
	}

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
	    ioc->name, __func__, ioc_state));

	/* already usable - nothing to do */
	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		return 0;

	/* a stuck doorbell handshake can only be cleared by a diag reset */
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, printk(MPT3SAS_FMT
		    "unexpected doorbell active!\n", ioc->name));
		goto issue_diag_reset;
	}

	/* log the fault code, then recover via diag reset */
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state));
		return -EFAULT;
	}
	/* NOTE(review): when the wait above succeeds (ioc_state == 0),
	 * control falls through into issue_diag_reset, i.e. a diag reset is
	 * performed even after the adapter reached READY.  This matches the
	 * shipped code - confirm intent before "fixing" the fall-through.
	 */

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
4409
f92363d1
SR
4410/**
4411 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4412 * @ioc: per adapter object
f92363d1
SR
4413 *
4414 * Returns 0 for success, non-zero for failure.
4415 */
4416static int
98c56ad3 4417_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
4418{
4419 Mpi2IOCFactsRequest_t mpi_request;
4420 Mpi2IOCFactsReply_t mpi_reply;
4421 struct mpt3sas_facts *facts;
4422 int mpi_reply_sz, mpi_request_sz, r;
4423
4424 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4425 __func__));
4426
98c56ad3 4427 r = _base_wait_for_iocstate(ioc, 10);
4dc8c808
SR
4428 if (r) {
4429 dfailprintk(ioc, printk(MPT3SAS_FMT
4430 "%s: failed getting to correct state\n",
4431 ioc->name, __func__));
4432 return r;
4433 }
f92363d1
SR
4434 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
4435 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
4436 memset(&mpi_request, 0, mpi_request_sz);
4437 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4438 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
98c56ad3 4439 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
f92363d1
SR
4440
4441 if (r != 0) {
4442 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
4443 ioc->name, __func__, r);
4444 return r;
4445 }
4446
4447 facts = &ioc->facts;
4448 memset(facts, 0, sizeof(struct mpt3sas_facts));
4449 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
4450 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
4451 facts->VP_ID = mpi_reply.VP_ID;
4452 facts->VF_ID = mpi_reply.VF_ID;
4453 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
4454 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
4455 facts->WhoInit = mpi_reply.WhoInit;
4456 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
4457 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
4458 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
4459 facts->MaxReplyDescriptorPostQueueDepth =
4460 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
4461 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
4462 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
4463 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
4464 ioc->ir_firmware = 1;
9b05c91a
SR
4465 if ((facts->IOCCapabilities &
4466 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
4467 ioc->rdpq_array_capable = 1;
81c16f83
SPS
4468 if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
4469 ioc->atomic_desc_capable = 1;
f92363d1
SR
4470 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
4471 facts->IOCRequestFrameSize =
4472 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
ebb3024e
SS
4473 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4474 facts->IOCMaxChainSegmentSize =
4475 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
4476 }
f92363d1
SR
4477 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
4478 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
4479 ioc->shost->max_id = -1;
4480 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
4481 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
4482 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
4483 facts->HighPriorityCredit =
4484 le16_to_cpu(mpi_reply.HighPriorityCredit);
4485 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
4486 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
4487
4488 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4489 "hba queue depth(%d), max chains per io(%d)\n",
4490 ioc->name, facts->RequestCredit,
4491 facts->MaxChainDepth));
4492 dinitprintk(ioc, pr_info(MPT3SAS_FMT
4493 "request frame size(%d), reply frame size(%d)\n", ioc->name,
4494 facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
4495 return 0;
4496}
4497
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Builds the IOCInit request (queue depths, DMA base addresses, optional
 * RDPQ array) and sends it over the doorbell handshake.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;
	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
	dma_addr_t reply_post_free_array_dma;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	/* frame size is expressed in dwords */
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	/* hand the firmware the DMA base addresses of the shared queues */
	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		/* RDPQ mode: pass an array of per-queue base addresses
		 * instead of a single reply descriptor post queue
		 */
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
			reply_post_free_array_sz, &reply_post_free_array_dma);
		if (!reply_post_free_array) {
			pr_err(MPT3SAS_FMT
			"reply_post_free_array: pci_alloc_consistent failed\n",
			ioc->name);
			r = -ENOMEM;
			goto out;
		}
		memset(reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/* This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		pr_info("\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
		r = -EIO;
	}

out:
	/* the RDPQ array is only needed for the duration of the request */
	if (reply_post_free_array)
		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
		    reply_post_free_array,
		    reply_post_free_array_dma);
	return r;
}
4610
4611/**
4612 * mpt3sas_port_enable_done - command completion routine for port enable
4613 * @ioc: per adapter object
4614 * @smid: system request message index
4615 * @msix_index: MSIX table index supplied by the OS
4616 * @reply: reply message frame(lower 32bit addr)
4617 *
4618 * Return 1 meaning mf should be freed from _base_interrupt
4619 * 0 means the mf is freed from this function.
4620 */
4621u8
4622mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4623 u32 reply)
4624{
4625 MPI2DefaultReply_t *mpi_reply;
4626 u16 ioc_status;
4627
4628 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
4629 return 1;
4630
4631 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
4632 if (!mpi_reply)
4633 return 1;
4634
4635 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
4636 return 1;
4637
4638 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
4639 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
4640 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
4641 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
4642 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4643 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
4644 ioc->port_enable_failed = 1;
4645
4646 if (ioc->is_driver_loading) {
4647 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4648 mpt3sas_port_enable_complete(ioc);
4649 return 1;
4650 } else {
4651 ioc->start_scan_failed = ioc_status;
4652 ioc->start_scan = 0;
4653 return 1;
4654 }
4655 }
4656 complete(&ioc->port_enable_cmds.done);
4657 return 1;
4658}
4659
4660/**
4661 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4662 * @ioc: per adapter object
f92363d1
SR
4663 *
4664 * Returns 0 for success, non-zero for failure.
4665 */
4666static int
98c56ad3 4667_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
4668{
4669 Mpi2PortEnableRequest_t *mpi_request;
4670 Mpi2PortEnableReply_t *mpi_reply;
f92363d1
SR
4671 int r = 0;
4672 u16 smid;
4673 u16 ioc_status;
4674
4675 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4676
4677 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4678 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4679 ioc->name, __func__);
4680 return -EAGAIN;
4681 }
4682
4683 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4684 if (!smid) {
4685 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4686 ioc->name, __func__);
4687 return -EAGAIN;
4688 }
4689
4690 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4691 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4692 ioc->port_enable_cmds.smid = smid;
4693 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4694 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4695
4696 init_completion(&ioc->port_enable_cmds.done);
81c16f83 4697 ioc->put_smid_default(ioc, smid);
8bbb1cf6 4698 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
f92363d1
SR
4699 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4700 pr_err(MPT3SAS_FMT "%s: timeout\n",
4701 ioc->name, __func__);
4702 _debug_dump_mf(mpi_request,
4703 sizeof(Mpi2PortEnableRequest_t)/4);
4704 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
4705 r = -EFAULT;
4706 else
4707 r = -ETIME;
4708 goto out;
4709 }
4710
4711 mpi_reply = ioc->port_enable_cmds.reply;
4712 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
4713 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4714 pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
4715 ioc->name, __func__, ioc_status);
4716 r = -EFAULT;
4717 goto out;
4718 }
4719
4720 out:
4721 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
4722 pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
4723 "SUCCESS" : "FAILED"));
4724 return r;
4725}
4726
4727/**
4728 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
4729 * @ioc: per adapter object
4730 *
4731 * Returns 0 for success, non-zero for failure.
4732 */
4733int
4734mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
4735{
4736 Mpi2PortEnableRequest_t *mpi_request;
4737 u16 smid;
4738
4739 pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
4740
4741 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
4742 pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
4743 ioc->name, __func__);
4744 return -EAGAIN;
4745 }
4746
4747 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
4748 if (!smid) {
4749 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
4750 ioc->name, __func__);
4751 return -EAGAIN;
4752 }
4753
4754 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
4755 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4756 ioc->port_enable_cmds.smid = smid;
4757 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
4758 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
4759
81c16f83 4760 ioc->put_smid_default(ioc, smid);
f92363d1
SR
4761 return 0;
4762}
4763
4764/**
4765 * _base_determine_wait_on_discovery - desposition
4766 * @ioc: per adapter object
4767 *
4768 * Decide whether to wait on discovery to complete. Used to either
4769 * locate boot device, or report volumes ahead of physical devices.
4770 *
4771 * Returns 1 for wait, 0 for don't wait
4772 */
4773static int
4774_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
4775{
4776 /* We wait for discovery to complete if IR firmware is loaded.
4777 * The sas topology events arrive before PD events, so we need time to
4778 * turn on the bit in ioc->pd_handles to indicate PD
4779 * Also, it maybe required to report Volumes ahead of physical
4780 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
4781 */
4782 if (ioc->ir_firmware)
4783 return 1;
4784
4785 /* if no Bios, then we don't need to wait */
4786 if (!ioc->bios_pg3.BiosVersion)
4787 return 0;
4788
4789 /* Bios is present, then we drop down here.
4790 *
4791 * If there any entries in the Bios Page 2, then we wait
4792 * for discovery to complete.
4793 */
4794
4795 /* Current Boot Device */
4796 if ((ioc->bios_pg2.CurrentBootDeviceForm &
4797 MPI2_BIOSPAGE2_FORM_MASK) ==
4798 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4799 /* Request Boot Device */
4800 (ioc->bios_pg2.ReqBootDeviceForm &
4801 MPI2_BIOSPAGE2_FORM_MASK) ==
4802 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
4803 /* Alternate Request Boot Device */
4804 (ioc->bios_pg2.ReqAltBootDeviceForm &
4805 MPI2_BIOSPAGE2_FORM_MASK) ==
4806 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
4807 return 0;
4808
4809 return 1;
4810}
4811
4812/**
4813 * _base_unmask_events - turn on notification for this event
4814 * @ioc: per adapter object
4815 * @event: firmware event
4816 *
4817 * The mask is stored in ioc->event_masks.
4818 */
4819static void
4820_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4821{
4822 u32 desired_event;
4823
4824 if (event >= 128)
4825 return;
4826
4827 desired_event = (1 << (event % 32));
4828
4829 if (event < 32)
4830 ioc->event_masks[0] &= ~desired_event;
4831 else if (event < 64)
4832 ioc->event_masks[1] &= ~desired_event;
4833 else if (event < 96)
4834 ioc->event_masks[2] &= ~desired_event;
4835 else if (event < 128)
4836 ioc->event_masks[3] &= ~desired_event;
4837}
4838
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Sends an MPI2 EventNotification request carrying the cached
 * ioc->event_masks words so the firmware only reports events the
 * driver (or an application) has unmasked.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* only one internal "base" command may be outstanding at a time */
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	/* copy the cached host-endian masks into the little-endian frame */
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* wait up to 30 seconds for the firmware to reply */
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		/* MPT3_CMD_RESET means an IOC reset interrupted the command */
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
		    ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
4896
4897/**
4898 * mpt3sas_base_validate_event_type - validating event types
4899 * @ioc: per adapter object
4900 * @event: firmware event
4901 *
4902 * This will turn on firmware event notification when application
4903 * ask for that event. We don't mask events that are already enabled.
4904 */
4905void
4906mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4907{
4908 int i, j;
4909 u32 event_mask, desired_event;
4910 u8 send_update_to_fw;
4911
4912 for (i = 0, send_update_to_fw = 0; i <
4913 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
4914 event_mask = ~event_type[i];
4915 desired_event = 1;
4916 for (j = 0; j < 32; j++) {
4917 if (!(event_mask & desired_event) &&
4918 (ioc->event_masks[i] & desired_event)) {
4919 ioc->event_masks[i] &= ~desired_event;
4920 send_update_to_fw = 1;
4921 }
4922 desired_event = (desired_event << 1);
4923 }
4924 }
4925
4926 if (!send_update_to_fw)
4927 return;
4928
4929 mutex_lock(&ioc->base_cmds.mutex);
98c56ad3 4930 _base_event_notification(ioc);
f92363d1
SR
4931 mutex_unlock(&ioc->base_cmds.mutex);
4932}
4933
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Unlocks the HostDiagnostic register with the magic write sequence,
 * issues MPI2_DIAG_RESET_ADAPTER, then polls until the firmware
 * reaches the READY state.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		/* give up if the chip never grants diagnostic write access */
		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	/* snapshot HCBSize before the reset so HCB mode can be re-armed */
	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	     &ioc->chip->HostDiagnostic);

	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
		MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		/* all-ones read means the device fell off the PCIe bus */
		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		/* reset bit self-clears when the chip is done */
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		"restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	/* relock the diagnostic register */
	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}
5046
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Tries the least disruptive path first: wait out a transient RESET
 * state, then attempt a message-unit (soft) reset, and fall back to
 * the full diagnostic reset when required or forced.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* nothing to do while PCI error recovery owns the device */
	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
	    ioc->name, __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			/* poll once per second, up to 10 seconds */
			if (count++ == 10) {
				pr_err(MPT3SAS_FMT
				    "%s: failed going to ready state (ioc_state=0x%x)\n",
				    ioc->name, __func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
		    "unexpected doorbell active!\n",
		    ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	/* soft reset is only valid from the OPERATIONAL state */
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
	}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
5116
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Rebuilds all host-side tracker/free lists and reply queues, sends
 * IOCInit, unmasks interrupts, re-arms event notification and (after
 * initial load) issues port enable.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}


	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* drop any queued-up sas_io_unit_control requests */
	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	/* drop any queued-up event acknowledgments */
	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	/* initialize the scsi lookup free list */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	INIT_LIST_HEAD(&ioc->free_list);
	smid = 1;
	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].smid = smid;
		ioc->scsi_lookup[i].scmd = NULL;
		ioc->scsi_lookup[i].direct_io = 0;
		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
		    &ioc->free_list);
	}

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	/* chain pool */
	INIT_LIST_HEAD(&ioc->free_chain_list);
	for (i = 0; i < ioc->chain_depth; i++)
		list_add_tail(&ioc->chain_lookup[i].tracker_list,
		    &ioc->free_chain_list);

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz)
		ioc->reply_free[i] = cpu_to_le32(reply_address);

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		/* all-ones marks a descriptor slot as unused */
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r)
		return r;

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	_base_static_config_pages(ioc);

	if (ioc->is_driver_loading) {

		/* warpdrive: honor the OEM "hide SSDs" manufacturing flag */
		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
5298
/**
 * mpt3sas_base_free_resources - free resources controller resources
 * @ioc: per adapter object
 *
 * Quiesces the controller (mask interrupts, soft reset to READY) and
 * then unmaps the PCI resources, all under pci_access_mutex.
 *
 * Return nothing.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		/* shost_recovery flags the reset so I/O paths back off */
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}
5324
5325/**
5326 * mpt3sas_base_attach - attach controller instance
5327 * @ioc: per adapter object
5328 *
5329 * Returns 0 for success, non-zero for failure.
5330 */
5331int
5332mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5333{
5334 int r, i;
5335 int cpu_id, last_cpu_id = 0;
5336
5337 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
5338 __func__));
5339
5340 /* setup cpu_msix_table */
5341 ioc->cpu_count = num_online_cpus();
5342 for_each_online_cpu(cpu_id)
5343 last_cpu_id = cpu_id;
5344 ioc->cpu_msix_table_sz = last_cpu_id + 1;
5345 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
5346 ioc->reply_queue_count = 1;
5347 if (!ioc->cpu_msix_table) {
5348 dfailprintk(ioc, pr_info(MPT3SAS_FMT
5349 "allocation for cpu_msix_table failed!!!\n",
5350 ioc->name));
5351 r = -ENOMEM;
5352 goto out_free_resources;
5353 }
5354
7786ab6a
SR
5355 if (ioc->is_warpdrive) {
5356 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
5357 sizeof(resource_size_t *), GFP_KERNEL);
5358 if (!ioc->reply_post_host_index) {
5359 dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
5360 "for cpu_msix_table failed!!!\n", ioc->name));
5361 r = -ENOMEM;
5362 goto out_free_resources;
5363 }
5364 }
5365
9b05c91a
SR
5366 ioc->rdpq_array_enable_assigned = 0;
5367 ioc->dma_mask = 0;
f92363d1
SR
5368 r = mpt3sas_base_map_resources(ioc);
5369 if (r)
5370 goto out_free_resources;
5371
f92363d1 5372 pci_set_drvdata(ioc->pdev, ioc->shost);
98c56ad3 5373 r = _base_get_ioc_facts(ioc);
f92363d1
SR
5374 if (r)
5375 goto out_free_resources;
5376
471ef9d4
SR
5377 switch (ioc->hba_mpi_version_belonged) {
5378 case MPI2_VERSION:
5379 ioc->build_sg_scmd = &_base_build_sg_scmd;
5380 ioc->build_sg = &_base_build_sg;
5381 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
5382 break;
5383 case MPI25_VERSION:
b130b0d5 5384 case MPI26_VERSION:
471ef9d4
SR
5385 /*
5386 * In SAS3.0,
5387 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
5388 * Target Status - all require the IEEE formated scatter gather
5389 * elements.
5390 */
5391 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
5392 ioc->build_sg = &_base_build_sg_ieee;
5393 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
5394 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
81c16f83 5395
471ef9d4
SR
5396 break;
5397 }
f92363d1 5398
81c16f83
SPS
5399 if (ioc->atomic_desc_capable) {
5400 ioc->put_smid_default = &_base_put_smid_default_atomic;
5401 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
5402 ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
5403 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
5404 } else {
5405 ioc->put_smid_default = &_base_put_smid_default;
5406 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
5407 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
5408 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
5409 }
5410
5411
f92363d1
SR
5412 /*
5413 * These function pointers for other requests that don't
5414 * the require IEEE scatter gather elements.
5415 *
5416 * For example Configuration Pages and SAS IOUNIT Control don't.
5417 */
5418 ioc->build_sg_mpi = &_base_build_sg;
5419 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5420
98c56ad3 5421 r = _base_make_ioc_ready(ioc, SOFT_RESET);
f92363d1
SR
5422 if (r)
5423 goto out_free_resources;
5424
5425 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
5426 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
5427 if (!ioc->pfacts) {
5428 r = -ENOMEM;
5429 goto out_free_resources;
5430 }
5431
5432 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
98c56ad3 5433 r = _base_get_port_facts(ioc, i);
f92363d1
SR
5434 if (r)
5435 goto out_free_resources;
5436 }
5437
98c56ad3 5438 r = _base_allocate_memory_pools(ioc);
f92363d1
SR
5439 if (r)
5440 goto out_free_resources;
5441
5442 init_waitqueue_head(&ioc->reset_wq);
5443
5444 /* allocate memory pd handle bitmask list */
5445 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
5446 if (ioc->facts.MaxDevHandle % 8)
5447 ioc->pd_handles_sz++;
5448 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
5449 GFP_KERNEL);
5450 if (!ioc->pd_handles) {
5451 r = -ENOMEM;
5452 goto out_free_resources;
5453 }
5454 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
5455 GFP_KERNEL);
5456 if (!ioc->blocking_handles) {
5457 r = -ENOMEM;
5458 goto out_free_resources;
5459 }
5460
c696f7b8
SPS
5461 /* allocate memory for pending OS device add list */
5462 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
5463 if (ioc->facts.MaxDevHandle % 8)
5464 ioc->pend_os_device_add_sz++;
5465 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
5466 GFP_KERNEL);
5467 if (!ioc->pend_os_device_add)
5468 goto out_free_resources;
5469
5470 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
5471 ioc->device_remove_in_progress =
5472 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
5473 if (!ioc->device_remove_in_progress)
5474 goto out_free_resources;
5475
f92363d1
SR
5476 ioc->fwfault_debug = mpt3sas_fwfault_debug;
5477
5478 /* base internal command bits */
5479 mutex_init(&ioc->base_cmds.mutex);
5480 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5481 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5482
5483 /* port_enable command bits */
5484 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5485 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
5486
5487 /* transport internal command bits */
5488 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5489 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
5490 mutex_init(&ioc->transport_cmds.mutex);
5491
5492 /* scsih internal command bits */
5493 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5494 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
5495 mutex_init(&ioc->scsih_cmds.mutex);
5496
5497 /* task management internal command bits */
5498 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5499 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
5500 mutex_init(&ioc->tm_cmds.mutex);
5501
5502 /* config page internal command bits */
5503 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5504 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
5505 mutex_init(&ioc->config_cmds.mutex);
5506
5507 /* ctl module internal command bits */
5508 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
5509 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
5510 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
5511 mutex_init(&ioc->ctl_cmds.mutex);
5512
5513 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
5514 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
5515 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
5516 !ioc->ctl_cmds.sense) {
5517 r = -ENOMEM;
5518 goto out_free_resources;
5519 }
5520
5521 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5522 ioc->event_masks[i] = -1;
5523
5524 /* here we enable the events we care about */
5525 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
5526 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
5527 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
5528 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5529 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
5530 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
5531 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
5532 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
5533 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
5534 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
2d8ce8c9 5535 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
a470a51c
C
5536 if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
5537 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
f92363d1 5538
98c56ad3 5539 r = _base_make_ioc_operational(ioc);
f92363d1
SR
5540 if (r)
5541 goto out_free_resources;
5542
16e179bd 5543 ioc->non_operational_loop = 0;
459325c4 5544 ioc->got_task_abort_from_ioctl = 0;
f92363d1
SR
5545 return 0;
5546
5547 out_free_resources:
5548
5549 ioc->remove_host = 1;
5550
5551 mpt3sas_base_free_resources(ioc);
5552 _base_release_memory_pools(ioc);
5553 pci_set_drvdata(ioc->pdev, NULL);
5554 kfree(ioc->cpu_msix_table);
7786ab6a
SR
5555 if (ioc->is_warpdrive)
5556 kfree(ioc->reply_post_host_index);
f92363d1
SR
5557 kfree(ioc->pd_handles);
5558 kfree(ioc->blocking_handles);
c696f7b8
SPS
5559 kfree(ioc->device_remove_in_progress);
5560 kfree(ioc->pend_os_device_add);
f92363d1
SR
5561 kfree(ioc->tm_cmds.reply);
5562 kfree(ioc->transport_cmds.reply);
5563 kfree(ioc->scsih_cmds.reply);
5564 kfree(ioc->config_cmds.reply);
5565 kfree(ioc->base_cmds.reply);
5566 kfree(ioc->port_enable_cmds.reply);
5567 kfree(ioc->ctl_cmds.reply);
5568 kfree(ioc->ctl_cmds.sense);
5569 kfree(ioc->pfacts);
5570 ioc->ctl_cmds.reply = NULL;
5571 ioc->base_cmds.reply = NULL;
5572 ioc->tm_cmds.reply = NULL;
5573 ioc->scsih_cmds.reply = NULL;
5574 ioc->transport_cmds.reply = NULL;
5575 ioc->config_cmds.reply = NULL;
5576 ioc->pfacts = NULL;
5577 return r;
5578}
5579
5580
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Reverses mpt3sas_base_attach(): stops the watchdog, quiesces and
 * unmaps the hardware, releases memory pools and frees all of the
 * internal command/bitmap allocations.
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	/* reply_post_host_index is only allocated on warpdrive HBAs */
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
5614
/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	/* give scsih and ctl modules their chance at each phase first */
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		/* fail back every internal command interrupted by the reset:
		 * flag MPT3_CMD_RESET, return the smid and wake the waiter
		 */
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			/* during initial load nobody waits on the completion;
			 * record the failure for scan_finished instead
			 */
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
5676
/**
 * _wait_for_commands_to_complete - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits (up to 10s) for all pending commands to complete
 * prior to putting controller in reset.
 */
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;
	unsigned long flags;
	u16 i;

	ioc->pending_io_count = 0;

	/* if the IOC is not operational, no replies will arrive anyway */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = 0; i < ioc->scsiio_depth; i++)
		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
			ioc->pending_io_count++;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
5710
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Serialized top-level reset path: drains outstanding I/O, runs the
 * PRE/AFTER/DONE reset callbacks, brings the IOC back to READY and
 * then OPERATIONAL, and fires diag-buffer triggers when applicable.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		/* report the outcome of the reset that just ran */
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/* a registered-but-not-released trace buffer means a trigger
	 * must fire after a successful reset; note whether the IOC was
	 * in FAULT so the right trigger type is used
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		      "Please reboot the system and ensure that the correct"
		      " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}