/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.

 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */
#include <linux/aer.h>


#include "mpt3sas_base.h"

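/*
 * Completion callback table: entries are registered via
 * mpt3sas_base_register_callback_handler() and looked up by cb_idx when a
 * reply descriptor is processed in _base_interrupt().
 */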
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];


#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

 /* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);

/**
 * mpt3sas_base_check_cmd_timeout - check whether an issued command timed
 *	out or was terminated due to a host reset.
 *
 * @ioc: per adapter object.
 * @status: status of the issued command.
 * @mpi_request: mf request pointer.
 * @sz: size of buffer.
 *
 * Return: 1 if a reset should be issued, 0 otherwise.
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}

/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: value supplied for the module parameter
 * @kp: kernel parameter descriptor
 *
 * Return: 0 on success, otherwise the error returned by param_set_int().
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);

/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() up to three times if it returns zero
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;

	do {
		ret_val = readl(addr);
		i++;
	} while (ret_val == 0 && i < 3);

	return ret_val;
}

static inline u32
_base_readl(const volatile void __iomem *addr)
{
	return readl(addr);
}

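/*
 * One of the two readl helpers above backs the ioc->base_readl hook used for
 * register reads elsewhere in this file (e.g. the Doorbell and
 * HostInterruptMask reads), so the read-retry workaround is applied only on
 * controllers that need it.
 */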
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}

/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
			(void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
			(void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}

/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}

/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid can have 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */

static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	// Added extra 1 to reach end of chain.
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the provided smid.
 *			(Each smid can have 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Physical address of the buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}

/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 *			lookup list and Provides chain_buffer
 *			address for the matching dma address.
 *			(Each smid can have 64K, starting at offset 17024)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer. Or Null on Failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
		dma_addr_t chain_buffer_dma)
{
	u16 index, j;
	struct chain_tracker *ct;

	for (index = 0; index < ioc->scsiio_depth; index++) {
		for (j = 0; j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[index].chains_per_smid[j];
			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
				return ct->chain_buffer;
		}
	}
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
}

/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
		void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32 sgl_flags, sge_chain_count = 0;
	bool is_write = 0;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virtual
	 * address associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = 1;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which on passing
			 * chain_buffer_dma returns chain_buffer. Get
			 * the virtual address for sgel->Address
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying a 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have an
				 * associated sg_next. Sanity-check that
				 * sg_next is not NULL; it would be a bug
				 * if it were.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}


/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}

/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		ioc_err(ioc, "SAS host is non-operational !!!!\n");

		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function by considering the controller to be in a
		 * non-operational state. So here priority is given to the
		 * EEH recovery. If it does not resolve this issue, the
		 * mpt3sas driver will consider this controller to be in a
		 * non-operational state and remove the dead ioc function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/*Remove the Dead Host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
				__func__);
		else
			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
				__func__);
		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		ioc_warn(ioc, "%s: hard reset: %s\n",
			 __func__, rc == 0 ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}

/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}

/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}

/**
 * mpt3sas_halt_firmware - halts the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. The
 * intent is to stop both driver and firmware so that the end user
 * can obtain a ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = ioc->base_readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc, doorbell);
	else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		ioc_err(ioc, "Firmware is halted due to command timeout\n");
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}

/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		 desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}

/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		ioc_info(ioc, "Discovery: (%s)",
			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
			 "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
				le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		desc = "SAS Device Discovery Error";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		ioc_info(ioc, "PCIE Enumeration: (%s)",
			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
			 "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont("enumeration_status(0x%08x)",
				le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	ioc_info(ioc, "%s\n", desc);
}

/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
		 log_info,
		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}

/**
 * _base_display_reply_info - verbose translation of a reply's ioc_status
 *	and log info
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}


/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}

/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 *
 * Return:
 * 1 meaning mf should be freed from _base_interrupt
 * 0 means the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
					GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
				&ioc->delayed_event_ack_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
				    le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	mpt3sas_base_put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}

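/**
 * _get_st_from_smid - obtain the scsiio tracker for a SCSI IO smid
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the scsiio_tracker embedded in the scsi_cmnd that owns @smid, or
 * NULL if @smid is not a valid SCSI IO smid or no command is associated
 * with it.
 */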
static struct scsiio_tracker *
_get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *cmd;

	if (WARN_ON(!smid) ||
	    WARN_ON(smid >= ioc->hi_priority_smid))
		return NULL;

	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
	if (cmd)
		return scsi_cmd_priv(cmd);

	return NULL;
}

/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
	u8 cb_idx = 0xFF;

	if (smid < ioc->hi_priority_smid) {
		struct scsiio_tracker *st;

		if (smid < ctl_smid) {
			st = _get_st_from_smid(ioc, smid);
			if (st)
				cb_idx = st->cb_idx;
		} else if (smid == ctl_smid)
			cb_idx = ioc->ctl_cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	}
	return cb_idx;
}

/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->base_readl(&ioc->chip->HostInterruptMask);
}

/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}

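/*
 * A posted reply descriptor, viewed either as a single 64-bit word or as its
 * low/high 32-bit halves; an all-ones descriptor marks an unused entry.
 */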
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
						reply,
						ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds > ioc->hba_queue_depth/3) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
						((msix_index & 7) <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
						(msix_index <<
						 MPI2_RPHI_MSIX_INDEX_SHIFT),
						&ioc->chip->ReplyPostHostIndex);
			}
			completed_cmds = 1;
		}
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:

	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
			MPI2_RPHI_MSIX_INDEX_SHIFT),
			&ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}

/**
 * _base_is_controller_msix_enabled - check whether the controller supports
 *	multi-reply queues
 * @ioc: per adapter object
 *
 * Return: Whether or not MSI/X is enabled.
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}

/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
				ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}
}

/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}

/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Return: Index of @cb_func.
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}

/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}


/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);
	ioc->base_add_sg_single(paddr, flags_length, -1);
}

/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}


/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}

/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command of the IO request
 *
 * Return: chain tracker from chain_lookup table using key as
 * smid and smid's chain_offset.
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
			       struct scsi_cmnd *scmd)
{
	struct chain_tracker *chain_req;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 smid = st->smid;
	u8 chain_offset =
	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);

	if (chain_offset == ioc->chains_needed_per_io)
		return NULL;

	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
	return chain_req;
}


/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}

aff39e61
SPS
1782/* IEEE format sgls */
1783
1784/**
1785 * _base_build_nvme_prp - This function is called for NVMe end devices to build
1786 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
1787 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
1788 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
1789 * used to describe a larger data buffer. If the data buffer is too large to
 1790 * describe using the two PRP entries inside the NVMe message, then PRP1
1791 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
1792 * list located elsewhere in memory to describe the remaining data memory
1793 * segments. The PRP list will be contiguous.
4beb4867 1794 *
aff39e61
SPS
1795 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 1796 * consists of a list of PRP entries to describe a number of noncontiguous
 1797 * physical memory segments as a single memory buffer, just as an SGL does. Note
1798 * however, that this function is only used by the IOCTL call, so the memory
1799 * given will be guaranteed to be contiguous. There is no need to translate
1800 * non-contiguous SGL into a PRP in this case. All PRPs will describe
1801 * contiguous space that is one page size each.
1802 *
1803 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
1804 * a PRP list pointer or a PRP element, depending upon the command. PRP2
1805 * contains the second PRP element if the memory being described fits within 2
1806 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
1807 *
1808 * A PRP list pointer contains the address of a PRP list, structured as a linear
1809 * array of PRP entries. Each PRP entry in this list describes a segment of
1810 * physical memory.
1811 *
1812 * Each 64-bit PRP entry comprises an address and an offset field. The address
1813 * always points at the beginning of a 4KB physical memory page, and the offset
1814 * describes where within that 4KB page the memory segment begins. Only the
 1815 * first element in a PRP list may contain a non-zero offset, implying that all
1816 * memory segments following the first begin at the start of a 4KB page.
1817 *
1818 * Each PRP element normally describes 4KB of physical memory, with exceptions
1819 * for the first and last elements in the list. If the memory being described
1820 * by the list begins at a non-zero offset within the first 4KB page, then the
1821 * first PRP element will contain a non-zero offset indicating where the region
1822 * begins within the 4KB page. The last memory segment may end before the end
1823 * of the 4KB segment, depending upon the overall size of the memory being
1824 * described by the PRP list.
1825 *
1826 * Since PRP entries lack any indication of size, the overall data buffer length
1827 * is used to determine where the end of the data memory buffer is located, and
1828 * how many PRP entries are required to describe it.
1829 *
1830 * @ioc: per adapter object
 1831 * @smid: system request message index for getting associated SGL
1832 * @nvme_encap_request: the NVMe request msg frame pointer
1833 * @data_out_dma: physical address for WRITES
1834 * @data_out_sz: data xfer size for WRITES
1835 * @data_in_dma: physical address for READS
1836 * @data_in_sz: data xfer size for READS
aff39e61
SPS
1837 */
1838static void
1839_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1840 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1841 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1842 size_t data_in_sz)
1843{
1844 int prp_size = NVME_PRP_SIZE;
d8335ae2
AB
1845 __le64 *prp_entry, *prp1_entry, *prp2_entry;
1846 __le64 *prp_page;
1847 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
aff39e61
SPS
1848 u32 offset, entry_len;
1849 u32 page_mask_result, page_mask;
aff39e61 1850 size_t length;
84203b35
BVA
1851 struct mpt3sas_nvme_cmd *nvme_cmd =
1852 (void *)nvme_encap_request->NVMe_Command;
aff39e61
SPS
1853
1854 /*
1855 * Not all commands require a data transfer. If no data, just return
1856 * without constructing any PRP.
1857 */
1858 if (!data_in_sz && !data_out_sz)
1859 return;
84203b35
BVA
1860 prp1_entry = &nvme_cmd->prp1;
1861 prp2_entry = &nvme_cmd->prp2;
aff39e61
SPS
1862 prp_entry = prp1_entry;
1863 /*
1864 * For the PRP entries, use the specially allocated buffer of
1865 * contiguous memory.
1866 */
494f401b 1867 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
d8335ae2 1868 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
aff39e61
SPS
1869
1870 /*
 1871 * Check if we are within 1 entry of a page boundary; we don't
1872 * want our first entry to be a PRP List entry.
1873 */
1874 page_mask = ioc->page_size - 1;
1875 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1876 if (!page_mask_result) {
1877 /* Bump up to next page boundary. */
494f401b 1878 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
d8335ae2 1879 prp_page_dma = prp_page_dma + prp_size;
aff39e61
SPS
1880 }
1881
1882 /*
1883 * Set PRP physical pointer, which initially points to the current PRP
1884 * DMA memory page.
1885 */
d8335ae2 1886 prp_entry_dma = prp_page_dma;
aff39e61
SPS
1887
1888 /* Get physical address and length of the data buffer. */
1889 if (data_in_sz) {
d8335ae2 1890 dma_addr = data_in_dma;
aff39e61
SPS
1891 length = data_in_sz;
1892 } else {
d8335ae2 1893 dma_addr = data_out_dma;
aff39e61
SPS
1894 length = data_out_sz;
1895 }
1896
1897 /* Loop while the length is not zero. */
1898 while (length) {
1899 /*
 1900 * Check if we need to put a list pointer here, i.e. if we are at
 1901 * a page boundary - prp_size (8 bytes).
1902 */
d8335ae2 1903 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
aff39e61
SPS
1904 if (!page_mask_result) {
1905 /*
1906 * This is the last entry in a PRP List, so we need to
1907 * put a PRP list pointer here. What this does is:
1908 * - bump the current memory pointer to the next
1909 * address, which will be the next full page.
1910 * - set the PRP Entry to point to that page. This
1911 * is now the PRP List pointer.
 1912 * - bump the PRP Entry pointer to the start of the
1913 * next page. Since all of this PRP memory is
1914 * contiguous, no need to get a new page - it's
1915 * just the next address.
1916 */
d8335ae2
AB
1917 prp_entry_dma++;
1918 *prp_entry = cpu_to_le64(prp_entry_dma);
aff39e61
SPS
1919 prp_entry++;
1920 }
1921
 1922 /* Handle the case where this entry covers only part of a page. */
d8335ae2 1923 offset = dma_addr & page_mask;
aff39e61
SPS
1924 entry_len = ioc->page_size - offset;
1925
1926 if (prp_entry == prp1_entry) {
1927 /*
1928 * Must fill in the first PRP pointer (PRP1) before
1929 * moving on.
1930 */
d8335ae2 1931 *prp1_entry = cpu_to_le64(dma_addr);
aff39e61
SPS
1932
1933 /*
1934 * Now point to the second PRP entry within the
1935 * command (PRP2).
1936 */
1937 prp_entry = prp2_entry;
1938 } else if (prp_entry == prp2_entry) {
1939 /*
1940 * Should the PRP2 entry be a PRP List pointer or just
1941 * a regular PRP pointer? If there is more than one
1942 * more page of data, must use a PRP List pointer.
1943 */
1944 if (length > ioc->page_size) {
1945 /*
1946 * PRP2 will contain a PRP List pointer because
1947 * more PRP's are needed with this command. The
1948 * list will start at the beginning of the
1949 * contiguous buffer.
1950 */
d8335ae2 1951 *prp2_entry = cpu_to_le64(prp_entry_dma);
aff39e61
SPS
1952
1953 /*
1954 * The next PRP Entry will be the start of the
1955 * first PRP List.
1956 */
1957 prp_entry = prp_page;
1958 } else {
1959 /*
1960 * After this, the PRP Entries are complete.
1961 * This command uses 2 PRP's and no PRP list.
1962 */
d8335ae2 1963 *prp2_entry = cpu_to_le64(dma_addr);
aff39e61
SPS
1964 }
1965 } else {
1966 /*
1967 * Put entry in list and bump the addresses.
1968 *
1969 * After PRP1 and PRP2 are filled in, this will fill in
1970 * all remaining PRP entries in a PRP List, one per
1971 * each time through the loop.
1972 */
d8335ae2 1973 *prp_entry = cpu_to_le64(dma_addr);
aff39e61 1974 prp_entry++;
d8335ae2 1975 prp_entry_dma++;
aff39e61
SPS
1976 }
1977
1978 /*
1979 * Bump the phys address of the command's data buffer by the
1980 * entry_len.
1981 */
d8335ae2 1982 dma_addr += entry_len;
aff39e61
SPS
1983
1984 /* Decrement length accounting for last partial page. */
1985 if (entry_len > length)
1986 length = 0;
1987 else
1988 length -= entry_len;
1989 }
1990}
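/*
 * Worked example (illustrative only, assuming a 4 KB IOC page size): a
 * contiguous 10 KB IOCTL buffer that starts 512 bytes into a page needs
 * three PRP entries (3584 + 4096 + 2560 bytes). Following the rules
 * documented above, PRP1 holds the first entry (with the 512-byte
 * offset), and because more than two entries are required, PRP2 holds a
 * PRP list pointer whose list contains the remaining two entries, each
 * starting on a page boundary.
 */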
1991
016d5c35
SPS
1992/**
1993 * base_make_prp_nvme -
 1994 * Prepare PRPs (Physical Region Pages) - SGLs specific to NVMe drives only
1995 *
1996 * @ioc: per adapter object
1997 * @scmd: SCSI command from the mid-layer
1998 * @mpi_request: mpi request
1999 * @smid: msg Index
2000 * @sge_count: scatter gather element count.
2001 *
4beb4867 2002 * Return: nothing; the PRP entries are written directly into the NVMe
016d5c35
SPS
 2003 * message (the PRP vs. IEEE SGL decision is made by the caller).
2004 */
494f401b 2005static void
016d5c35
SPS
2006base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2007 struct scsi_cmnd *scmd,
2008 Mpi25SCSIIORequest_t *mpi_request,
2009 u16 smid, int sge_count)
2010{
d8335ae2 2011 int sge_len, num_prp_in_chain = 0;
016d5c35 2012 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
494f401b 2013 __le64 *curr_buff;
d8335ae2 2014 dma_addr_t msg_dma, sge_addr, offset;
016d5c35
SPS
2015 u32 page_mask, page_mask_result;
2016 struct scatterlist *sg_scmd;
2017 u32 first_prp_len;
2018 int data_len = scsi_bufflen(scmd);
2019 u32 nvme_pg_size;
2020
2021 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2022 /*
 2023 * NVMe has a very convoluted PRP format. One PRP is required
 2024 * for each page or partial page. The driver needs to split up OS sg_list
 2025 * entries if they are longer than one page or cross a page
 2026 * boundary. The driver also has to insert a PRP list pointer entry as
2027 * the last entry in each physical page of the PRP list.
2028 *
2029 * NOTE: The first PRP "entry" is actually placed in the first
2030 * SGL entry in the main message as IEEE 64 format. The 2nd
2031 * entry in the main message is the chain element, and the rest
2032 * of the PRP entries are built in the contiguous pcie buffer.
2033 */
2034 page_mask = nvme_pg_size - 1;
2035
2036 /*
2037 * Native SGL is needed.
2038 * Put a chain element in main message frame that points to the first
2039 * chain buffer.
2040 *
2041 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2042 * a native SGL.
2043 */
2044
2045 /* Set main message chain element pointer */
2046 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2047 /*
2048 * For NVMe the chain element needs to be the 2nd SG entry in the main
2049 * message.
2050 */
2051 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2052 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2053
2054 /*
2055 * For the PRP entries, use the specially allocated buffer of
2056 * contiguous memory. Normal chain buffers can't be used
2057 * because each chain buffer would need to be the size of an OS
2058 * page (4k).
2059 */
2060 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
d8335ae2 2061 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
016d5c35 2062
d8335ae2 2063 main_chain_element->Address = cpu_to_le64(msg_dma);
016d5c35
SPS
2064 main_chain_element->NextChainOffset = 0;
2065 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2066 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2067 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2068
 2069 /* Build the first PRP; the SGE need not be page aligned */
2070 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2071 sg_scmd = scsi_sglist(scmd);
2072 sge_addr = sg_dma_address(sg_scmd);
2073 sge_len = sg_dma_len(sg_scmd);
2074
d8335ae2 2075 offset = sge_addr & page_mask;
016d5c35
SPS
2076 first_prp_len = nvme_pg_size - offset;
2077
2078 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2079 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2080
2081 data_len -= first_prp_len;
2082
2083 if (sge_len > first_prp_len) {
2084 sge_addr += first_prp_len;
2085 sge_len -= first_prp_len;
2086 } else if (data_len && (sge_len == first_prp_len)) {
2087 sg_scmd = sg_next(sg_scmd);
2088 sge_addr = sg_dma_address(sg_scmd);
2089 sge_len = sg_dma_len(sg_scmd);
2090 }
2091
2092 for (;;) {
d8335ae2 2093 offset = sge_addr & page_mask;
016d5c35
SPS
2094
 2095 /* Insert a PRP list pointer entry at the page boundary */
2096 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2097 if (unlikely(!page_mask_result)) {
2098 scmd_printk(KERN_NOTICE,
2099 scmd, "page boundary curr_buff: 0x%p\n",
2100 curr_buff);
d8335ae2
AB
2101 msg_dma += 8;
2102 *curr_buff = cpu_to_le64(msg_dma);
016d5c35
SPS
2103 curr_buff++;
2104 num_prp_in_chain++;
2105 }
2106
2107 *curr_buff = cpu_to_le64(sge_addr);
2108 curr_buff++;
d8335ae2 2109 msg_dma += 8;
016d5c35
SPS
2110 num_prp_in_chain++;
2111
2112 sge_addr += nvme_pg_size;
2113 sge_len -= nvme_pg_size;
2114 data_len -= nvme_pg_size;
2115
2116 if (data_len <= 0)
2117 break;
2118
2119 if (sge_len > 0)
2120 continue;
2121
2122 sg_scmd = sg_next(sg_scmd);
2123 sge_addr = sg_dma_address(sg_scmd);
2124 sge_len = sg_dma_len(sg_scmd);
2125 }
2126
2127 main_chain_element->Length =
2128 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2129 return;
2130}
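/*
 * Illustrative sketch (not part of the driver): a hypothetical helper that
 * applies the "one PRP entry per page or partial page" rule described
 * above to a single SG segment. The name and signature are invented for
 * illustration only; base_make_prp_nvme() itself does this accounting
 * implicitly while walking the scatterlist.
 */
static inline u32 _example_prp_entries_for_segment(dma_addr_t addr, u32 len,
	u32 pg_size)
{
	/* bytes that fit in the first, possibly unaligned, page */
	u32 first = min_t(u32, pg_size - (addr & (pg_size - 1)), len);
	u32 entries = 1;

	len -= first;
	/* every remaining full or partial page needs one more entry */
	entries += DIV_ROUND_UP(len, pg_size);
	return entries;
}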
2131
2132static bool
2133base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2134 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2135{
2136 u32 data_length = 0;
016d5c35
SPS
2137 bool build_prp = true;
2138
494f401b 2139 data_length = scsi_bufflen(scmd);
016d5c35
SPS
2140
 2141 /* If the data length is <= 16K and the number of SGE entries is <= 2,
 2142 * we build an IEEE SGL
2143 */
2144 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2145 build_prp = false;
2146
2147 return build_prp;
2148}
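/*
 * Example (illustrative): assuming NVME_PRP_PAGE_SIZE is 4 KB, the
 * threshold above is 16 KB. An 8 KB transfer mapped to two SG entries
 * therefore stays on the IEEE SGL path, while a 64 KB transfer, or any
 * transfer mapped to three or more SG entries, is built as an NVMe PRP.
 */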
2149
2150/**
2151 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2152 * determine if the driver needs to build a native SGL. If so, that native
2153 * SGL is built in the special contiguous buffers allocated especially for
 2154 * PCIe SGL creation. If the driver does not build a native SGL, it returns
 2155 * 1 and a normal IEEE SGL is built instead. Currently this routine
2156 * supports NVMe.
2157 * @ioc: per adapter object
2158 * @mpi_request: mf request pointer
2159 * @smid: system request message index
2160 * @scmd: scsi command
2161 * @pcie_device: points to the PCIe device's info
2162 *
4beb4867 2163 * Return: 0 if native SGL was built, 1 if no SGL was built
016d5c35
SPS
2164 */
2165static int
2166_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2167 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2168 struct _pcie_device *pcie_device)
2169{
016d5c35
SPS
2170 int sges_left;
2171
2172 /* Get the SG list pointer and info. */
016d5c35
SPS
2173 sges_left = scsi_dma_map(scmd);
2174 if (sges_left < 0) {
2175 sdev_printk(KERN_ERR, scmd->device,
2176 "scsi_dma_map failed: request for %d bytes!\n",
2177 scsi_bufflen(scmd));
2178 return 1;
2179 }
2180
2181 /* Check if we need to build a native SG list. */
2182 if (base_is_prp_possible(ioc, pcie_device,
2183 scmd, sges_left) == 0) {
2184 /* We built a native SG list, just return. */
2185 goto out;
2186 }
2187
2188 /*
2189 * Build native NVMe PRP.
2190 */
2191 base_make_prp_nvme(ioc, scmd, mpi_request,
2192 smid, sges_left);
2193
2194 return 0;
2195out:
2196 scsi_dma_unmap(scmd);
2197 return 1;
2198}
f92363d1
SR
2199
2200/**
2201 * _base_add_sg_single_ieee - add sg element for IEEE format
2202 * @paddr: virtual address for SGE
2203 * @flags: SGE flags
2204 * @chain_offset: number of 128 byte elements from start of segment
2205 * @length: data transfer length
2206 * @dma_addr: Physical address
f92363d1
SR
2207 */
2208static void
2209_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2210 dma_addr_t dma_addr)
2211{
2212 Mpi25IeeeSgeChain64_t *sgel = paddr;
2213
2214 sgel->Flags = flags;
2215 sgel->NextChainOffset = chain_offset;
2216 sgel->Length = cpu_to_le32(length);
2217 sgel->Address = cpu_to_le64(dma_addr);
2218}
2219
2220/**
2221 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2222 * @ioc: per adapter object
2223 * @paddr: virtual address for SGE
2224 *
 2225 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2226 * something to use if the target device goes brain dead and tries
2227 * to send data even when none is asked for.
f92363d1
SR
2228 */
2229static void
2230_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2231{
2232 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2233 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2234 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
b130b0d5 2235
f92363d1
SR
2236 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2237}
2238
471ef9d4
SR
2239/**
2240 * _base_build_sg_scmd - main sg creation routine
016d5c35 2241 * pcie_device is unused here!
471ef9d4
SR
2242 * @ioc: per adapter object
2243 * @scmd: scsi command
2244 * @smid: system request message index
016d5c35 2245 * @unused: unused pcie_device pointer
471ef9d4
SR
2246 * Context: none.
2247 *
2248 * The main routine that builds scatter gather table from a given
2249 * scsi request sent via the .queuecommand main handler.
2250 *
4beb4867 2251 * Return: 0 success, anything else error
471ef9d4
SR
2252 */
2253static int
2254_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
016d5c35 2255 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
471ef9d4
SR
2256{
2257 Mpi2SCSIIORequest_t *mpi_request;
2258 dma_addr_t chain_dma;
2259 struct scatterlist *sg_scmd;
2260 void *sg_local, *chain;
2261 u32 chain_offset;
2262 u32 chain_length;
2263 u32 chain_flags;
2264 int sges_left;
2265 u32 sges_in_segment;
2266 u32 sgl_flags;
2267 u32 sgl_flags_last_element;
2268 u32 sgl_flags_end_buffer;
2269 struct chain_tracker *chain_req;
2270
2271 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2272
2273 /* init scatter gather flags */
2274 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2275 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2276 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2277 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2278 << MPI2_SGE_FLAGS_SHIFT;
2279 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2280 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2281 << MPI2_SGE_FLAGS_SHIFT;
2282 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2283
2284 sg_scmd = scsi_sglist(scmd);
2285 sges_left = scsi_dma_map(scmd);
2286 if (sges_left < 0) {
2287 sdev_printk(KERN_ERR, scmd->device,
1c2048bd 2288 "scsi_dma_map failed: request for %d bytes!\n",
471ef9d4
SR
2289 scsi_bufflen(scmd));
2290 return -ENOMEM;
2291 }
2292
2293 sg_local = &mpi_request->SGL;
2294 sges_in_segment = ioc->max_sges_in_main_message;
2295 if (sges_left <= sges_in_segment)
2296 goto fill_in_last_segment;
2297
2298 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2299 (sges_in_segment * ioc->sge_size))/4;
2300
2301 /* fill in main message segment when there is a chain following */
2302 while (sges_in_segment) {
2303 if (sges_in_segment == 1)
2304 ioc->base_add_sg_single(sg_local,
2305 sgl_flags_last_element | sg_dma_len(sg_scmd),
2306 sg_dma_address(sg_scmd));
2307 else
2308 ioc->base_add_sg_single(sg_local, sgl_flags |
2309 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2310 sg_scmd = sg_next(sg_scmd);
2311 sg_local += ioc->sge_size;
2312 sges_left--;
2313 sges_in_segment--;
2314 }
2315
2316 /* initializing the chain flags and pointers */
2317 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
dbec4c90 2318 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
471ef9d4
SR
2319 if (!chain_req)
2320 return -1;
2321 chain = chain_req->chain_buffer;
2322 chain_dma = chain_req->chain_buffer_dma;
2323 do {
2324 sges_in_segment = (sges_left <=
2325 ioc->max_sges_in_chain_message) ? sges_left :
2326 ioc->max_sges_in_chain_message;
2327 chain_offset = (sges_left == sges_in_segment) ?
2328 0 : (sges_in_segment * ioc->sge_size)/4;
2329 chain_length = sges_in_segment * ioc->sge_size;
2330 if (chain_offset) {
2331 chain_offset = chain_offset <<
2332 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2333 chain_length += ioc->sge_size;
2334 }
2335 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2336 chain_length, chain_dma);
2337 sg_local = chain;
2338 if (!chain_offset)
2339 goto fill_in_last_segment;
2340
2341 /* fill in chain segments */
2342 while (sges_in_segment) {
2343 if (sges_in_segment == 1)
2344 ioc->base_add_sg_single(sg_local,
2345 sgl_flags_last_element |
2346 sg_dma_len(sg_scmd),
2347 sg_dma_address(sg_scmd));
2348 else
2349 ioc->base_add_sg_single(sg_local, sgl_flags |
2350 sg_dma_len(sg_scmd),
2351 sg_dma_address(sg_scmd));
2352 sg_scmd = sg_next(sg_scmd);
2353 sg_local += ioc->sge_size;
2354 sges_left--;
2355 sges_in_segment--;
2356 }
2357
dbec4c90 2358 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
471ef9d4
SR
2359 if (!chain_req)
2360 return -1;
2361 chain = chain_req->chain_buffer;
2362 chain_dma = chain_req->chain_buffer_dma;
2363 } while (1);
2364
2365
2366 fill_in_last_segment:
2367
2368 /* fill the last segment */
2369 while (sges_left) {
2370 if (sges_left == 1)
2371 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2372 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2373 else
2374 ioc->base_add_sg_single(sg_local, sgl_flags |
2375 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2376 sg_scmd = sg_next(sg_scmd);
2377 sg_local += ioc->sge_size;
2378 sges_left--;
2379 }
2380
2381 return 0;
2382}
2383
f92363d1
SR
2384/**
2385 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2386 * @ioc: per adapter object
2387 * @scmd: scsi command
2388 * @smid: system request message index
016d5c35
SPS
2389 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 2390 * constructed when needed.
f92363d1
SR
2391 * Context: none.
2392 *
2393 * The main routine that builds scatter gather table from a given
2394 * scsi request sent via the .queuecommand main handler.
2395 *
4beb4867 2396 * Return: 0 success, anything else error
f92363d1
SR
2397 */
2398static int
2399_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
016d5c35 2400 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
f92363d1 2401{
016d5c35 2402 Mpi25SCSIIORequest_t *mpi_request;
f92363d1
SR
2403 dma_addr_t chain_dma;
2404 struct scatterlist *sg_scmd;
2405 void *sg_local, *chain;
2406 u32 chain_offset;
2407 u32 chain_length;
f92363d1
SR
2408 int sges_left;
2409 u32 sges_in_segment;
2410 u8 simple_sgl_flags;
2411 u8 simple_sgl_flags_last;
2412 u8 chain_sgl_flags;
2413 struct chain_tracker *chain_req;
2414
2415 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2416
2417 /* init scatter gather flags */
2418 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2419 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2420 simple_sgl_flags_last = simple_sgl_flags |
2421 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2422 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2423 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2424
016d5c35
SPS
2425 /* Check if we need to build a native SG list. */
2426 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2427 smid, scmd, pcie_device) == 0)) {
2428 /* We built a native SG list, just return. */
2429 return 0;
2430 }
2431
f92363d1
SR
2432 sg_scmd = scsi_sglist(scmd);
2433 sges_left = scsi_dma_map(scmd);
62f5c74c 2434 if (sges_left < 0) {
f92363d1 2435 sdev_printk(KERN_ERR, scmd->device,
1c2048bd 2436 "scsi_dma_map failed: request for %d bytes!\n",
f92363d1
SR
2437 scsi_bufflen(scmd));
2438 return -ENOMEM;
2439 }
2440
2441 sg_local = &mpi_request->SGL;
2442 sges_in_segment = (ioc->request_sz -
016d5c35 2443 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
f92363d1
SR
2444 if (sges_left <= sges_in_segment)
2445 goto fill_in_last_segment;
2446
2447 mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
016d5c35 2448 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
f92363d1
SR
2449
2450 /* fill in main message segment when there is a chain following */
2451 while (sges_in_segment > 1) {
2452 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2453 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2454 sg_scmd = sg_next(sg_scmd);
2455 sg_local += ioc->sge_size_ieee;
2456 sges_left--;
2457 sges_in_segment--;
2458 }
2459
25ef16d0 2460 /* initializing the pointers */
dbec4c90 2461 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
f92363d1
SR
2462 if (!chain_req)
2463 return -1;
2464 chain = chain_req->chain_buffer;
2465 chain_dma = chain_req->chain_buffer_dma;
2466 do {
2467 sges_in_segment = (sges_left <=
2468 ioc->max_sges_in_chain_message) ? sges_left :
2469 ioc->max_sges_in_chain_message;
2470 chain_offset = (sges_left == sges_in_segment) ?
2471 0 : sges_in_segment;
2472 chain_length = sges_in_segment * ioc->sge_size_ieee;
2473 if (chain_offset)
2474 chain_length += ioc->sge_size_ieee;
2475 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2476 chain_offset, chain_length, chain_dma);
2477
2478 sg_local = chain;
2479 if (!chain_offset)
2480 goto fill_in_last_segment;
2481
2482 /* fill in chain segments */
2483 while (sges_in_segment) {
2484 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2485 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2486 sg_scmd = sg_next(sg_scmd);
2487 sg_local += ioc->sge_size_ieee;
2488 sges_left--;
2489 sges_in_segment--;
2490 }
2491
dbec4c90 2492 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
f92363d1
SR
2493 if (!chain_req)
2494 return -1;
2495 chain = chain_req->chain_buffer;
2496 chain_dma = chain_req->chain_buffer_dma;
2497 } while (1);
2498
2499
2500 fill_in_last_segment:
2501
2502 /* fill the last segment */
62f5c74c 2503 while (sges_left > 0) {
f92363d1
SR
2504 if (sges_left == 1)
2505 _base_add_sg_single_ieee(sg_local,
2506 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2507 sg_dma_address(sg_scmd));
2508 else
2509 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2510 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2511 sg_scmd = sg_next(sg_scmd);
2512 sg_local += ioc->sge_size_ieee;
2513 sges_left--;
2514 }
2515
2516 return 0;
2517}
2518
2519/**
2520 * _base_build_sg_ieee - build generic sg for IEEE format
2521 * @ioc: per adapter object
2522 * @psge: virtual address for SGE
2523 * @data_out_dma: physical address for WRITES
2524 * @data_out_sz: data xfer size for WRITES
2525 * @data_in_dma: physical address for READS
2526 * @data_in_sz: data xfer size for READS
f92363d1
SR
2527 */
2528static void
2529_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2530 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2531 size_t data_in_sz)
2532{
2533 u8 sgl_flags;
2534
2535 if (!data_out_sz && !data_in_sz) {
2536 _base_build_zero_len_sge_ieee(ioc, psge);
2537 return;
2538 }
2539
2540 if (data_out_sz && data_in_sz) {
2541 /* WRITE sgel first */
2542 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2543 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2544 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2545 data_out_dma);
2546
2547 /* incr sgel */
2548 psge += ioc->sge_size_ieee;
2549
2550 /* READ sgel last */
2551 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2552 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2553 data_in_dma);
2554 } else if (data_out_sz) /* WRITE */ {
2555 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2556 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2557 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2558 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2559 data_out_dma);
2560 } else if (data_in_sz) /* READ */ {
2561 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2562 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2563 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2564 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2565 data_in_dma);
2566 }
2567}
2568
2569#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2570
2571/**
2572 * _base_config_dma_addressing - set dma addressing
2573 * @ioc: per adapter object
2574 * @pdev: PCI device struct
2575 *
4beb4867 2576 * Return: 0 for success, non-zero for failure.
f92363d1
SR
2577 */
2578static int
2579_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2580{
1c2048bd 2581 u64 required_mask, coherent_mask;
f92363d1 2582 struct sysinfo s;
9b05c91a 2583
0448f019
SPS
2584 if (ioc->is_mcpu_endpoint)
2585 goto try_32bit;
2586
1c2048bd
CH
2587 required_mask = dma_get_required_mask(&pdev->dev);
2588 if (sizeof(dma_addr_t) == 4 || required_mask == 32)
2589 goto try_32bit;
2590
9b05c91a 2591 if (ioc->dma_mask)
1c2048bd 2592 coherent_mask = DMA_BIT_MASK(64);
9b05c91a 2593 else
1c2048bd
CH
2594 coherent_mask = DMA_BIT_MASK(32);
2595
2596 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
2597 dma_set_coherent_mask(&pdev->dev, coherent_mask))
2598 goto try_32bit;
2599
2600 ioc->base_add_sg_single = &_base_add_sg_single_64;
2601 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2602 ioc->dma_mask = 64;
2603 goto out;
f92363d1 2604
0448f019 2605 try_32bit:
1c2048bd 2606 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
f92363d1
SR
2607 return -ENODEV;
2608
1c2048bd
CH
2609 ioc->base_add_sg_single = &_base_add_sg_single_32;
2610 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2611 ioc->dma_mask = 32;
f92363d1
SR
2612 out:
2613 si_meminfo(&s);
919d8a3f
JP
2614 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2615 ioc->dma_mask, convert_to_kb(s.totalram));
9b05c91a
SR
2616
2617 return 0;
2618}
f92363d1 2619
9b05c91a
SR
2620static int
2621_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2622 struct pci_dev *pdev)
2623{
2624 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
2625 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2626 return -ENODEV;
2627 }
f92363d1
SR
2628 return 0;
2629}
2630
2631/**
 2632 * _base_check_enable_msix - checks MSI-X capability.
2633 * @ioc: per adapter object
2634 *
2635 * Check to see if card is capable of MSIX, and set number
2636 * of available msix vectors
2637 */
2638static int
2639_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2640{
2641 int base;
2642 u16 message_control;
2643
42081173
SR
 2644 /* Check whether the controller is a SAS2008 B0 controller;
 2645 * if it is, use IO-APIC instead of MSI-X
2646 */
2647 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2648 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2649 return -EINVAL;
2650 }
2651
f92363d1
SR
2652 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2653 if (!base) {
919d8a3f 2654 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
f92363d1
SR
2655 return -EINVAL;
2656 }
2657
2658 /* get msix vector count */
42081173
SR
2659 /* NUMA_IO not supported for older controllers */
2660 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2661 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2662 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2663 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2664 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2665 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2666 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2667 ioc->msix_vector_count = 1;
2668 else {
2669 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2670 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2671 }
919d8a3f
JP
2672 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2673 ioc->msix_vector_count));
f92363d1
SR
2674 return 0;
2675}
2676
2677/**
2678 * _base_free_irq - free irq
2679 * @ioc: per adapter object
2680 *
2681 * Freeing respective reply_queue from the list.
2682 */
2683static void
2684_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2685{
2686 struct adapter_reply_queue *reply_q, *next;
2687
2688 if (list_empty(&ioc->reply_queue_list))
2689 return;
2690
2691 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2692 list_del(&reply_q->list);
1d55abc0
HR
2693 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2694 reply_q);
f92363d1
SR
2695 kfree(reply_q);
2696 }
2697}
2698
2699/**
2700 * _base_request_irq - request irq
2701 * @ioc: per adapter object
2702 * @index: msix index into vector table
f92363d1
SR
2703 *
2704 * Inserting respective reply_queue into the list.
2705 */
2706static int
1d55abc0 2707_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
f92363d1 2708{
1d55abc0 2709 struct pci_dev *pdev = ioc->pdev;
f92363d1
SR
2710 struct adapter_reply_queue *reply_q;
2711 int r;
2712
2713 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2714 if (!reply_q) {
919d8a3f
JP
2715 ioc_err(ioc, "unable to allocate memory %zu!\n",
2716 sizeof(struct adapter_reply_queue));
f92363d1
SR
2717 return -ENOMEM;
2718 }
2719 reply_q->ioc = ioc;
2720 reply_q->msix_index = index;
14b3114d 2721
f92363d1
SR
2722 atomic_set(&reply_q->busy, 0);
2723 if (ioc->msix_enable)
2724 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
c84b06a4 2725 ioc->driver_name, ioc->id, index);
f92363d1
SR
2726 else
2727 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
c84b06a4 2728 ioc->driver_name, ioc->id);
1d55abc0
HR
2729 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2730 IRQF_SHARED, reply_q->name, reply_q);
f92363d1 2731 if (r) {
fc7d510e 2732 pr_err("%s: unable to allocate interrupt %d!\n",
1d55abc0 2733 reply_q->name, pci_irq_vector(pdev, index));
da3cec25 2734 kfree(reply_q);
f92363d1
SR
2735 return -EBUSY;
2736 }
2737
2738 INIT_LIST_HEAD(&reply_q->list);
2739 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2740 return 0;
2741}
2742
2743/**
2744 * _base_assign_reply_queues - assigning msix index for each cpu
2745 * @ioc: per adapter object
2746 *
 2747 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
2748 *
 2749 * It would be nice if we could call irq_set_affinity, however it is not
2750 * an exported symbol
2751 */
2752static void
2753_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2754{
91b265bf 2755 unsigned int cpu, nr_cpus, nr_msix, index = 0;
14b3114d 2756 struct adapter_reply_queue *reply_q;
f92363d1
SR
2757
2758 if (!_base_is_controller_msix_enabled(ioc))
2759 return;
2760
2761 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2762
91b265bf
MP
2763 nr_cpus = num_online_cpus();
2764 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2765 ioc->facts.MaxMSIxVectors);
2766 if (!nr_msix)
2767 return;
f92363d1 2768
1d55abc0
HR
2769 if (smp_affinity_enable) {
2770 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2771 const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
2772 reply_q->msix_index);
2773 if (!mask) {
919d8a3f
JP
2774 ioc_warn(ioc, "no affinity for msi %x\n",
2775 reply_q->msix_index);
1d55abc0
HR
2776 continue;
2777 }
2778
4a8842de
TH
2779 for_each_cpu_and(cpu, mask, cpu_online_mask) {
2780 if (cpu >= ioc->cpu_msix_table_sz)
2781 break;
1d55abc0 2782 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
4a8842de 2783 }
1d55abc0
HR
2784 }
2785 return;
2786 }
91b265bf
MP
2787 cpu = cpumask_first(cpu_online_mask);
2788
14b3114d
SR
2789 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2790
91b265bf
MP
2791 unsigned int i, group = nr_cpus / nr_msix;
2792
14b3114d
SR
2793 if (cpu >= nr_cpus)
2794 break;
2795
91b265bf
MP
2796 if (index < nr_cpus % nr_msix)
2797 group++;
2798
2799 for (i = 0 ; i < group ; i++) {
1d55abc0 2800 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
91b265bf 2801 cpu = cpumask_next(cpu, cpu_online_mask);
f92363d1 2802 }
91b265bf 2803 index++;
14b3114d 2804 }
f92363d1
SR
2805}
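/*
 * Worked example (illustrative only) for the non-affinity path above:
 * assuming 16 online CPUs numbered 0-15 and 6 MSI-X vectors,
 * nr_cpus / nr_msix == 2 and nr_cpus % nr_msix == 4, so the first four
 * reply queues each serve three CPUs and the last two serve two apiece:
 *   queue 0 -> CPUs 0-2,  queue 1 -> CPUs 3-5,  queue 2 -> CPUs 6-8,
 *   queue 3 -> CPUs 9-11, queue 4 -> CPUs 12-13, queue 5 -> CPUs 14-15
 */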
2806
2807/**
2808 * _base_disable_msix - disables msix
2809 * @ioc: per adapter object
2810 *
2811 */
2812static void
2813_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
2814{
2815 if (!ioc->msix_enable)
2816 return;
2817 pci_disable_msix(ioc->pdev);
2818 ioc->msix_enable = 0;
2819}
2820
2821/**
 2822 * _base_enable_msix - enables msix, falls back to io_apic
2823 * @ioc: per adapter object
2824 *
2825 */
2826static int
2827_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2828{
f92363d1 2829 int r;
bb350661 2830 int i, local_max_msix_vectors;
f92363d1 2831 u8 try_msix = 0;
1d55abc0 2832 unsigned int irq_flags = PCI_IRQ_MSIX;
f92363d1 2833
f92363d1
SR
2834 if (msix_disable == -1 || msix_disable == 0)
2835 try_msix = 1;
2836
2837 if (!try_msix)
2838 goto try_ioapic;
2839
2840 if (_base_check_enable_msix(ioc) != 0)
2841 goto try_ioapic;
2842
2843 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1d55abc0 2844 ioc->msix_vector_count);
f92363d1 2845
1f95a47e
JP
2846 ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
2847 ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
9c500060 2848
9b05c91a 2849 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
06f5f976 2850 local_max_msix_vectors = (reset_devices) ? 1 : 8;
bb350661
SPS
2851 else
2852 local_max_msix_vectors = max_msix_vectors;
9b05c91a 2853
1d55abc0 2854 if (local_max_msix_vectors > 0)
bb350661 2855 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
9c500060 2856 ioc->reply_queue_count);
1d55abc0 2857 else if (local_max_msix_vectors == 0)
9b05c91a 2858 goto try_ioapic;
9c500060 2859
64038301
SPS
2860 if (ioc->msix_vector_count < ioc->cpu_count)
2861 smp_affinity_enable = 0;
2862
1d55abc0
HR
2863 if (smp_affinity_enable)
2864 irq_flags |= PCI_IRQ_AFFINITY;
f92363d1 2865
1d55abc0
HR
2866 r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
2867 irq_flags);
2868 if (r < 0) {
919d8a3f
JP
2869 dfailprintk(ioc,
2870 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
2871 r));
f92363d1
SR
2872 goto try_ioapic;
2873 }
2874
2875 ioc->msix_enable = 1;
1d55abc0
HR
2876 ioc->reply_queue_count = r;
2877 for (i = 0; i < ioc->reply_queue_count; i++) {
2878 r = _base_request_irq(ioc, i);
f92363d1
SR
2879 if (r) {
2880 _base_free_irq(ioc);
2881 _base_disable_msix(ioc);
f92363d1
SR
2882 goto try_ioapic;
2883 }
2884 }
2885
f92363d1
SR
2886 return 0;
2887
 2888/* fall back to io_apic interrupt routing */
2889 try_ioapic:
2890
9b05c91a 2891 ioc->reply_queue_count = 1;
1d55abc0
HR
2892 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
2893 if (r < 0) {
919d8a3f
JP
2894 dfailprintk(ioc,
2895 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
2896 r));
1d55abc0
HR
2897 } else
2898 r = _base_request_irq(ioc, 0);
f92363d1
SR
2899
2900 return r;
2901}
2902
580d4e31
SR
2903/**
2904 * mpt3sas_base_unmap_resources - free controller resources
2905 * @ioc: per adapter object
2906 */
8bbb1cf6 2907static void
580d4e31
SR
2908mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2909{
2910 struct pci_dev *pdev = ioc->pdev;
2911
1f95a47e 2912 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
580d4e31
SR
2913
2914 _base_free_irq(ioc);
2915 _base_disable_msix(ioc);
2916
2b48be65
C
2917 kfree(ioc->replyPostRegisterIndex);
2918 ioc->replyPostRegisterIndex = NULL;
2919
580d4e31
SR
2920
2921 if (ioc->chip_phys) {
2922 iounmap(ioc->chip);
2923 ioc->chip_phys = 0;
2924 }
2925
2926 if (pci_is_enabled(pdev)) {
2927 pci_release_selected_regions(ioc->pdev, ioc->bars);
2928 pci_disable_pcie_error_reporting(pdev);
2929 pci_disable_device(pdev);
2930 }
2931}
2932
f92363d1
SR
2933/**
2934 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
2935 * @ioc: per adapter object
2936 *
4beb4867 2937 * Return: 0 for success, non-zero for failure.
f92363d1
SR
2938 */
2939int
2940mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2941{
2942 struct pci_dev *pdev = ioc->pdev;
2943 u32 memap_sz;
2944 u32 pio_sz;
2945 int i, r = 0;
2946 u64 pio_chip = 0;
6f9e09fd 2947 phys_addr_t chip_phys = 0;
f92363d1
SR
2948 struct adapter_reply_queue *reply_q;
2949
919d8a3f 2950 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
2951
2952 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
2953 if (pci_enable_device_mem(pdev)) {
919d8a3f 2954 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
cf9bd21a 2955 ioc->bars = 0;
f92363d1
SR
2956 return -ENODEV;
2957 }
2958
2959
2960 if (pci_request_selected_regions(pdev, ioc->bars,
c84b06a4 2961 ioc->driver_name)) {
919d8a3f 2962 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
cf9bd21a 2963 ioc->bars = 0;
f92363d1
SR
2964 r = -ENODEV;
2965 goto out_fail;
2966 }
2967
2968/* AER (Advanced Error Reporting) hooks */
2969 pci_enable_pcie_error_reporting(pdev);
2970
2971 pci_set_master(pdev);
2972
2973
2974 if (_base_config_dma_addressing(ioc, pdev) != 0) {
919d8a3f 2975 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
f92363d1
SR
2976 r = -ENODEV;
2977 goto out_fail;
2978 }
2979
5aeeb78a
SR
2980 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
2981 (!memap_sz || !pio_sz); i++) {
f92363d1
SR
2982 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
2983 if (pio_sz)
2984 continue;
2985 pio_chip = (u64)pci_resource_start(pdev, i);
2986 pio_sz = pci_resource_len(pdev, i);
2987 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
2988 if (memap_sz)
2989 continue;
2990 ioc->chip_phys = pci_resource_start(pdev, i);
6f9e09fd 2991 chip_phys = ioc->chip_phys;
f92363d1
SR
2992 memap_sz = pci_resource_len(pdev, i);
2993 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
f92363d1
SR
2994 }
2995 }
2996
5aeeb78a 2997 if (ioc->chip == NULL) {
919d8a3f 2998 ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
5aeeb78a
SR
2999 r = -EINVAL;
3000 goto out_fail;
3001 }
3002
f92363d1 3003 _base_mask_interrupts(ioc);
9b05c91a 3004
98c56ad3 3005 r = _base_get_ioc_facts(ioc);
9b05c91a
SR
3006 if (r)
3007 goto out_fail;
3008
3009 if (!ioc->rdpq_array_enable_assigned) {
3010 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3011 ioc->rdpq_array_enable_assigned = 1;
3012 }
3013
f92363d1
SR
3014 r = _base_enable_msix(ioc);
3015 if (r)
3016 goto out_fail;
3017
fb77bb53
SR
3018 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3019 * revision HBAs and also only when reply queue count is greater than 8
3020 */
2b48be65 3021 if (ioc->combined_reply_queue) {
fb77bb53
SR
3022 /* Determine the Supplemental Reply Post Host Index Registers
 3023 * Addresses. The Supplemental Reply Post Host Index Registers
 3024 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
 3025 * each register is
 3026 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET bytes from the previous one.
3027 */
3028 ioc->replyPostRegisterIndex = kcalloc(
0bb337c9 3029 ioc->combined_reply_index_count,
fb77bb53
SR
3030 sizeof(resource_size_t *), GFP_KERNEL);
3031 if (!ioc->replyPostRegisterIndex) {
1f95a47e
JP
3032 dfailprintk(ioc,
3033 ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
fb77bb53
SR
3034 r = -ENOMEM;
3035 goto out_fail;
3036 }
3037
0bb337c9 3038 for (i = 0; i < ioc->combined_reply_index_count; i++) {
fb77bb53 3039 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
cf6bf971 3040 ((u8 __force *)&ioc->chip->Doorbell +
fb77bb53
SR
3041 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3042 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3043 }
2b48be65 3044 }
fb77bb53 3045
ce7c6c9e
GE
3046 if (ioc->is_warpdrive) {
3047 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3048 &ioc->chip->ReplyPostHostIndex;
3049
3050 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3051 ioc->reply_post_host_index[i] =
3052 (resource_size_t __iomem *)
3053 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3054 * 4)));
3055 }
3056
f92363d1 3057 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
fc7d510e
JP
3058 pr_info("%s: %s enabled: IRQ %d\n",
3059 reply_q->name,
3060 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3061 pci_irq_vector(ioc->pdev, reply_q->msix_index));
f92363d1 3062
919d8a3f
JP
3063 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3064 &chip_phys, ioc->chip, memap_sz);
3065 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3066 (unsigned long long)pio_chip, pio_sz);
f92363d1
SR
3067
3068 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3069 pci_save_state(pdev);
3070 return 0;
3071
3072 out_fail:
580d4e31 3073 mpt3sas_base_unmap_resources(ioc);
f92363d1
SR
3074 return r;
3075}
3076
3077/**
3078 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3079 * @ioc: per adapter object
3080 * @smid: system request message index(smid zero is invalid)
3081 *
4beb4867 3082 * Return: virt pointer to message frame.
f92363d1
SR
3083 */
3084void *
3085mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3086{
3087 return (void *)(ioc->request + (smid * ioc->request_sz));
3088}
3089
3090/**
3091 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3092 * @ioc: per adapter object
3093 * @smid: system request message index
3094 *
4beb4867 3095 * Return: virt pointer to sense buffer.
f92363d1
SR
3096 */
3097void *
3098mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3099{
3100 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3101}
3102
3103/**
3104 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3105 * @ioc: per adapter object
3106 * @smid: system request message index
3107 *
4beb4867 3108 * Return: phys pointer to the low 32bit address of the sense buffer.
f92363d1
SR
3109 */
3110__le32
3111mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3112{
3113 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3114 SCSI_SENSE_BUFFERSIZE));
3115}
3116
016d5c35
SPS
3117/**
3118 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3119 * @ioc: per adapter object
3120 * @smid: system request message index
3121 *
4beb4867 3122 * Return: virt pointer to a PCIe SGL.
016d5c35
SPS
3123 */
3124void *
3125mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3126{
dbec4c90 3127 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
016d5c35
SPS
3128}
3129
3130/**
3131 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3132 * @ioc: per adapter object
3133 * @smid: system request message index
3134 *
4beb4867 3135 * Return: phys pointer to the address of the PCIe buffer.
016d5c35 3136 */
d8335ae2 3137dma_addr_t
016d5c35
SPS
3138mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3139{
dbec4c90 3140 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
016d5c35
SPS
3141}
3142
f92363d1
SR
3143/**
3144 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3145 * @ioc: per adapter object
3146 * @phys_addr: lower 32 physical addr of the reply
3147 *
3148 * Converts 32bit lower physical addr into a virt address.
3149 */
3150void *
3151mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3152{
3153 if (!phys_addr)
3154 return NULL;
3155 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3156}
3157
03d1fb3a
SS
3158static inline u8
3159_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
3160{
3161 return ioc->cpu_msix_table[raw_smp_processor_id()];
3162}
3163
f92363d1
SR
3164/**
3165 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3166 * @ioc: per adapter object
3167 * @cb_idx: callback index
3168 *
4beb4867 3169 * Return: smid (zero is invalid)
f92363d1
SR
3170 */
3171u16
3172mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3173{
3174 unsigned long flags;
3175 struct request_tracker *request;
3176 u16 smid;
3177
3178 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3179 if (list_empty(&ioc->internal_free_list)) {
3180 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
919d8a3f 3181 ioc_err(ioc, "%s: smid not available\n", __func__);
f92363d1
SR
3182 return 0;
3183 }
3184
3185 request = list_entry(ioc->internal_free_list.next,
3186 struct request_tracker, tracker_list);
3187 request->cb_idx = cb_idx;
3188 smid = request->smid;
3189 list_del(&request->tracker_list);
3190 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3191 return smid;
3192}
3193
3194/**
3195 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3196 * @ioc: per adapter object
3197 * @cb_idx: callback index
3198 * @scmd: pointer to scsi command object
3199 *
4beb4867 3200 * Return: smid (zero is invalid)
f92363d1
SR
3201 */
3202u16
3203mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3204 struct scsi_cmnd *scmd)
3205{
dbec4c90
SPS
3206 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3207 unsigned int tag = scmd->request->tag;
f92363d1
SR
3208 u16 smid;
3209
dbec4c90 3210 smid = tag + 1;
f92363d1 3211 request->cb_idx = cb_idx;
03d1fb3a 3212 request->msix_io = _base_get_msix_index(ioc);
dbec4c90
SPS
3213 request->smid = smid;
3214 INIT_LIST_HEAD(&request->chain_list);
f92363d1
SR
3215 return smid;
3216}
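/*
 * Illustrative note (not part of the driver): the scsiio smid is simply
 * the block-layer request tag plus one, so tag 0 maps to smid 1 and
 * smid 0 stays reserved as the "invalid" value mentioned in the
 * kernel-doc above.
 */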
3217
3218/**
3219 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3220 * @ioc: per adapter object
3221 * @cb_idx: callback index
3222 *
4beb4867 3223 * Return: smid (zero is invalid)
f92363d1
SR
3224 */
3225u16
3226mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3227{
3228 unsigned long flags;
3229 struct request_tracker *request;
3230 u16 smid;
3231
3232 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3233 if (list_empty(&ioc->hpr_free_list)) {
3234 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3235 return 0;
3236 }
3237
3238 request = list_entry(ioc->hpr_free_list.next,
3239 struct request_tracker, tracker_list);
3240 request->cb_idx = cb_idx;
3241 smid = request->smid;
3242 list_del(&request->tracker_list);
3243 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3244 return smid;
3245}
3246
6a2d4618
HR
3247static void
3248_base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3249{
3250 /*
3251 * See _wait_for_commands_to_complete() call with regards to this code.
3252 */
3253 if (ioc->shost_recovery && ioc->pending_io_count) {
c84b023a 3254 ioc->pending_io_count = scsi_host_busy(ioc->shost);
272e253c 3255 if (ioc->pending_io_count == 0)
6a2d4618 3256 wake_up(&ioc->reset_wq);
6a2d4618
HR
3257 }
3258}
3259
dbec4c90
SPS
3260void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3261 struct scsiio_tracker *st)
3262{
3263 if (WARN_ON(st->smid == 0))
3264 return;
3265 st->cb_idx = 0xFF;
3266 st->direct_io = 0;
93204b78 3267 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
e7018314 3268 st->smid = 0;
dbec4c90
SPS
3269}
3270
f92363d1
SR
3271/**
3272 * mpt3sas_base_free_smid - put smid back on free_list
3273 * @ioc: per adapter object
3274 * @smid: system request message index
f92363d1
SR
3275 */
3276void
3277mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3278{
3279 unsigned long flags;
3280 int i;
f92363d1 3281
f92363d1 3282 if (smid < ioc->hi_priority_smid) {
dbec4c90 3283 struct scsiio_tracker *st;
c2fe742f 3284 void *request;
f92363d1 3285
dbec4c90
SPS
3286 st = _get_st_from_smid(ioc, smid);
3287 if (!st) {
3288 _base_recovery_check(ioc);
3289 return;
3290 }
c2fe742f
SR
3291
3292 /* Clear MPI request frame */
3293 request = mpt3sas_base_get_msg_frame(ioc, smid);
3294 memset(request, 0, ioc->request_sz);
3295
dbec4c90 3296 mpt3sas_base_clear_st(ioc, st);
6a2d4618 3297 _base_recovery_check(ioc);
f92363d1 3298 return;
dbec4c90
SPS
3299 }
3300
3301 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3302 if (smid < ioc->internal_smid) {
f92363d1
SR
3303 /* hi-priority */
3304 i = smid - ioc->hi_priority_smid;
3305 ioc->hpr_lookup[i].cb_idx = 0xFF;
3306 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3307 } else if (smid <= ioc->hba_queue_depth) {
3308 /* internal queue */
3309 i = smid - ioc->internal_smid;
3310 ioc->internal_lookup[i].cb_idx = 0xFF;
3311 list_add(&ioc->internal_lookup[i].tracker_list,
3312 &ioc->internal_free_list);
3313 }
3314 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3315}
3316
e5747439
SPS
3317/**
3318 * _base_mpi_ep_writeq - 32 bit write to MMIO
3319 * @b: data payload
3320 * @addr: address in MMIO space
3321 * @writeq_lock: spin lock
3322 *
 3323 * This is special handling for the MPI EP to take care of 32-bit
 3324 * environments where it is not guaranteed to send the entire word
3325 * in one transfer.
3326 */
3327static inline void
3328_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3329 spinlock_t *writeq_lock)
3330{
3331 unsigned long flags;
e5747439
SPS
3332
3333 spin_lock_irqsave(writeq_lock, flags);
09c2f95a
SR
3334 __raw_writel((u32)(b), addr);
3335 __raw_writel((u32)(b >> 32), (addr + 4));
10ee1f22 3336 mmiowb();
e5747439
SPS
3337 spin_unlock_irqrestore(writeq_lock, flags);
3338}
3339
f92363d1
SR
3340/**
3341 * _base_writeq - 64 bit write to MMIO
f92363d1
SR
3342 * @b: data payload
3343 * @addr: address in MMIO space
3344 * @writeq_lock: spin lock
3345 *
3346 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
 3347 * care of 32-bit environments where it is not guaranteed to send the entire word
3348 * in one transfer.
3349 */
3350#if defined(writeq) && defined(CONFIG_64BIT)
3351static inline void
3352_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3353{
23c3828a 3354 wmb();
09c2f95a 3355 __raw_writeq(b, addr);
23c3828a 3356 barrier();
f92363d1
SR
3357}
3358#else
3359static inline void
3360_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3361{
e5747439 3362 _base_mpi_ep_writeq(b, addr, writeq_lock);
f92363d1
SR
3363}
3364#endif
3365
e5747439
SPS
3366/**
3367 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3368 * @ioc: per adapter object
3369 * @smid: system request message index
3370 * @handle: device handle
e5747439
SPS
3371 */
3372static void
3373_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3374{
3375 Mpi2RequestDescriptorUnion_t descriptor;
3376 u64 *request = (u64 *)&descriptor;
3377 void *mpi_req_iomem;
3378 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3379
3380 _clone_sg_entries(ioc, (void *) mfp, smid);
cf6bf971 3381 mpi_req_iomem = (void __force *)ioc->chip +
e5747439
SPS
3382 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3383 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3384 ioc->request_sz);
3385 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3386 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3387 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3388 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3389 descriptor.SCSIIO.LMID = 0;
3390 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3391 &ioc->scsi_lookup_lock);
3392}
3393
f92363d1 3394/**
81c16f83 3395 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
f92363d1
SR
3396 * @ioc: per adapter object
3397 * @smid: system request message index
3398 * @handle: device handle
f92363d1 3399 */
81c16f83
SPS
3400static void
3401_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
f92363d1
SR
3402{
3403 Mpi2RequestDescriptorUnion_t descriptor;
3404 u64 *request = (u64 *)&descriptor;
3405
3406
3407 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3408 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3409 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3410 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3411 descriptor.SCSIIO.LMID = 0;
3412 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3413 &ioc->scsi_lookup_lock);
3414}
3415
3416/**
40114bde 3417 * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
f92363d1
SR
3418 * @ioc: per adapter object
3419 * @smid: system request message index
3420 * @handle: device handle
f92363d1 3421 */
40114bde
SP
3422void
3423mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
f92363d1
SR
3424 u16 handle)
3425{
3426 Mpi2RequestDescriptorUnion_t descriptor;
3427 u64 *request = (u64 *)&descriptor;
3428
3429 descriptor.SCSIIO.RequestFlags =
3430 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3431 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
3432 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3433 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3434 descriptor.SCSIIO.LMID = 0;
3435 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3436 &ioc->scsi_lookup_lock);
3437}
3438
3439/**
40114bde 3440 * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
f92363d1
SR
3441 * @ioc: per adapter object
3442 * @smid: system request message index
03d1fb3a 3443 * @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0.
f92363d1 3444 */
40114bde
SP
3445void
3446mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
03d1fb3a 3447 u16 msix_task)
f92363d1
SR
3448{
3449 Mpi2RequestDescriptorUnion_t descriptor;
e5747439
SPS
3450 void *mpi_req_iomem;
3451 u64 *request;
3452
3453 if (ioc->is_mcpu_endpoint) {
e5747439
SPS
3454 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3455
e5747439 3456 /* TBD 256 is offset within sys register. */
cf6bf971
C
3457 mpi_req_iomem = (void __force *)ioc->chip
3458 + MPI_FRAME_START_OFFSET
e5747439
SPS
3459 + (smid * ioc->request_sz);
3460 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3461 ioc->request_sz);
3462 }
3463
3464 request = (u64 *)&descriptor;
f92363d1
SR
3465
3466 descriptor.HighPriority.RequestFlags =
3467 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
03d1fb3a 3468 descriptor.HighPriority.MSIxIndex = msix_task;
f92363d1
SR
3469 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3470 descriptor.HighPriority.LMID = 0;
3471 descriptor.HighPriority.Reserved1 = 0;
e5747439
SPS
3472 if (ioc->is_mcpu_endpoint)
3473 _base_mpi_ep_writeq(*request,
3474 &ioc->chip->RequestDescriptorPostLow,
3475 &ioc->scsi_lookup_lock);
3476 else
3477 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3478 &ioc->scsi_lookup_lock);
f92363d1
SR
3479}
3480
aff39e61 3481/**
40114bde 3482 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
aff39e61
SPS
3483 * firmware
3484 * @ioc: per adapter object
3485 * @smid: system request message index
aff39e61 3486 */
40114bde
SP
3487void
3488mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
aff39e61
SPS
3489{
3490 Mpi2RequestDescriptorUnion_t descriptor;
3491 u64 *request = (u64 *)&descriptor;
3492
3493 descriptor.Default.RequestFlags =
3494 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3495 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3496 descriptor.Default.SMID = cpu_to_le16(smid);
3497 descriptor.Default.LMID = 0;
3498 descriptor.Default.DescriptorTypeDependent = 0;
3499 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3500 &ioc->scsi_lookup_lock);
3501}
3502
f92363d1 3503/**
40114bde 3504 * mpt3sas_base_put_smid_default - Default, primarily used for config pages
f92363d1
SR
3505 * @ioc: per adapter object
3506 * @smid: system request message index
f92363d1 3507 */
40114bde
SP
3508void
3509mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
f92363d1
SR
3510{
3511 Mpi2RequestDescriptorUnion_t descriptor;
e5747439
SPS
3512 void *mpi_req_iomem;
3513 u64 *request;
e5747439
SPS
3514
3515 if (ioc->is_mcpu_endpoint) {
3516 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3517
e5747439
SPS
3518 _clone_sg_entries(ioc, (void *) mfp, smid);
3519 /* TBD 256 is offset within sys register */
cf6bf971 3520 mpi_req_iomem = (void __force *)ioc->chip +
e5747439
SPS
3521 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3522 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3523 ioc->request_sz);
3524 }
3525 request = (u64 *)&descriptor;
f92363d1
SR
3526 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3527 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
3528 descriptor.Default.SMID = cpu_to_le16(smid);
3529 descriptor.Default.LMID = 0;
3530 descriptor.Default.DescriptorTypeDependent = 0;
e5747439
SPS
3531 if (ioc->is_mcpu_endpoint)
3532 _base_mpi_ep_writeq(*request,
3533 &ioc->chip->RequestDescriptorPostLow,
3534 &ioc->scsi_lookup_lock);
3535 else
3536 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3537 &ioc->scsi_lookup_lock);
f92363d1
SR
3538}
3539
1117b31a 3540/**
989e43c7 3541 * _base_display_OEMs_branding - Display branding string
1117b31a 3542 * @ioc: per adapter object
1117b31a
SR
3543 */
3544static void
989e43c7 3545_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
1117b31a
SR
3546{
3547 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
3548 return;
3549
989e43c7
SR
3550 switch (ioc->pdev->subsystem_vendor) {
3551 case PCI_VENDOR_ID_INTEL:
3552 switch (ioc->pdev->device) {
3553 case MPI2_MFGPAGE_DEVID_SAS2008:
3554 switch (ioc->pdev->subsystem_device) {
3555 case MPT2SAS_INTEL_RMS2LL080_SSDID:
919d8a3f
JP
3556 ioc_info(ioc, "%s\n",
3557 MPT2SAS_INTEL_RMS2LL080_BRANDING);
989e43c7
SR
3558 break;
3559 case MPT2SAS_INTEL_RMS2LL040_SSDID:
919d8a3f
JP
3560 ioc_info(ioc, "%s\n",
3561 MPT2SAS_INTEL_RMS2LL040_BRANDING);
989e43c7
SR
3562 break;
3563 case MPT2SAS_INTEL_SSD910_SSDID:
919d8a3f
JP
3564 ioc_info(ioc, "%s\n",
3565 MPT2SAS_INTEL_SSD910_BRANDING);
989e43c7
SR
3566 break;
3567 default:
919d8a3f
JP
3568 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3569 ioc->pdev->subsystem_device);
989e43c7
SR
3570 break;
3571 }
7850b51b 3572 break;
989e43c7
SR
3573 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3574 switch (ioc->pdev->subsystem_device) {
3575 case MPT2SAS_INTEL_RS25GB008_SSDID:
919d8a3f
JP
3576 ioc_info(ioc, "%s\n",
3577 MPT2SAS_INTEL_RS25GB008_BRANDING);
989e43c7
SR
3578 break;
3579 case MPT2SAS_INTEL_RMS25JB080_SSDID:
919d8a3f
JP
3580 ioc_info(ioc, "%s\n",
3581 MPT2SAS_INTEL_RMS25JB080_BRANDING);
989e43c7
SR
3582 break;
3583 case MPT2SAS_INTEL_RMS25JB040_SSDID:
919d8a3f
JP
3584 ioc_info(ioc, "%s\n",
3585 MPT2SAS_INTEL_RMS25JB040_BRANDING);
989e43c7
SR
3586 break;
3587 case MPT2SAS_INTEL_RMS25KB080_SSDID:
919d8a3f
JP
3588 ioc_info(ioc, "%s\n",
3589 MPT2SAS_INTEL_RMS25KB080_BRANDING);
989e43c7
SR
3590 break;
3591 case MPT2SAS_INTEL_RMS25KB040_SSDID:
919d8a3f
JP
3592 ioc_info(ioc, "%s\n",
3593 MPT2SAS_INTEL_RMS25KB040_BRANDING);
989e43c7
SR
3594 break;
3595 case MPT2SAS_INTEL_RMS25LB040_SSDID:
919d8a3f
JP
3596 ioc_info(ioc, "%s\n",
3597 MPT2SAS_INTEL_RMS25LB040_BRANDING);
989e43c7
SR
3598 break;
3599 case MPT2SAS_INTEL_RMS25LB080_SSDID:
919d8a3f
JP
3600 ioc_info(ioc, "%s\n",
3601 MPT2SAS_INTEL_RMS25LB080_BRANDING);
989e43c7
SR
3602 break;
3603 default:
919d8a3f
JP
3604 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3605 ioc->pdev->subsystem_device);
989e43c7
SR
3606 break;
3607 }
7850b51b 3608 break;
989e43c7
SR
3609 case MPI25_MFGPAGE_DEVID_SAS3008:
3610 switch (ioc->pdev->subsystem_device) {
3611 case MPT3SAS_INTEL_RMS3JC080_SSDID:
919d8a3f
JP
3612 ioc_info(ioc, "%s\n",
3613 MPT3SAS_INTEL_RMS3JC080_BRANDING);
989e43c7
SR
3614 break;
3615
3616 case MPT3SAS_INTEL_RS3GC008_SSDID:
919d8a3f
JP
3617 ioc_info(ioc, "%s\n",
3618 MPT3SAS_INTEL_RS3GC008_BRANDING);
989e43c7
SR
3619 break;
3620 case MPT3SAS_INTEL_RS3FC044_SSDID:
919d8a3f
JP
3621 ioc_info(ioc, "%s\n",
3622 MPT3SAS_INTEL_RS3FC044_BRANDING);
989e43c7
SR
3623 break;
3624 case MPT3SAS_INTEL_RS3UC080_SSDID:
919d8a3f
JP
3625 ioc_info(ioc, "%s\n",
3626 MPT3SAS_INTEL_RS3UC080_BRANDING);
989e43c7
SR
3627 break;
3628 default:
919d8a3f
JP
3629 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3630 ioc->pdev->subsystem_device);
989e43c7
SR
3631 break;
3632 }
1117b31a
SR
3633 break;
3634 default:
919d8a3f
JP
3635 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
3636 ioc->pdev->subsystem_device);
1117b31a
SR
3637 break;
3638 }
3639 break;
989e43c7
SR
3640 case PCI_VENDOR_ID_DELL:
3641 switch (ioc->pdev->device) {
3642 case MPI2_MFGPAGE_DEVID_SAS2008:
3643 switch (ioc->pdev->subsystem_device) {
3644 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
919d8a3f
JP
3645 ioc_info(ioc, "%s\n",
3646 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
989e43c7
SR
3647 break;
3648 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
919d8a3f
JP
3649 ioc_info(ioc, "%s\n",
3650 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
989e43c7
SR
3651 break;
3652 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
919d8a3f
JP
3653 ioc_info(ioc, "%s\n",
3654 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
989e43c7
SR
3655 break;
3656 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
919d8a3f
JP
3657 ioc_info(ioc, "%s\n",
3658 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
989e43c7
SR
3659 break;
3660 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
919d8a3f
JP
3661 ioc_info(ioc, "%s\n",
3662 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
989e43c7
SR
3663 break;
3664 case MPT2SAS_DELL_PERC_H200_SSDID:
919d8a3f
JP
3665 ioc_info(ioc, "%s\n",
3666 MPT2SAS_DELL_PERC_H200_BRANDING);
989e43c7
SR
3667 break;
3668 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
919d8a3f
JP
3669 ioc_info(ioc, "%s\n",
3670 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
989e43c7
SR
3671 break;
3672 default:
919d8a3f
JP
3673 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
3674 ioc->pdev->subsystem_device);
989e43c7
SR
3675 break;
3676 }
3677 break;
3678 case MPI25_MFGPAGE_DEVID_SAS3008:
3679 switch (ioc->pdev->subsystem_device) {
3680 case MPT3SAS_DELL_12G_HBA_SSDID:
919d8a3f
JP
3681 ioc_info(ioc, "%s\n",
3682 MPT3SAS_DELL_12G_HBA_BRANDING);
989e43c7
SR
3683 break;
3684 default:
919d8a3f
JP
3685 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
3686 ioc->pdev->subsystem_device);
989e43c7
SR
3687 break;
3688 }
fb84dfc4
SR
3689 break;
3690 default:
919d8a3f
JP
3691 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
3692 ioc->pdev->subsystem_device);
fb84dfc4
SR
3693 break;
3694 }
3695 break;
989e43c7
SR
3696 case PCI_VENDOR_ID_CISCO:
3697 switch (ioc->pdev->device) {
3698 case MPI25_MFGPAGE_DEVID_SAS3008:
3699 switch (ioc->pdev->subsystem_device) {
3700 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
919d8a3f
JP
3701 ioc_info(ioc, "%s\n",
3702 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
989e43c7
SR
3703 break;
3704 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
919d8a3f
JP
3705 ioc_info(ioc, "%s\n",
3706 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
989e43c7
SR
3707 break;
3708 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
919d8a3f
JP
3709 ioc_info(ioc, "%s\n",
3710 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
989e43c7
SR
3711 break;
3712 default:
919d8a3f
JP
3713 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3714 ioc->pdev->subsystem_device);
989e43c7
SR
3715 break;
3716 }
d8eb4a47 3717 break;
989e43c7
SR
3718 case MPI25_MFGPAGE_DEVID_SAS3108_1:
3719 switch (ioc->pdev->subsystem_device) {
3720 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
919d8a3f
JP
3721 ioc_info(ioc, "%s\n",
3722 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
989e43c7
SR
3723 break;
3724 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
919d8a3f
JP
3725 ioc_info(ioc, "%s\n",
3726 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
989e43c7
SR
3727 break;
3728 default:
919d8a3f
JP
3729 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
3730 ioc->pdev->subsystem_device);
989e43c7
SR
3731 break;
3732 }
38e4141e
SR
3733 break;
3734 default:
919d8a3f
JP
3735 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
3736 ioc->pdev->subsystem_device);
38e4141e
SR
3737 break;
3738 }
3739 break;
989e43c7
SR
3740 case MPT2SAS_HP_3PAR_SSVID:
3741 switch (ioc->pdev->device) {
3742 case MPI2_MFGPAGE_DEVID_SAS2004:
3743 switch (ioc->pdev->subsystem_device) {
3744 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
919d8a3f
JP
3745 ioc_info(ioc, "%s\n",
3746 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
989e43c7
SR
3747 break;
3748 default:
919d8a3f
JP
3749 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3750 ioc->pdev->subsystem_device);
989e43c7
SR
3751 break;
3752 }
7850b51b 3753 break;
989e43c7
SR
3754 case MPI2_MFGPAGE_DEVID_SAS2308_2:
3755 switch (ioc->pdev->subsystem_device) {
3756 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
919d8a3f
JP
3757 ioc_info(ioc, "%s\n",
3758 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
989e43c7
SR
3759 break;
3760 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
919d8a3f
JP
3761 ioc_info(ioc, "%s\n",
3762 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
989e43c7
SR
3763 break;
3764 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
919d8a3f
JP
3765 ioc_info(ioc, "%s\n",
3766 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
989e43c7
SR
3767 break;
3768 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
919d8a3f
JP
3769 ioc_info(ioc, "%s\n",
3770 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
989e43c7
SR
3771 break;
3772 default:
919d8a3f
JP
3773 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
3774 ioc->pdev->subsystem_device);
989e43c7
SR
3775 break;
3776 }
7850b51b 3777 break;
d8eb4a47 3778 default:
919d8a3f
JP
3779 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
3780 ioc->pdev->subsystem_device);
d8eb4a47
SR
3781 break;
3782 }
38e4141e 3783 default:
38e4141e
SR
3784 break;
3785 }
3786}
fb84dfc4 3787
3d29ed85
C
3788/**
3789 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
3790 * version from FW Image Header.
3791 * @ioc: per adapter object
3792 *
4beb4867 3793 * Return: 0 for success, non-zero for failure.
3d29ed85
C
3794 */
3795static int
3796_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
3797{
3798 Mpi2FWImageHeader_t *FWImgHdr;
3799 Mpi25FWUploadRequest_t *mpi_request;
3800 Mpi2FWUploadReply_t mpi_reply;
3801 int r = 0;
3802 void *fwpkg_data = NULL;
3803 dma_addr_t fwpkg_data_dma;
3804 u16 smid, ioc_status;
3805 size_t data_length;
3806
919d8a3f 3807 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3d29ed85
C
3808
3809 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
919d8a3f 3810 ioc_err(ioc, "%s: internal command already in use\n", __func__);
3d29ed85
C
3811 return -EAGAIN;
3812 }
3813
3814 data_length = sizeof(Mpi2FWImageHeader_t);
1c2048bd
CH
3815 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
3816 &fwpkg_data_dma, GFP_KERNEL);
3d29ed85 3817 if (!fwpkg_data) {
919d8a3f
JP
3818 ioc_err(ioc, "failure at %s:%d/%s()!\n",
3819 __FILE__, __LINE__, __func__);
3d29ed85
C
3820 return -ENOMEM;
3821 }
3822
3823 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
3824 if (!smid) {
919d8a3f 3825 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
3d29ed85
C
3826 r = -EAGAIN;
3827 goto out;
3828 }
3829
3830 ioc->base_cmds.status = MPT3_CMD_PENDING;
3831 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3832 ioc->base_cmds.smid = smid;
3833 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
3834 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
3835 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
3836 mpi_request->ImageSize = cpu_to_le32(data_length);
3837 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
3838 data_length);
3839 init_completion(&ioc->base_cmds.done);
3840 mpt3sas_base_put_smid_default(ioc, smid);
3841 /* Wait for 15 seconds */
3842 wait_for_completion_timeout(&ioc->base_cmds.done,
3843 FW_IMG_HDR_READ_TIMEOUT*HZ);
919d8a3f 3844 ioc_info(ioc, "%s: complete\n", __func__);
3d29ed85 3845 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
919d8a3f 3846 ioc_err(ioc, "%s: timeout\n", __func__);
3d29ed85
C
3847 _debug_dump_mf(mpi_request,
3848 sizeof(Mpi25FWUploadRequest_t)/4);
3849 r = -ETIME;
3850 } else {
3851 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
3852 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
3853 memcpy(&mpi_reply, ioc->base_cmds.reply,
3854 sizeof(Mpi2FWUploadReply_t));
3855 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
3856 MPI2_IOCSTATUS_MASK;
3857 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
3858 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
3859 if (FWImgHdr->PackageVersion.Word) {
919d8a3f
JP
3860 ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
3861 FWImgHdr->PackageVersion.Struct.Major,
3862 FWImgHdr->PackageVersion.Struct.Minor,
3863 FWImgHdr->PackageVersion.Struct.Unit,
3864 FWImgHdr->PackageVersion.Struct.Dev);
3d29ed85
C
3865 }
3866 } else {
3867 _debug_dump_mf(&mpi_reply,
3868 sizeof(Mpi2FWUploadReply_t)/4);
3869 }
3870 }
3871 }
3872 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
3873out:
3874 if (fwpkg_data)
1c2048bd 3875 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
3d29ed85
C
3876 fwpkg_data_dma);
3877 return r;
3878}
3879
f92363d1
SR
3880/**
3881 * _base_display_ioc_capabilities - Display IOC's capabilities.
3882 * @ioc: per adapter object
f92363d1
SR
3883 */
3884static void
3885_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
3886{
3887 int i = 0;
3888 char desc[16];
3889 u32 iounit_pg1_flags;
3890 u32 bios_version;
3891
3892 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
3893 strncpy(desc, ioc->manu_pg0.ChipName, 16);
919d8a3f
JP
3894 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
3895 desc,
3896 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
3897 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
3898 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
3899 ioc->facts.FWVersion.Word & 0x000000FF,
3900 ioc->pdev->revision,
3901 (bios_version & 0xFF000000) >> 24,
3902 (bios_version & 0x00FF0000) >> 16,
3903 (bios_version & 0x0000FF00) >> 8,
3904 bios_version & 0x000000FF);
f92363d1 3905
989e43c7 3906 _base_display_OEMs_branding(ioc);
1117b31a 3907
016d5c35
SPS
3908 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
3909 pr_info("%sNVMe", i ? "," : "");
3910 i++;
3911 }
3912
919d8a3f 3913 ioc_info(ioc, "Protocol=(");
f92363d1
SR
3914
3915 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
919d8a3f 3916 pr_cont("Initiator");
f92363d1
SR
3917 i++;
3918 }
3919
3920 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
919d8a3f 3921 pr_cont("%sTarget", i ? "," : "");
f92363d1
SR
3922 i++;
3923 }
3924
3925 i = 0;
919d8a3f 3926 pr_cont("), Capabilities=(");
f92363d1 3927
7786ab6a
SR
3928 if (!ioc->hide_ir_msg) {
3929 if (ioc->facts.IOCCapabilities &
f92363d1 3930 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
919d8a3f 3931 pr_cont("Raid");
f92363d1 3932 i++;
7786ab6a 3933 }
f92363d1
SR
3934 }
3935
3936 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
919d8a3f 3937 pr_cont("%sTLR", i ? "," : "");
f92363d1
SR
3938 i++;
3939 }
3940
3941 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
919d8a3f 3942 pr_cont("%sMulticast", i ? "," : "");
f92363d1
SR
3943 i++;
3944 }
3945
3946 if (ioc->facts.IOCCapabilities &
3947 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
919d8a3f 3948 pr_cont("%sBIDI Target", i ? "," : "");
f92363d1
SR
3949 i++;
3950 }
3951
3952 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
919d8a3f 3953 pr_cont("%sEEDP", i ? "," : "");
f92363d1
SR
3954 i++;
3955 }
3956
3957 if (ioc->facts.IOCCapabilities &
3958 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
919d8a3f 3959 pr_cont("%sSnapshot Buffer", i ? "," : "");
f92363d1
SR
3960 i++;
3961 }
3962
3963 if (ioc->facts.IOCCapabilities &
3964 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
919d8a3f 3965 pr_cont("%sDiag Trace Buffer", i ? "," : "");
f92363d1
SR
3966 i++;
3967 }
3968
3969 if (ioc->facts.IOCCapabilities &
3970 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
919d8a3f 3971 pr_cont("%sDiag Extended Buffer", i ? "," : "");
f92363d1
SR
3972 i++;
3973 }
3974
3975 if (ioc->facts.IOCCapabilities &
3976 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
919d8a3f 3977 pr_cont("%sTask Set Full", i ? "," : "");
f92363d1
SR
3978 i++;
3979 }
3980
3981 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
3982 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
919d8a3f 3983 pr_cont("%sNCQ", i ? "," : "");
f92363d1
SR
3984 i++;
3985 }
3986
919d8a3f 3987 pr_cont(")\n");
f92363d1
SR
3988}
3989
3990/**
3991 * mpt3sas_base_update_missing_delay - change the missing delay timers
3992 * @ioc: per adapter object
3993 * @device_missing_delay: amount of time till device is reported missing
3994 * @io_missing_delay: interval IO is returned when there is a missing device
3995 *
f92363d1
SR
3996 * Passed on the command line, this function will modify the device missing
3997 * delay, as well as the io missing delay. This should be called at driver
3998 * load time.
3999 */
4000void
4001mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4002 u16 device_missing_delay, u8 io_missing_delay)
4003{
4004 u16 dmd, dmd_new, dmd_orignal;
4005 u8 io_missing_delay_original;
4006 u16 sz;
4007 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4008 Mpi2ConfigReply_t mpi_reply;
4009 u8 num_phys = 0;
4010 u16 ioc_status;
4011
4012 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4013 if (!num_phys)
4014 return;
4015
4016 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4017 sizeof(Mpi2SasIOUnit1PhyData_t));
4018 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4019 if (!sas_iounit_pg1) {
919d8a3f
JP
4020 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4021 __FILE__, __LINE__, __func__);
f92363d1
SR
4022 goto out;
4023 }
4024 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4025 sas_iounit_pg1, sz))) {
919d8a3f
JP
4026 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4027 __FILE__, __LINE__, __func__);
f92363d1
SR
4028 goto out;
4029 }
4030 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4031 MPI2_IOCSTATUS_MASK;
4032 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
4033 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4034 __FILE__, __LINE__, __func__);
f92363d1
SR
4035 goto out;
4036 }
4037
4038 /* device missing delay */
4039 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4040 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4041 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4042 else
4043 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4044 dmd_orignal = dmd;
4045 if (device_missing_delay > 0x7F) {
4046 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4047 device_missing_delay;
4048 dmd = dmd / 16;
4049 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4050 } else
4051 dmd = device_missing_delay;
4052 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
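	/*
	 * Worked example (illustrative value, not a recommendation): a
	 * requested device_missing_delay of 300 seconds is > 0x7F and
	 * <= 0x7F0, so dmd becomes 300 / 16 = 18 with
	 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set; the effective delay
	 * programmed is therefore 18 * 16 = 288 seconds, which is also the
	 * dmd_new value reported back below.
	 */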
4053
4054 /* io missing delay */
4055 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4056 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4057
4058 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4059 sz)) {
4060 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4061 dmd_new = (dmd &
4062 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4063 else
4064 dmd_new =
4065 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
919d8a3f
JP
4066 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4067 dmd_orignal, dmd_new);
4068 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4069 io_missing_delay_original,
4070 io_missing_delay);
f92363d1
SR
4071 ioc->device_missing_delay = dmd_new;
4072 ioc->io_missing_delay = io_missing_delay;
4073 }
4074
4075out:
4076 kfree(sas_iounit_pg1);
4077}
4beb4867 4078
f92363d1
SR
4079/**
4080 * _base_static_config_pages - static start of day config pages
4081 * @ioc: per adapter object
f92363d1
SR
4082 */
4083static void
4084_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4085{
4086 Mpi2ConfigReply_t mpi_reply;
4087 u32 iounit_pg1_flags;
4088
c1a6c5ac 4089 ioc->nvme_abort_timeout = 30;
f92363d1
SR
4090 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4091 if (ioc->ir_firmware)
4092 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4093 &ioc->manu_pg10);
4094
4095 /*
4096 * Ensure correct T10 PI operation if the vendor left the EEDPTagMode
4097 * flag unset in NVDATA.
4098 */
4099 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
6cd1bc7b 4100 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
f92363d1
SR
4101 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4102 ioc->name);
4103 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4104 ioc->manu_pg11.EEDPTagMode |= 0x1;
4105 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4106 &ioc->manu_pg11);
4107 }
c1a6c5ac
C
4108 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4109 ioc->tm_custom_handling = 1;
4110 else {
4111 ioc->tm_custom_handling = 0;
4112 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4113 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4114 else if (ioc->manu_pg11.NVMeAbortTO >
4115 NVME_TASK_ABORT_MAX_TIMEOUT)
4116 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4117 else
4118 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4119 }
f92363d1
SR
4120
4121 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4122 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4123 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4124 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4125 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2d8ce8c9 4126 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
f92363d1
SR
4127 _base_display_ioc_capabilities(ioc);
4128
4129 /*
4130 * Enable task_set_full handling in iounit_pg1 when the
4131 * facts capabilities indicate that it is supported.
4132 */
4133 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4134 if ((ioc->facts.IOCCapabilities &
4135 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4136 iounit_pg1_flags &=
4137 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4138 else
4139 iounit_pg1_flags |=
4140 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4141 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4142 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
2d8ce8c9
SR
4143
4144 if (ioc->iounit_pg8.NumSensors)
4145 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
f92363d1
SR
4146}
4147
22a923c3
C
4148/**
4149 * mpt3sas_free_enclosure_list - release memory
4150 * @ioc: per adapter object
4151 *
4152 * Free memory allocated during enclosure add.
22a923c3
C
4153 */
4154void
4155mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4156{
4157 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4158
4159 /* Free enclosure list */
4160 list_for_each_entry_safe(enclosure_dev,
4161 enclosure_dev_next, &ioc->enclosure_list, list) {
4162 list_del(&enclosure_dev->list);
4163 kfree(enclosure_dev);
4164 }
4165}
4166
f92363d1
SR
4167/**
4168 * _base_release_memory_pools - release memory
4169 * @ioc: per adapter object
4170 *
4171 * Free memory allocated from _base_allocate_memory_pools.
f92363d1
SR
4172 */
4173static void
4174_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4175{
9b05c91a 4176 int i = 0;
93204b78
C
4177 int j = 0;
4178 struct chain_tracker *ct;
9b05c91a 4179 struct reply_post_struct *rps;
f92363d1 4180
919d8a3f 4181 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
4182
4183 if (ioc->request) {
1c2048bd 4184 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
f92363d1 4185 ioc->request, ioc->request_dma);
919d8a3f
JP
4186 dexitprintk(ioc,
4187 ioc_info(ioc, "request_pool(0x%p): free\n",
4188 ioc->request));
f92363d1
SR
4189 ioc->request = NULL;
4190 }
4191
4192 if (ioc->sense) {
e9d98418
RP
4193 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4194 dma_pool_destroy(ioc->sense_dma_pool);
919d8a3f
JP
4195 dexitprintk(ioc,
4196 ioc_info(ioc, "sense_pool(0x%p): free\n",
4197 ioc->sense));
f92363d1
SR
4198 ioc->sense = NULL;
4199 }
4200
4201 if (ioc->reply) {
e9d98418
RP
4202 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4203 dma_pool_destroy(ioc->reply_dma_pool);
919d8a3f
JP
4204 dexitprintk(ioc,
4205 ioc_info(ioc, "reply_pool(0x%p): free\n",
4206 ioc->reply));
f92363d1
SR
4207 ioc->reply = NULL;
4208 }
4209
4210 if (ioc->reply_free) {
e9d98418 4211 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
f92363d1 4212 ioc->reply_free_dma);
e9d98418 4213 dma_pool_destroy(ioc->reply_free_dma_pool);
919d8a3f
JP
4214 dexitprintk(ioc,
4215 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4216 ioc->reply_free));
f92363d1
SR
4217 ioc->reply_free = NULL;
4218 }
4219
9b05c91a
SR
4220 if (ioc->reply_post) {
4221 do {
4222 rps = &ioc->reply_post[i];
4223 if (rps->reply_post_free) {
e9d98418 4224 dma_pool_free(
9b05c91a
SR
4225 ioc->reply_post_free_dma_pool,
4226 rps->reply_post_free,
4227 rps->reply_post_free_dma);
919d8a3f
JP
4228 dexitprintk(ioc,
4229 ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
4230 rps->reply_post_free));
9b05c91a
SR
4231 rps->reply_post_free = NULL;
4232 }
4233 } while (ioc->rdpq_array_enable &&
4234 (++i < ioc->reply_queue_count));
cd33223b
C
4235 if (ioc->reply_post_free_array &&
4236 ioc->rdpq_array_enable) {
4237 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4238 ioc->reply_post_free_array,
4239 ioc->reply_post_free_array_dma);
4240 ioc->reply_post_free_array = NULL;
4241 }
4242 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
e9d98418 4243 dma_pool_destroy(ioc->reply_post_free_dma_pool);
9b05c91a 4244 kfree(ioc->reply_post);
f92363d1
SR
4245 }
4246
016d5c35
SPS
4247 if (ioc->pcie_sgl_dma_pool) {
4248 for (i = 0; i < ioc->scsiio_depth; i++) {
dbec4c90
SPS
4249 dma_pool_free(ioc->pcie_sgl_dma_pool,
4250 ioc->pcie_sg_lookup[i].pcie_sgl,
4251 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
016d5c35
SPS
4252 }
4253 if (ioc->pcie_sgl_dma_pool)
13a06405 4254 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
016d5c35
SPS
4255 }
4256
f92363d1 4257 if (ioc->config_page) {
919d8a3f
JP
4258 dexitprintk(ioc,
4259 ioc_info(ioc, "config_page(0x%p): free\n",
4260 ioc->config_page));
1c2048bd 4261 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
f92363d1
SR
4262 ioc->config_page, ioc->config_page_dma);
4263 }
4264
f92363d1
SR
4265 kfree(ioc->hpr_lookup);
4266 kfree(ioc->internal_lookup);
4267 if (ioc->chain_lookup) {
93204b78 4268 for (i = 0; i < ioc->scsiio_depth; i++) {
74522a92
C
4269 for (j = ioc->chains_per_prp_buffer;
4270 j < ioc->chains_needed_per_io; j++) {
93204b78
C
4271 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4272 if (ct && ct->chain_buffer)
4273 dma_pool_free(ioc->chain_dma_pool,
4274 ct->chain_buffer,
4275 ct->chain_buffer_dma);
4276 }
4277 kfree(ioc->chain_lookup[i].chains_per_smid);
f92363d1 4278 }
e9d98418 4279 dma_pool_destroy(ioc->chain_dma_pool);
93204b78 4280 kfree(ioc->chain_lookup);
f92363d1
SR
4281 ioc->chain_lookup = NULL;
4282 }
4283}
4284
e21fef6f
C
4285/**
4286 * is_MSB_are_same - checks whether all reply queues in a set
4287 * have the same upper 32 bits in their base memory address.
4288 * @reply_pool_start_address: Base address of a reply queue set
4289 * @pool_sz: Size of a single Reply Descriptor Post Queue pool
4290 *
4beb4867
BVA
4291 * Return: 1 if the reply queues in a set have the same upper 32 bits in their
4292 * base memory address, else 0.
e21fef6f
C
4293 */
4294
4295static int
4296is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4297{
4298 long reply_pool_end_address;
4299
4300 reply_pool_end_address = reply_pool_start_address + pool_sz;
4301
4302 if (upper_32_bits(reply_pool_start_address) ==
4303 upper_32_bits(reply_pool_end_address))
4304 return 1;
4305 else
4306 return 0;
4307}
4308
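A brief worked example of the check above, with illustrative addresses (not taken from any real allocation):

/*
 * reply_pool_start_address = 0x00000000fffff000, pool_sz = 0x2000
 *   => reply_pool_end_address = 0x0000000100001000,
 *      upper_32_bits differ (0x0 vs 0x1)  -> returns 0
 * reply_pool_start_address = 0x00000000ffff0000, pool_sz = 0x2000
 *   => reply_pool_end_address = 0x00000000ffff2000,
 *      upper_32_bits match (both 0x0)     -> returns 1
 */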
f92363d1
SR
4309/**
4310 * _base_allocate_memory_pools - allocate start of day memory pools
4311 * @ioc: per adapter object
f92363d1 4312 *
4beb4867 4313 * Return: 0 success, anything else error.
f92363d1
SR
4314 */
4315static int
98c56ad3 4316_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
4317{
4318 struct mpt3sas_facts *facts;
4319 u16 max_sge_elements;
4320 u16 chains_needed_per_io;
cd33223b 4321 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
f92363d1 4322 u32 retry_sz;
016d5c35 4323 u16 max_request_credit, nvme_blocks_needed;
f92363d1
SR
4324 unsigned short sg_tablesize;
4325 u16 sge_size;
93204b78
C
4326 int i, j;
4327 struct chain_tracker *ct;
f92363d1 4328
919d8a3f 4329 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
4330
4331
4332 retry_sz = 0;
4333 facts = &ioc->facts;
4334
4335 /* command line tunables for max sgl entries */
4336 if (max_sgl_entries != -1)
4337 sg_tablesize = max_sgl_entries;
471ef9d4
SR
4338 else {
4339 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4340 sg_tablesize = MPT2SAS_SG_DEPTH;
4341 else
4342 sg_tablesize = MPT3SAS_SG_DEPTH;
4343 }
f92363d1 4344
06f5f976
SR
4345 /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
4346 if (reset_devices)
4347 sg_tablesize = min_t(unsigned short, sg_tablesize,
4348 MPT_KDUMP_MIN_PHYS_SEGMENTS);
4349
0448f019
SPS
4350 if (ioc->is_mcpu_endpoint)
4351 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4352 else {
4353 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4354 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4355 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4356 sg_tablesize = min_t(unsigned short, sg_tablesize,
4357 SG_MAX_SEGMENTS);
919d8a3f
JP
4358 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
4359 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
0448f019
SPS
4360 }
4361 ioc->shost->sg_tablesize = sg_tablesize;
ad666a0f 4362 }
f92363d1 4363
fd0331b3
SS
4364 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
4365 (facts->RequestCredit / 4));
4366 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4367 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4368 INTERNAL_SCSIIO_CMDS_COUNT)) {
919d8a3f
JP
4369 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
4370 facts->RequestCredit);
fd0331b3
SS
4371 return -ENOMEM;
4372 }
4373 ioc->internal_depth = 10;
4374 }
4375
4376 ioc->hi_priority_depth = ioc->internal_depth - (5);
f92363d1
SR
4377 /* command line tunables for max controller queue depth */
4378 if (max_queue_depth != -1 && max_queue_depth != 0) {
4379 max_request_credit = min_t(u16, max_queue_depth +
fd0331b3 4380 ioc->internal_depth, facts->RequestCredit);
f92363d1
SR
4381 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4382 max_request_credit = MAX_HBA_QUEUE_DEPTH;
06f5f976
SR
4383 } else if (reset_devices)
4384 max_request_credit = min_t(u16, facts->RequestCredit,
4385 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4386 else
f92363d1
SR
4387 max_request_credit = min_t(u16, facts->RequestCredit,
4388 MAX_HBA_QUEUE_DEPTH);
4389
fd0331b3
SS
4390 /* Firmware maintains additional facts->HighPriorityCredit number of
4391 * credits for HiPriority Request messages, so the hba queue depth will be
4392 * the sum of max_request_credit and the high priority queue depth.
4393 */
4394 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
f92363d1
SR
4395
4396 /* request frame size */
4397 ioc->request_sz = facts->IOCRequestFrameSize * 4;
4398
4399 /* reply frame size */
4400 ioc->reply_sz = facts->ReplyFrameSize * 4;
4401
ebb3024e
SS
4402 /* chain segment size */
4403 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4404 if (facts->IOCMaxChainSegmentSize)
4405 ioc->chain_segment_sz =
4406 facts->IOCMaxChainSegmentSize *
4407 MAX_CHAIN_ELEMT_SZ;
4408 else
4409 /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
4410 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4411 MAX_CHAIN_ELEMT_SZ;
4412 } else
4413 ioc->chain_segment_sz = ioc->request_sz;
4414
f92363d1
SR
4415 /* calculate the max scatter element size */
4416 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4417
4418 retry_allocation:
4419 total_sz = 0;
4420 /* calculate number of sg elements left over in the 1st frame */
4421 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4422 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4423 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4424
4425 /* now do the same for a chain buffer */
ebb3024e 4426 max_sge_elements = ioc->chain_segment_sz - sge_size;
f92363d1
SR
4427 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4428
4429 /*
4430 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
4431 */
4432 chains_needed_per_io = ((ioc->shost->sg_tablesize -
4433 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4434 + 1;
4435 if (chains_needed_per_io > facts->MaxChainDepth) {
4436 chains_needed_per_io = facts->MaxChainDepth;
4437 ioc->shost->sg_tablesize = min_t(u16,
4438 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4439 * chains_needed_per_io), ioc->shost->sg_tablesize);
4440 }
4441 ioc->chains_needed_per_io = chains_needed_per_io;
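	/*
	 * Worked example with purely illustrative values (the real numbers
	 * depend on the IOC request frame size and SGE format): with
	 * sg_tablesize = 128, max_sges_in_main_message = 3 and
	 * max_sges_in_chain_message = 7, chains_needed_per_io =
	 * ((128 - 3) / 7) + 1 = 18 chain buffers per I/O, subject to the
	 * facts->MaxChainDepth clamp above.
	 */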
4442
4443 /* reply free queue sizing - accounting for 64 FW events */
4444 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4445
0448f019
SPS
4446 /* mCPU manages single counters for simplicity */
4447 if (ioc->is_mcpu_endpoint)
4448 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4449 else {
4450 /* calculate reply descriptor post queue depth */
4451 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4452 ioc->reply_free_queue_depth + 1;
4453 /* align the reply post queue on the next 16 count boundary */
4454 if (ioc->reply_post_queue_depth % 16)
4455 ioc->reply_post_queue_depth += 16 -
4456 (ioc->reply_post_queue_depth % 16);
4457 }
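	/*
	 * Worked example (illustrative): hba_queue_depth = 1000 gives
	 * reply_free_queue_depth = 1064 and reply_post_queue_depth =
	 * 1000 + 1064 + 1 = 2065, which the 16-count alignment above rounds
	 * up to 2080.
	 */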
f92363d1 4458
f92363d1
SR
4459 if (ioc->reply_post_queue_depth >
4460 facts->MaxReplyDescriptorPostQueueDepth) {
4461 ioc->reply_post_queue_depth =
4462 facts->MaxReplyDescriptorPostQueueDepth -
4463 (facts->MaxReplyDescriptorPostQueueDepth % 16);
4464 ioc->hba_queue_depth =
4465 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
4466 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4467 }
4468
919d8a3f
JP
4469 dinitprintk(ioc,
4470 ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
4471 ioc->max_sges_in_main_message,
4472 ioc->max_sges_in_chain_message,
4473 ioc->shost->sg_tablesize,
4474 ioc->chains_needed_per_io));
f92363d1 4475
9b05c91a
SR
4476 /* reply post queue, 16 byte align */
4477 reply_post_free_sz = ioc->reply_post_queue_depth *
4478 sizeof(Mpi2DefaultReplyDescriptor_t);
4479
4480 sz = reply_post_free_sz;
4481 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4482 sz *= ioc->reply_queue_count;
4483
4484 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4485 (ioc->reply_queue_count):1,
4486 sizeof(struct reply_post_struct), GFP_KERNEL);
4487
4488 if (!ioc->reply_post) {
919d8a3f 4489 ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
9b05c91a
SR
4490 goto out;
4491 }
e9d98418
RP
4492 ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
4493 &ioc->pdev->dev, sz, 16, 0);
9b05c91a 4494 if (!ioc->reply_post_free_dma_pool) {
919d8a3f 4495 ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
9b05c91a
SR
4496 goto out;
4497 }
4498 i = 0;
4499 do {
4500 ioc->reply_post[i].reply_post_free =
c39a4d75 4501 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
9b05c91a
SR
4502 GFP_KERNEL,
4503 &ioc->reply_post[i].reply_post_free_dma);
4504 if (!ioc->reply_post[i].reply_post_free) {
919d8a3f 4505 ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
9b05c91a
SR
4506 goto out;
4507 }
919d8a3f
JP
4508 dinitprintk(ioc,
4509 ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4510 ioc->reply_post[i].reply_post_free,
4511 ioc->reply_post_queue_depth,
4512 8, sz / 1024));
4513 dinitprintk(ioc,
4514 ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
4515 (u64)ioc->reply_post[i].reply_post_free_dma));
9b05c91a
SR
4516 total_sz += sz;
4517 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
4518
4519 if (ioc->dma_mask == 64) {
4520 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
919d8a3f
JP
4521 ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
4522 pci_name(ioc->pdev));
9b05c91a
SR
4523 goto out;
4524 }
4525 }
4526
f92363d1
SR
4527 ioc->scsiio_depth = ioc->hba_queue_depth -
4528 ioc->hi_priority_depth - ioc->internal_depth;
4529
4530 /* set the scsi host can_queue depth
4531 * with some internal commands that could be outstanding
4532 */
fd0331b3 4533 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
919d8a3f
JP
4534 dinitprintk(ioc,
4535 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
4536 ioc->shost->can_queue));
f92363d1
SR
4537
4538
4539 /* contiguous pool for request and chains, 16 byte align, one extra
4540 * frame for smid=0
4541 */
4542 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
4543 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
4544
4545 /* hi-priority queue */
4546 sz += (ioc->hi_priority_depth * ioc->request_sz);
4547
4548 /* internal queue */
4549 sz += (ioc->internal_depth * ioc->request_sz);
4550
4551 ioc->request_dma_sz = sz;
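	/*
	 * Worked example (illustrative values only): with request_sz = 128,
	 * scsiio_depth = 5000, hi_priority_depth = 8 and internal_depth = 13,
	 * sz = (5001 + 8 + 13) * 128 = 642816 bytes (about 628 kB) for the
	 * single contiguous request frame region allocated below.
	 */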
1c2048bd
CH
4552 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
4553 &ioc->request_dma, GFP_KERNEL);
f92363d1 4554 if (!ioc->request) {
1c2048bd 4555 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
919d8a3f
JP
4556 ioc->hba_queue_depth, ioc->chains_needed_per_io,
4557 ioc->request_sz, sz / 1024);
f92363d1
SR
4558 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
4559 goto out;
fd0331b3
SS
4560 retry_sz = 64;
4561 ioc->hba_queue_depth -= retry_sz;
8ff045c9 4562 _base_release_memory_pools(ioc);
f92363d1
SR
4563 goto retry_allocation;
4564 }
4565
4566 if (retry_sz)
1c2048bd 4567 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
919d8a3f
JP
4568 ioc->hba_queue_depth, ioc->chains_needed_per_io,
4569 ioc->request_sz, sz / 1024);
f92363d1
SR
4570
4571 /* hi-priority queue */
4572 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
4573 ioc->request_sz);
4574 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
4575 ioc->request_sz);
4576
4577 /* internal queue */
4578 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
4579 ioc->request_sz);
4580 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
4581 ioc->request_sz);
4582
919d8a3f
JP
4583 dinitprintk(ioc,
4584 ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4585 ioc->request, ioc->hba_queue_depth,
4586 ioc->request_sz,
4587 (ioc->hba_queue_depth * ioc->request_sz) / 1024));
f92363d1 4588
919d8a3f
JP
4589 dinitprintk(ioc,
4590 ioc_info(ioc, "request pool: dma(0x%llx)\n",
4591 (unsigned long long)ioc->request_dma));
f92363d1
SR
4592 total_sz += sz;
4593
919d8a3f
JP
4594 dinitprintk(ioc,
4595 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
4596 ioc->request, ioc->scsiio_depth));
f92363d1
SR
4597
4598 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
93204b78
C
4599 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
4600 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
f92363d1 4601 if (!ioc->chain_lookup) {
919d8a3f 4602 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
f92363d1
SR
4603 goto out;
4604 }
93204b78
C
4605
4606 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
4607 for (i = 0; i < ioc->scsiio_depth; i++) {
4608 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
4609 if (!ioc->chain_lookup[i].chains_per_smid) {
919d8a3f 4610 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
93204b78
C
4611 goto out;
4612 }
4613 }
4614
f92363d1
SR
4615 /* initialize hi-priority queue smid's */
4616 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
4617 sizeof(struct request_tracker), GFP_KERNEL);
4618 if (!ioc->hpr_lookup) {
919d8a3f 4619 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
f92363d1
SR
4620 goto out;
4621 }
4622 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
919d8a3f
JP
4623 dinitprintk(ioc,
4624 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
4625 ioc->hi_priority,
4626 ioc->hi_priority_depth, ioc->hi_priority_smid));
f92363d1
SR
4627
4628 /* initialize internal queue smid's */
4629 ioc->internal_lookup = kcalloc(ioc->internal_depth,
4630 sizeof(struct request_tracker), GFP_KERNEL);
4631 if (!ioc->internal_lookup) {
919d8a3f 4632 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
f92363d1
SR
4633 goto out;
4634 }
4635 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
919d8a3f
JP
4636 dinitprintk(ioc,
4637 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
4638 ioc->internal,
4639 ioc->internal_depth, ioc->internal_smid));
016d5c35
SPS
4640 /*
4641 * The number of NVMe page sized blocks needed is:
4642 * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
4643 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
4644 * that is placed in the main message frame. 8 is the size of each PRP
4645 * entry or PRP list pointer entry. 8 is subtracted from page_size
4646 * because of the PRP list pointer entry at the end of a page, so this
4647 * is not counted as a PRP entry. The 1 added page is a round up.
4648 *
4649 * To avoid allocation failures due to the amount of memory that could
4650 * be required for NVMe PRPs, only each per-I/O set of NVMe blocks is kept
4651 * contiguous, and a separate set is allocated for each possible I/O.
4652 */
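	/*
	 * Worked example (illustrative, assuming a 4096 byte NVMe page and
	 * the 8 byte PRP entry size noted above): sg_tablesize = 128 needs
	 * 128 * 8 = 1024 bytes of PRP entries, each page holds
	 * 4096 - 8 = 4088 usable bytes, so nvme_blocks_needed =
	 * ((1024 - 1) / 4088) + 1 = 1 page per I/O.  The expression is just
	 * a ceiling division of PRP bytes by usable bytes per page.
	 */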
74522a92 4653 ioc->chains_per_prp_buffer = 0;
016d5c35
SPS
4654 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4655 nvme_blocks_needed =
4656 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
4657 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
4658 nvme_blocks_needed++;
4659
dbec4c90
SPS
4660 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
4661 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
4662 if (!ioc->pcie_sg_lookup) {
919d8a3f 4663 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
dbec4c90
SPS
4664 goto out;
4665 }
016d5c35
SPS
4666 sz = nvme_blocks_needed * ioc->page_size;
4667 ioc->pcie_sgl_dma_pool =
13a06405 4668 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
016d5c35 4669 if (!ioc->pcie_sgl_dma_pool) {
919d8a3f 4670 ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
016d5c35
SPS
4671 goto out;
4672 }
74522a92
C
4673
4674 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
4675 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
4676 ioc->chains_needed_per_io);
4677
016d5c35 4678 for (i = 0; i < ioc->scsiio_depth; i++) {
dbec4c90
SPS
4679 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
4680 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
4681 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4682 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
919d8a3f 4683 ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
016d5c35
SPS
4684 goto out;
4685 }
74522a92
C
4686 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
4687 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4688 ct->chain_buffer =
4689 ioc->pcie_sg_lookup[i].pcie_sgl +
4690 (j * ioc->chain_segment_sz);
4691 ct->chain_buffer_dma =
4692 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
4693 (j * ioc->chain_segment_sz);
4694 }
016d5c35 4695 }
f92363d1 4696
919d8a3f
JP
4697 dinitprintk(ioc,
4698 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
4699 ioc->scsiio_depth, sz,
4700 (sz * ioc->scsiio_depth) / 1024));
4701 dinitprintk(ioc,
4702 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
4703 ioc->chains_per_prp_buffer));
016d5c35
SPS
4704 total_sz += sz * ioc->scsiio_depth;
4705 }
74522a92
C
4706
4707 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
4708 ioc->chain_segment_sz, 16, 0);
4709 if (!ioc->chain_dma_pool) {
919d8a3f 4710 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
74522a92
C
4711 goto out;
4712 }
4713 for (i = 0; i < ioc->scsiio_depth; i++) {
4714 for (j = ioc->chains_per_prp_buffer;
4715 j < ioc->chains_needed_per_io; j++) {
4716 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4717 ct->chain_buffer = dma_pool_alloc(
4718 ioc->chain_dma_pool, GFP_KERNEL,
4719 &ct->chain_buffer_dma);
4720 if (!ct->chain_buffer) {
919d8a3f 4721 ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
74522a92
C
4722 _base_release_memory_pools(ioc);
4723 goto out;
4724 }
4725 }
4726 total_sz += ioc->chain_segment_sz;
4727 }
4728
919d8a3f
JP
4729 dinitprintk(ioc,
4730 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
4731 ioc->chain_depth, ioc->chain_segment_sz,
4732 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
74522a92 4733
f92363d1
SR
4734 /* sense buffers, 4 byte align */
4735 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
e9d98418
RP
4736 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4737 4, 0);
f92363d1 4738 if (!ioc->sense_dma_pool) {
919d8a3f 4739 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
f92363d1
SR
4740 goto out;
4741 }
e9d98418 4742 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
f92363d1
SR
4743 &ioc->sense_dma);
4744 if (!ioc->sense) {
919d8a3f 4745 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
f92363d1
SR
4746 goto out;
4747 }
e21fef6f
C
4748 /* The sense buffers are required to be in the same 4 GB region.
4749 * The check below verifies this.
4750 * In case of failure, a new pci pool is created with updated
4751 * alignment, and the older allocation and pool are destroyed.
4752 * The alignment is chosen such that, if the next allocation
4753 * succeeds, it always meets the same 4 GB region requirement.
4754 * The actual requirement is not alignment as such, but that the start
4755 * and end DMA addresses have the same upper 32 bits.
4756 */
4757 if (!is_MSB_are_same((long)ioc->sense, sz)) {
4758 //Release Sense pool & Reallocate
4759 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4760 dma_pool_destroy(ioc->sense_dma_pool);
4761 ioc->sense = NULL;
4762
4763 ioc->sense_dma_pool =
4764 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4765 roundup_pow_of_two(sz), 0);
4766 if (!ioc->sense_dma_pool) {
919d8a3f 4767 ioc_err(ioc, "sense pool: pci_pool_create failed\n");
e21fef6f
C
4768 goto out;
4769 }
4770 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
4771 &ioc->sense_dma);
4772 if (!ioc->sense) {
919d8a3f 4773 ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
e21fef6f
C
4774 goto out;
4775 }
4776 }
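	/*
	 * Sketch of why the power-of-two alignment above is sufficient: each
	 * element is sz bytes and is aligned to roundup_pow_of_two(sz), so it
	 * lies entirely inside one naturally aligned 2^k byte window with
	 * 2^k >= sz.  Such a window cannot straddle a 4 GB boundary (for
	 * sz <= 4 GB), hence the start and end DMA addresses share the same
	 * upper 32 bits.
	 */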
919d8a3f
JP
4777 dinitprintk(ioc,
4778 ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4779 ioc->sense, ioc->scsiio_depth,
4780 SCSI_SENSE_BUFFERSIZE, sz / 1024));
4781 dinitprintk(ioc,
4782 ioc_info(ioc, "sense_dma(0x%llx)\n",
4783 (unsigned long long)ioc->sense_dma));
f92363d1
SR
4784 total_sz += sz;
4785
4786 /* reply pool, 4 byte align */
4787 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
e9d98418
RP
4788 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4789 4, 0);
f92363d1 4790 if (!ioc->reply_dma_pool) {
919d8a3f 4791 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
f92363d1
SR
4792 goto out;
4793 }
e9d98418 4794 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
f92363d1
SR
4795 &ioc->reply_dma);
4796 if (!ioc->reply) {
919d8a3f 4797 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
f92363d1
SR
4798 goto out;
4799 }
4800 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
4801 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
919d8a3f
JP
4802 dinitprintk(ioc,
4803 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
4804 ioc->reply, ioc->reply_free_queue_depth,
4805 ioc->reply_sz, sz / 1024));
4806 dinitprintk(ioc,
4807 ioc_info(ioc, "reply_dma(0x%llx)\n",
4808 (unsigned long long)ioc->reply_dma));
f92363d1
SR
4809 total_sz += sz;
4810
4811 /* reply free queue, 16 byte align */
4812 sz = ioc->reply_free_queue_depth * 4;
e9d98418
RP
4813 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
4814 &ioc->pdev->dev, sz, 16, 0);
f92363d1 4815 if (!ioc->reply_free_dma_pool) {
919d8a3f 4816 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
f92363d1
SR
4817 goto out;
4818 }
c39a4d75 4819 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
f92363d1
SR
4820 &ioc->reply_free_dma);
4821 if (!ioc->reply_free) {
919d8a3f 4822 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
f92363d1
SR
4823 goto out;
4824 }
919d8a3f
JP
4825 dinitprintk(ioc,
4826 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
4827 ioc->reply_free, ioc->reply_free_queue_depth,
4828 4, sz / 1024));
4829 dinitprintk(ioc,
4830 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
4831 (unsigned long long)ioc->reply_free_dma));
f92363d1
SR
4832 total_sz += sz;
4833
cd33223b
C
4834 if (ioc->rdpq_array_enable) {
4835 reply_post_free_array_sz = ioc->reply_queue_count *
4836 sizeof(Mpi2IOCInitRDPQArrayEntry);
4837 ioc->reply_post_free_array_dma_pool =
4838 dma_pool_create("reply_post_free_array pool",
4839 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
4840 if (!ioc->reply_post_free_array_dma_pool) {
4841 dinitprintk(ioc,
919d8a3f 4842 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
cd33223b
C
4843 goto out;
4844 }
4845 ioc->reply_post_free_array =
4846 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
4847 GFP_KERNEL, &ioc->reply_post_free_array_dma);
4848 if (!ioc->reply_post_free_array) {
4849 dinitprintk(ioc,
919d8a3f 4850 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
cd33223b
C
4851 goto out;
4852 }
4853 }
f92363d1 4854 ioc->config_page_sz = 512;
1c2048bd
CH
4855 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
4856 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
f92363d1 4857 if (!ioc->config_page) {
919d8a3f 4858 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
f92363d1
SR
4859 goto out;
4860 }
919d8a3f
JP
4861 dinitprintk(ioc,
4862 ioc_info(ioc, "config page(0x%p): size(%d)\n",
4863 ioc->config_page, ioc->config_page_sz));
4864 dinitprintk(ioc,
4865 ioc_info(ioc, "config_page_dma(0x%llx)\n",
4866 (unsigned long long)ioc->config_page_dma));
f92363d1
SR
4867 total_sz += ioc->config_page_sz;
4868
919d8a3f
JP
4869 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
4870 total_sz / 1024);
4871 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
4872 ioc->shost->can_queue, facts->RequestCredit);
4873 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
4874 ioc->shost->sg_tablesize);
f92363d1
SR
4875 return 0;
4876
4877 out:
4878 return -ENOMEM;
4879}
4880
4881/**
4882 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
4883 * @ioc: Pointer to MPT_ADAPTER structure
4884 * @cooked: Request raw or cooked IOC state
4885 *
4beb4867 4886 * Return: all IOC Doorbell register bits if cooked==0, else just the
f92363d1
SR
4887 * Doorbell bits in MPI_IOC_STATE_MASK.
4888 */
4889u32
4890mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
4891{
4892 u32 s, sc;
4893
306eaf27 4894 s = ioc->base_readl(&ioc->chip->Doorbell);
f92363d1
SR
4895 sc = s & MPI2_IOC_STATE_MASK;
4896 return cooked ? sc : s;
4897}
4898
4899/**
4900 * _base_wait_on_iocstate - waiting on a particular ioc state
4beb4867 4901 * @ioc: ?
f92363d1
SR
4902 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
4903 * @timeout: timeout in seconds
f92363d1 4904 *
4beb4867 4905 * Return: 0 for success, non-zero for failure.
f92363d1
SR
4906 */
4907static int
98c56ad3 4908_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
f92363d1
SR
4909{
4910 u32 count, cntdn;
4911 u32 current_state;
4912
4913 count = 0;
98c56ad3 4914 cntdn = 1000 * timeout;
f92363d1
SR
4915 do {
4916 current_state = mpt3sas_base_get_iocstate(ioc, 1);
4917 if (current_state == ioc_state)
4918 return 0;
4919 if (count && current_state == MPI2_IOC_STATE_FAULT)
4920 break;
98c56ad3
CO
4921
4922 usleep_range(1000, 1500);
f92363d1
SR
4923 count++;
4924 } while (--cntdn);
4925
4926 return current_state;
4927}
4928
4929/**
4930 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
4931 * a write to the doorbell)
4932 * @ioc: per adapter object
 * @timeout: timeout in seconds
f92363d1 4933 *
4beb4867 4934 * Return: 0 for success, non-zero for failure.
f92363d1
SR
4935 *
4936 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
4937 */
4dc8c808 4938static int
98c56ad3 4939_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
4dc8c808 4940
f92363d1 4941static int
98c56ad3 4942_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
4943{
4944 u32 cntdn, count;
4945 u32 int_status;
4946
4947 count = 0;
98c56ad3 4948 cntdn = 1000 * timeout;
f92363d1 4949 do {
306eaf27 4950 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
f92363d1 4951 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
919d8a3f
JP
4952 dhsprintk(ioc,
4953 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
4954 __func__, count, timeout));
f92363d1
SR
4955 return 0;
4956 }
98c56ad3
CO
4957
4958 usleep_range(1000, 1500);
4959 count++;
4960 } while (--cntdn);
4961
919d8a3f
JP
4962 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
4963 __func__, count, int_status);
98c56ad3
CO
4964 return -EFAULT;
4965}
4966
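/**
 * _base_spin_on_doorbell_int - busy-wait for a doorbell interrupt
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Same check as _base_wait_for_doorbell_int(), but busy-polls with udelay()
 * rather than sleeping between register reads.
 *
 * Return: 0 for success, non-zero for failure.
 */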
4967static int
4968_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
4969{
4970 u32 cntdn, count;
4971 u32 int_status;
4972
4973 count = 0;
4974 cntdn = 2000 * timeout;
4975 do {
306eaf27 4976 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
98c56ad3 4977 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
919d8a3f
JP
4978 dhsprintk(ioc,
4979 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
4980 __func__, count, timeout));
98c56ad3
CO
4981 return 0;
4982 }
4983
4984 udelay(500);
f92363d1
SR
4985 count++;
4986 } while (--cntdn);
4987
919d8a3f
JP
4988 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
4989 __func__, count, int_status);
f92363d1 4990 return -EFAULT;
98c56ad3 4991
f92363d1
SR
4992}
4993
4994/**
4995 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
4996 * @ioc: per adapter object
 4997 * @timeout: timeout in seconds
f92363d1 4998 *
4beb4867 4999 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5000 *
5001 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5002 * doorbell.
5003 */
5004static int
98c56ad3 5005_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
5006{
5007 u32 cntdn, count;
5008 u32 int_status;
5009 u32 doorbell;
5010
5011 count = 0;
98c56ad3 5012 cntdn = 1000 * timeout;
f92363d1 5013 do {
306eaf27 5014 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
f92363d1 5015 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
919d8a3f
JP
5016 dhsprintk(ioc,
5017 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5018 __func__, count, timeout));
f92363d1
SR
5019 return 0;
5020 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
306eaf27 5021 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
f92363d1
SR
5022 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5023 MPI2_IOC_STATE_FAULT) {
5024 mpt3sas_base_fault_info(ioc , doorbell);
5025 return -EFAULT;
5026 }
5027 } else if (int_status == 0xFFFFFFFF)
5028 goto out;
5029
98c56ad3 5030 usleep_range(1000, 1500);
f92363d1
SR
5031 count++;
5032 } while (--cntdn);
5033
5034 out:
919d8a3f
JP
5035 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5036 __func__, count, int_status);
f92363d1
SR
5037 return -EFAULT;
5038}
5039
5040/**
5041 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5042 * @ioc: per adapter object
 5043 * @timeout: timeout in seconds
f92363d1 5044 *
4beb4867 5045 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5046 */
5047static int
98c56ad3 5048_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
f92363d1
SR
5049{
5050 u32 cntdn, count;
5051 u32 doorbell_reg;
5052
5053 count = 0;
98c56ad3 5054 cntdn = 1000 * timeout;
f92363d1 5055 do {
306eaf27 5056 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
f92363d1 5057 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
919d8a3f
JP
5058 dhsprintk(ioc,
5059 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5060 __func__, count, timeout));
f92363d1
SR
5061 return 0;
5062 }
98c56ad3
CO
5063
5064 usleep_range(1000, 1500);
f92363d1
SR
5065 count++;
5066 } while (--cntdn);
5067
919d8a3f
JP
5068 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5069 __func__, count, doorbell_reg);
f92363d1
SR
5070 return -EFAULT;
5071}
5072
5073/**
5074 * _base_send_ioc_reset - send doorbell reset
5075 * @ioc: per adapter object
5076 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 5077 * @timeout: timeout in seconds
f92363d1 5078 *
4beb4867 5079 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5080 */
5081static int
98c56ad3 5082_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
f92363d1
SR
5083{
5084 u32 ioc_state;
5085 int r = 0;
5086
5087 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
919d8a3f 5088 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
f92363d1
SR
5089 return -EFAULT;
5090 }
5091
5092 if (!(ioc->facts.IOCCapabilities &
5093 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5094 return -EFAULT;
5095
919d8a3f 5096 ioc_info(ioc, "sending message unit reset !!\n");
f92363d1
SR
5097
5098 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5099 &ioc->chip->Doorbell);
98c56ad3 5100 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
f92363d1
SR
5101 r = -EFAULT;
5102 goto out;
5103 }
98c56ad3 5104 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
f92363d1 5105 if (ioc_state) {
919d8a3f
JP
5106 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5107 __func__, ioc_state);
f92363d1
SR
5108 r = -EFAULT;
5109 goto out;
5110 }
5111 out:
919d8a3f
JP
5112 ioc_info(ioc, "message unit reset: %s\n",
5113 r == 0 ? "SUCCESS" : "FAILED");
f92363d1
SR
5114 return r;
5115}
5116
f4305749
SP
5117/**
 5118 * mpt3sas_wait_for_ioc - wait for the IOC to become operational
 5119 * @ioc: per adapter object
 5120 * @timeout: timeout in seconds
 5121 *
 5122 * Waits up to @timeout seconds for the IOC to reach the OPERATIONAL state.
 5123 *
 5124 * Return: 0 if the IOC is present and operational, else -EFAULT.
5125 */
5126
5127int
5128mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5129{
5130 int wait_state_count = 0;
5131 u32 ioc_state;
5132
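	/* poll once per second until the IOC reports OPERATIONAL or the
	 * timeout (in seconds) expires
	 */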
a064a647 5133 do {
f4305749 5134 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
a064a647
SP
5135 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5136 break;
5137 ssleep(1);
f4305749 5138 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
a064a647
SP
5139 __func__, ++wait_state_count);
5140 } while (--timeout);
5141 if (!timeout) {
5142 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5143 return -EFAULT;
f4305749
SP
5144 }
5145 if (wait_state_count)
5146 ioc_info(ioc, "ioc is operational\n");
f4305749
SP
5147 return 0;
5148}
5149
f92363d1
SR
5150/**
 5151 * _base_handshake_req_reply_wait - send request through the doorbell interface
5152 * @ioc: per adapter object
5153 * @request_bytes: request length
5154 * @request: pointer having request payload
5155 * @reply_bytes: reply length
5156 * @reply: pointer to reply payload
 5157 * @timeout: timeout in seconds
f92363d1 5158 *
4beb4867 5159 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5160 */
5161static int
5162_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
98c56ad3 5163 u32 *request, int reply_bytes, u16 *reply, int timeout)
f92363d1
SR
5164{
5165 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5166 int i;
5167 u8 failed;
f92363d1
SR
5168 __le32 *mfp;
5169
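	/*
	 * Doorbell handshake sequence:
	 *  1. verify the doorbell is idle and clear any stale interrupt
	 *  2. write the handshake function and request length (in dwords)
	 *     to the doorbell
	 *  3. push the request one 32-bit word at a time, waiting for the
	 *     IOC to acknowledge each write
	 *  4. read the reply back one 16-bit word at a time through the
	 *     doorbell data register
	 */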
5170 /* make sure doorbell is not in use */
306eaf27 5171 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
919d8a3f 5172 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
f92363d1
SR
5173 return -EFAULT;
5174 }
5175
5176 /* clear pending doorbell interrupts from previous state changes */
306eaf27 5177 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
f92363d1
SR
5178 MPI2_HIS_IOC2SYS_DB_STATUS)
5179 writel(0, &ioc->chip->HostInterruptStatus);
5180
5181 /* send message to ioc */
5182 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5183 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5184 &ioc->chip->Doorbell);
5185
98c56ad3 5186 if ((_base_spin_on_doorbell_int(ioc, 5))) {
919d8a3f
JP
5187 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5188 __LINE__);
f92363d1
SR
5189 return -EFAULT;
5190 }
5191 writel(0, &ioc->chip->HostInterruptStatus);
5192
98c56ad3 5193 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
919d8a3f
JP
5194 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5195 __LINE__);
f92363d1
SR
5196 return -EFAULT;
5197 }
5198
5199 /* send message 32-bits at a time */
5200 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
09c2f95a 5201 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
98c56ad3 5202 if ((_base_wait_for_doorbell_ack(ioc, 5)))
f92363d1
SR
5203 failed = 1;
5204 }
5205
5206 if (failed) {
919d8a3f
JP
5207 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5208 __LINE__);
f92363d1
SR
5209 return -EFAULT;
5210 }
5211
5212 /* now wait for the reply */
98c56ad3 5213 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
919d8a3f
JP
5214 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5215 __LINE__);
f92363d1
SR
5216 return -EFAULT;
5217 }
5218
 5219 /* read the first two 16-bit words; they give the total length of the reply */
306eaf27 5220 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
f92363d1
SR
5221 & MPI2_DOORBELL_DATA_MASK);
5222 writel(0, &ioc->chip->HostInterruptStatus);
98c56ad3 5223 if ((_base_wait_for_doorbell_int(ioc, 5))) {
919d8a3f
JP
5224 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5225 __LINE__);
f92363d1
SR
5226 return -EFAULT;
5227 }
306eaf27 5228 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
f92363d1
SR
5229 & MPI2_DOORBELL_DATA_MASK);
5230 writel(0, &ioc->chip->HostInterruptStatus);
5231
5232 for (i = 2; i < default_reply->MsgLength * 2; i++) {
98c56ad3 5233 if ((_base_wait_for_doorbell_int(ioc, 5))) {
919d8a3f
JP
5234 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5235 __LINE__);
f92363d1
SR
5236 return -EFAULT;
5237 }
5238 if (i >= reply_bytes/2) /* overflow case */
306eaf27 5239 ioc->base_readl(&ioc->chip->Doorbell);
f92363d1 5240 else
306eaf27
SP
5241 reply[i] = le16_to_cpu(
5242 ioc->base_readl(&ioc->chip->Doorbell)
f92363d1
SR
5243 & MPI2_DOORBELL_DATA_MASK);
5244 writel(0, &ioc->chip->HostInterruptStatus);
5245 }
5246
98c56ad3
CO
5247 _base_wait_for_doorbell_int(ioc, 5);
5248 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
919d8a3f
JP
5249 dhsprintk(ioc,
5250 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5251 __LINE__));
f92363d1
SR
5252 }
5253 writel(0, &ioc->chip->HostInterruptStatus);
5254
5255 if (ioc->logging_level & MPT_DEBUG_INIT) {
5256 mfp = (__le32 *)reply;
5257 pr_info("\toffset:data\n");
5258 for (i = 0; i < reply_bytes/4; i++)
5259 pr_info("\t[0x%02x]:%08x\n", i*4,
5260 le32_to_cpu(mfp[i]));
5261 }
5262 return 0;
5263}
5264
5265/**
5266 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5267 * @ioc: per adapter object
5268 * @mpi_reply: the reply payload from FW
5269 * @mpi_request: the request payload sent to FW
5270 *
 5271 * The SAS IO Unit Control Request message allows the host to perform
 5272 * low-level operations such as resets on the PHYs of the IO Unit. It also
 5273 * allows the host to obtain the IOC-assigned device handle for a device,
 5274 * given other identifying information about the device, and to remove IOC
 5275 * resources associated with the device.
5276 *
4beb4867 5277 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5278 */
5279int
5280mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5281 Mpi2SasIoUnitControlReply_t *mpi_reply,
5282 Mpi2SasIoUnitControlRequest_t *mpi_request)
5283{
5284 u16 smid;
d37306ca 5285 u8 issue_reset = 0;
f92363d1
SR
5286 int rc;
5287 void *request;
f92363d1 5288
919d8a3f 5289 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
5290
5291 mutex_lock(&ioc->base_cmds.mutex);
5292
5293 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
919d8a3f 5294 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
f92363d1
SR
5295 rc = -EAGAIN;
5296 goto out;
5297 }
5298
f4305749
SP
5299 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5300 if (rc)
5301 goto out;
f92363d1
SR
5302
5303 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5304 if (!smid) {
919d8a3f 5305 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
5306 rc = -EAGAIN;
5307 goto out;
5308 }
5309
5310 rc = 0;
5311 ioc->base_cmds.status = MPT3_CMD_PENDING;
5312 request = mpt3sas_base_get_msg_frame(ioc, smid);
5313 ioc->base_cmds.smid = smid;
5314 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
5315 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5316 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
5317 ioc->ioc_link_reset_in_progress = 1;
5318 init_completion(&ioc->base_cmds.done);
40114bde 5319 mpt3sas_base_put_smid_default(ioc, smid);
8bbb1cf6 5320 wait_for_completion_timeout(&ioc->base_cmds.done,
f92363d1
SR
5321 msecs_to_jiffies(10000));
5322 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5323 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
5324 ioc->ioc_link_reset_in_progress)
5325 ioc->ioc_link_reset_in_progress = 0;
5326 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
d37306ca
C
5327 issue_reset =
5328 mpt3sas_base_check_cmd_timeout(ioc,
5329 ioc->base_cmds.status, mpi_request,
5330 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
f92363d1
SR
5331 goto issue_host_reset;
5332 }
5333 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5334 memcpy(mpi_reply, ioc->base_cmds.reply,
5335 sizeof(Mpi2SasIoUnitControlReply_t));
5336 else
5337 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
5338 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5339 goto out;
5340
5341 issue_host_reset:
5342 if (issue_reset)
98c56ad3 5343 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1
SR
5344 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5345 rc = -EFAULT;
5346 out:
5347 mutex_unlock(&ioc->base_cmds.mutex);
5348 return rc;
5349}
5350
5351/**
5352 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
5353 * @ioc: per adapter object
5354 * @mpi_reply: the reply payload from FW
5355 * @mpi_request: the request payload sent to FW
5356 *
5357 * The SCSI Enclosure Processor request message causes the IOC to
5358 * communicate with SES devices to control LED status signals.
5359 *
4beb4867 5360 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5361 */
5362int
5363mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5364 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5365{
5366 u16 smid;
d37306ca 5367 u8 issue_reset = 0;
f92363d1
SR
5368 int rc;
5369 void *request;
f92363d1 5370
919d8a3f 5371 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
5372
5373 mutex_lock(&ioc->base_cmds.mutex);
5374
5375 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
919d8a3f 5376 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
f92363d1
SR
5377 rc = -EAGAIN;
5378 goto out;
5379 }
5380
f4305749
SP
5381 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5382 if (rc)
5383 goto out;
f92363d1
SR
5384
5385 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5386 if (!smid) {
919d8a3f 5387 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
5388 rc = -EAGAIN;
5389 goto out;
5390 }
5391
5392 rc = 0;
5393 ioc->base_cmds.status = MPT3_CMD_PENDING;
5394 request = mpt3sas_base_get_msg_frame(ioc, smid);
5395 ioc->base_cmds.smid = smid;
 5396 memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
5397 init_completion(&ioc->base_cmds.done);
40114bde 5398 mpt3sas_base_put_smid_default(ioc, smid);
8bbb1cf6 5399 wait_for_completion_timeout(&ioc->base_cmds.done,
f92363d1
SR
5400 msecs_to_jiffies(10000));
5401 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
d37306ca
C
5402 issue_reset =
5403 mpt3sas_base_check_cmd_timeout(ioc,
5404 ioc->base_cmds.status, mpi_request,
5405 sizeof(Mpi2SepRequest_t)/4);
f92363d1
SR
5406 goto issue_host_reset;
5407 }
5408 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5409 memcpy(mpi_reply, ioc->base_cmds.reply,
5410 sizeof(Mpi2SepReply_t));
5411 else
5412 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
5413 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5414 goto out;
5415
5416 issue_host_reset:
5417 if (issue_reset)
98c56ad3 5418 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
f92363d1
SR
5419 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5420 rc = -EFAULT;
5421 out:
5422 mutex_unlock(&ioc->base_cmds.mutex);
5423 return rc;
5424}
5425
5426/**
5427 * _base_get_port_facts - obtain port facts reply and save in ioc
5428 * @ioc: per adapter object
4beb4867 5429 * @port: port number
f92363d1 5430 *
4beb4867 5431 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5432 */
5433static int
98c56ad3 5434_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
f92363d1
SR
5435{
5436 Mpi2PortFactsRequest_t mpi_request;
5437 Mpi2PortFactsReply_t mpi_reply;
5438 struct mpt3sas_port_facts *pfacts;
5439 int mpi_reply_sz, mpi_request_sz, r;
5440
919d8a3f 5441 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
5442
5443 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
5444 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
5445 memset(&mpi_request, 0, mpi_request_sz);
5446 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
5447 mpi_request.PortNumber = port;
5448 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
98c56ad3 5449 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
f92363d1
SR
5450
5451 if (r != 0) {
919d8a3f 5452 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
f92363d1
SR
5453 return r;
5454 }
5455
5456 pfacts = &ioc->pfacts[port];
5457 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
5458 pfacts->PortNumber = mpi_reply.PortNumber;
5459 pfacts->VP_ID = mpi_reply.VP_ID;
5460 pfacts->VF_ID = mpi_reply.VF_ID;
5461 pfacts->MaxPostedCmdBuffers =
5462 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
5463
5464 return 0;
5465}
5466
4dc8c808
SR
5467/**
5468 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
5469 * @ioc: per adapter object
 5470 * @timeout: timeout in seconds
4dc8c808 5471 *
4beb4867 5472 * Return: 0 for success, non-zero for failure.
4dc8c808
SR
5473 */
5474static int
98c56ad3 5475_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
4dc8c808
SR
5476{
5477 u32 ioc_state;
5478 int rc;
5479
1f95a47e 5480 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4dc8c808
SR
5481
5482 if (ioc->pci_error_recovery) {
1f95a47e
JP
5483 dfailprintk(ioc,
5484 ioc_info(ioc, "%s: host in pci error recovery\n",
5485 __func__));
4dc8c808
SR
5486 return -EFAULT;
5487 }
5488
5489 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
1f95a47e
JP
5490 dhsprintk(ioc,
5491 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
5492 __func__, ioc_state));
4dc8c808
SR
5493
5494 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
5495 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5496 return 0;
5497
5498 if (ioc_state & MPI2_DOORBELL_USED) {
1f95a47e 5499 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
4dc8c808
SR
5500 goto issue_diag_reset;
5501 }
5502
5503 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
5504 mpt3sas_base_fault_info(ioc, ioc_state &
5505 MPI2_DOORBELL_DATA_MASK);
5506 goto issue_diag_reset;
5507 }
5508
98c56ad3 5509 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
4dc8c808 5510 if (ioc_state) {
1f95a47e
JP
5511 dfailprintk(ioc,
5512 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5513 __func__, ioc_state));
4dc8c808
SR
5514 return -EFAULT;
5515 }
5516
5517 issue_diag_reset:
98c56ad3 5518 rc = _base_diag_reset(ioc);
4dc8c808
SR
5519 return rc;
5520}
5521
f92363d1
SR
5522/**
5523 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
5524 * @ioc: per adapter object
f92363d1 5525 *
4beb4867 5526 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5527 */
5528static int
98c56ad3 5529_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
5530{
5531 Mpi2IOCFactsRequest_t mpi_request;
5532 Mpi2IOCFactsReply_t mpi_reply;
5533 struct mpt3sas_facts *facts;
5534 int mpi_reply_sz, mpi_request_sz, r;
5535
919d8a3f 5536 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1 5537
98c56ad3 5538 r = _base_wait_for_iocstate(ioc, 10);
4dc8c808 5539 if (r) {
1f95a47e
JP
5540 dfailprintk(ioc,
5541 ioc_info(ioc, "%s: failed getting to correct state\n",
5542 __func__));
4dc8c808
SR
5543 return r;
5544 }
f92363d1
SR
5545 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
5546 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
5547 memset(&mpi_request, 0, mpi_request_sz);
5548 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
5549 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
98c56ad3 5550 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
f92363d1
SR
5551
5552 if (r != 0) {
919d8a3f 5553 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
f92363d1
SR
5554 return r;
5555 }
5556
5557 facts = &ioc->facts;
5558 memset(facts, 0, sizeof(struct mpt3sas_facts));
5559 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
5560 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
5561 facts->VP_ID = mpi_reply.VP_ID;
5562 facts->VF_ID = mpi_reply.VF_ID;
5563 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
5564 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
5565 facts->WhoInit = mpi_reply.WhoInit;
5566 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
5567 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
2b48be65
C
5568 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
5569 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
5570 ioc->combined_reply_queue = 0;
f92363d1
SR
5571 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
5572 facts->MaxReplyDescriptorPostQueueDepth =
5573 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
5574 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
5575 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
5576 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
5577 ioc->ir_firmware = 1;
9b05c91a 5578 if ((facts->IOCCapabilities &
06f5f976 5579 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
9b05c91a 5580 ioc->rdpq_array_capable = 1;
f92363d1
SR
5581 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
5582 facts->IOCRequestFrameSize =
5583 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
ebb3024e
SS
5584 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
5585 facts->IOCMaxChainSegmentSize =
5586 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
5587 }
f92363d1
SR
5588 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
5589 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
5590 ioc->shost->max_id = -1;
5591 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
5592 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
5593 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
5594 facts->HighPriorityCredit =
5595 le16_to_cpu(mpi_reply.HighPriorityCredit);
5596 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
5597 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
016d5c35
SPS
5598 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
5599
5600 /*
5601 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
5602 */
5603 ioc->page_size = 1 << facts->CurrentHostPageSize;
5604 if (ioc->page_size == 1) {
919d8a3f 5605 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
016d5c35
SPS
5606 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
5607 }
919d8a3f
JP
5608 dinitprintk(ioc,
5609 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
5610 facts->CurrentHostPageSize));
5611
5612 dinitprintk(ioc,
5613 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
5614 facts->RequestCredit, facts->MaxChainDepth));
5615 dinitprintk(ioc,
5616 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
5617 facts->IOCRequestFrameSize * 4,
5618 facts->ReplyFrameSize * 4));
f92363d1
SR
5619 return 0;
5620}
5621
5622/**
5623 * _base_send_ioc_init - send ioc_init to firmware
5624 * @ioc: per adapter object
f92363d1 5625 *
4beb4867 5626 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5627 */
5628static int
98c56ad3 5629_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
5630{
5631 Mpi2IOCInitRequest_t mpi_request;
5632 Mpi2IOCInitReply_t mpi_reply;
9b05c91a 5633 int i, r = 0;
23409bd4 5634 ktime_t current_time;
f92363d1 5635 u16 ioc_status;
9b05c91a 5636 u32 reply_post_free_array_sz = 0;
f92363d1 5637
919d8a3f 5638 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
5639
5640 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
5641 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
5642 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
5643 mpi_request.VF_ID = 0; /* TODO */
5644 mpi_request.VP_ID = 0;
d357e84d 5645 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
f92363d1 5646 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
016d5c35 5647 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
f92363d1
SR
5648
5649 if (_base_is_controller_msix_enabled(ioc))
5650 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
5651 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
5652 mpi_request.ReplyDescriptorPostQueueDepth =
5653 cpu_to_le16(ioc->reply_post_queue_depth);
5654 mpi_request.ReplyFreeQueueDepth =
5655 cpu_to_le16(ioc->reply_free_queue_depth);
5656
5657 mpi_request.SenseBufferAddressHigh =
5658 cpu_to_le32((u64)ioc->sense_dma >> 32);
5659 mpi_request.SystemReplyAddressHigh =
5660 cpu_to_le32((u64)ioc->reply_dma >> 32);
5661 mpi_request.SystemRequestFrameBaseAddress =
5662 cpu_to_le64((u64)ioc->request_dma);
5663 mpi_request.ReplyFreeQueueAddress =
5664 cpu_to_le64((u64)ioc->reply_free_dma);
f92363d1 5665
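	/*
	 * When the RDPQ array is enabled, each reply descriptor post queue is
	 * described by its own entry in reply_post_free_array; otherwise the
	 * address of the single contiguous queue is passed directly.
	 */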
9b05c91a
SR
5666 if (ioc->rdpq_array_enable) {
5667 reply_post_free_array_sz = ioc->reply_queue_count *
5668 sizeof(Mpi2IOCInitRDPQArrayEntry);
cd33223b 5669 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
9b05c91a 5670 for (i = 0; i < ioc->reply_queue_count; i++)
cd33223b 5671 ioc->reply_post_free_array[i].RDPQBaseAddress =
9b05c91a
SR
5672 cpu_to_le64(
5673 (u64)ioc->reply_post[i].reply_post_free_dma);
5674 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
5675 mpi_request.ReplyDescriptorPostQueueAddress =
cd33223b 5676 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
9b05c91a
SR
5677 } else {
5678 mpi_request.ReplyDescriptorPostQueueAddress =
5679 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
5680 }
f92363d1
SR
5681
5682 /* This time stamp specifies number of milliseconds
5683 * since epoch ~ midnight January 1, 1970.
5684 */
23409bd4
TR
5685 current_time = ktime_get_real();
5686 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
f92363d1
SR
5687
5688 if (ioc->logging_level & MPT_DEBUG_INIT) {
5689 __le32 *mfp;
5690 int i;
5691
5692 mfp = (__le32 *)&mpi_request;
5693 pr_info("\toffset:data\n");
5694 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
5695 pr_info("\t[0x%02x]:%08x\n", i*4,
5696 le32_to_cpu(mfp[i]));
5697 }
5698
5699 r = _base_handshake_req_reply_wait(ioc,
5700 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
98c56ad3 5701 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
f92363d1
SR
5702
5703 if (r != 0) {
919d8a3f 5704 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
cd33223b 5705 return r;
f92363d1
SR
5706 }
5707
5708 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5709 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
5710 mpi_reply.IOCLogInfo) {
919d8a3f 5711 ioc_err(ioc, "%s: failed\n", __func__);
f92363d1
SR
5712 r = -EIO;
5713 }
5714
9b05c91a 5715 return r;
f92363d1
SR
5716}
5717
5718/**
5719 * mpt3sas_port_enable_done - command completion routine for port enable
5720 * @ioc: per adapter object
5721 * @smid: system request message index
5722 * @msix_index: MSIX table index supplied by the OS
5723 * @reply: reply message frame(lower 32bit addr)
5724 *
4beb4867
BVA
5725 * Return: 1 meaning mf should be freed from _base_interrupt
5726 * 0 means the mf is freed from this function.
f92363d1
SR
5727 */
5728u8
5729mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
5730 u32 reply)
5731{
5732 MPI2DefaultReply_t *mpi_reply;
5733 u16 ioc_status;
5734
5735 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
5736 return 1;
5737
5738 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5739 if (!mpi_reply)
5740 return 1;
5741
5742 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
5743 return 1;
5744
5745 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
5746 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
5747 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
5748 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
5749 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5750 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5751 ioc->port_enable_failed = 1;
5752
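	/*
	 * While the driver is still loading, completion is reported through
	 * the async scan (scan_start/scan_finished) path; otherwise wake up
	 * the caller sleeping in _base_send_port_enable().
	 */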
5753 if (ioc->is_driver_loading) {
5754 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
5755 mpt3sas_port_enable_complete(ioc);
5756 return 1;
5757 } else {
5758 ioc->start_scan_failed = ioc_status;
5759 ioc->start_scan = 0;
5760 return 1;
5761 }
5762 }
5763 complete(&ioc->port_enable_cmds.done);
5764 return 1;
5765}
5766
5767/**
 5768 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
5769 * @ioc: per adapter object
f92363d1 5770 *
4beb4867 5771 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5772 */
5773static int
98c56ad3 5774_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
5775{
5776 Mpi2PortEnableRequest_t *mpi_request;
5777 Mpi2PortEnableReply_t *mpi_reply;
f92363d1
SR
5778 int r = 0;
5779 u16 smid;
5780 u16 ioc_status;
5781
919d8a3f 5782 ioc_info(ioc, "sending port enable !!\n");
f92363d1
SR
5783
5784 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
919d8a3f 5785 ioc_err(ioc, "%s: internal command already in use\n", __func__);
f92363d1
SR
5786 return -EAGAIN;
5787 }
5788
5789 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5790 if (!smid) {
919d8a3f 5791 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
5792 return -EAGAIN;
5793 }
5794
5795 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5796 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5797 ioc->port_enable_cmds.smid = smid;
5798 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5799 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5800
5801 init_completion(&ioc->port_enable_cmds.done);
40114bde 5802 mpt3sas_base_put_smid_default(ioc, smid);
8bbb1cf6 5803 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
f92363d1 5804 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
919d8a3f 5805 ioc_err(ioc, "%s: timeout\n", __func__);
f92363d1
SR
5806 _debug_dump_mf(mpi_request,
5807 sizeof(Mpi2PortEnableRequest_t)/4);
5808 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
5809 r = -EFAULT;
5810 else
5811 r = -ETIME;
5812 goto out;
5813 }
5814
5815 mpi_reply = ioc->port_enable_cmds.reply;
5816 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
5817 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
919d8a3f
JP
5818 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
5819 __func__, ioc_status);
f92363d1
SR
5820 r = -EFAULT;
5821 goto out;
5822 }
5823
5824 out:
5825 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
919d8a3f 5826 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
f92363d1
SR
5827 return r;
5828}
5829
5830/**
5831 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
5832 * @ioc: per adapter object
5833 *
4beb4867 5834 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5835 */
5836int
5837mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
5838{
5839 Mpi2PortEnableRequest_t *mpi_request;
5840 u16 smid;
5841
919d8a3f 5842 ioc_info(ioc, "sending port enable !!\n");
f92363d1
SR
5843
5844 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
919d8a3f 5845 ioc_err(ioc, "%s: internal command already in use\n", __func__);
f92363d1
SR
5846 return -EAGAIN;
5847 }
5848
5849 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
5850 if (!smid) {
919d8a3f 5851 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
5852 return -EAGAIN;
5853 }
5854
5855 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
5856 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5857 ioc->port_enable_cmds.smid = smid;
5858 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
5859 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
5860
40114bde 5861 mpt3sas_base_put_smid_default(ioc, smid);
f92363d1
SR
5862 return 0;
5863}
5864
5865/**
 5866 * _base_determine_wait_on_discovery - disposition
5867 * @ioc: per adapter object
5868 *
5869 * Decide whether to wait on discovery to complete. Used to either
5870 * locate boot device, or report volumes ahead of physical devices.
5871 *
4beb4867 5872 * Return: 1 for wait, 0 for don't wait.
f92363d1
SR
5873 */
5874static int
5875_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
5876{
 5877 /* We wait for discovery to complete if IR firmware is loaded.
 5878 * The sas topology events arrive before PD events, so we need time to
 5879 * turn on the bit in ioc->pd_handles to indicate a PD.
 5880 * Also, it may be required to report Volumes ahead of physical
 5881 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
5882 */
5883 if (ioc->ir_firmware)
5884 return 1;
5885
5886 /* if no Bios, then we don't need to wait */
5887 if (!ioc->bios_pg3.BiosVersion)
5888 return 0;
5889
 5890 /* The Bios is present, so we drop down here.
 5891 *
 5892 * If there are any entries in the Bios Page 2, then we wait
5893 * for discovery to complete.
5894 */
5895
5896 /* Current Boot Device */
5897 if ((ioc->bios_pg2.CurrentBootDeviceForm &
5898 MPI2_BIOSPAGE2_FORM_MASK) ==
5899 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5900 /* Request Boot Device */
5901 (ioc->bios_pg2.ReqBootDeviceForm &
5902 MPI2_BIOSPAGE2_FORM_MASK) ==
5903 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
5904 /* Alternate Request Boot Device */
5905 (ioc->bios_pg2.ReqAltBootDeviceForm &
5906 MPI2_BIOSPAGE2_FORM_MASK) ==
5907 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
5908 return 0;
5909
5910 return 1;
5911}
5912
5913/**
5914 * _base_unmask_events - turn on notification for this event
5915 * @ioc: per adapter object
5916 * @event: firmware event
5917 *
5918 * The mask is stored in ioc->event_masks.
5919 */
5920static void
5921_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
5922{
5923 u32 desired_event;
5924
5925 if (event >= 128)
5926 return;
5927
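	/* the 128 possible events are spread across four 32-bit mask words;
	 * clearing a bit unmasks (enables) that event's notification
	 */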
5928 desired_event = (1 << (event % 32));
5929
5930 if (event < 32)
5931 ioc->event_masks[0] &= ~desired_event;
5932 else if (event < 64)
5933 ioc->event_masks[1] &= ~desired_event;
5934 else if (event < 96)
5935 ioc->event_masks[2] &= ~desired_event;
5936 else if (event < 128)
5937 ioc->event_masks[3] &= ~desired_event;
5938}
5939
5940/**
5941 * _base_event_notification - send event notification
5942 * @ioc: per adapter object
f92363d1 5943 *
4beb4867 5944 * Return: 0 for success, non-zero for failure.
f92363d1
SR
5945 */
5946static int
98c56ad3 5947_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
5948{
5949 Mpi2EventNotificationRequest_t *mpi_request;
f92363d1
SR
5950 u16 smid;
5951 int r = 0;
5952 int i;
5953
919d8a3f 5954 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
5955
5956 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
919d8a3f 5957 ioc_err(ioc, "%s: internal command already in use\n", __func__);
f92363d1
SR
5958 return -EAGAIN;
5959 }
5960
5961 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5962 if (!smid) {
919d8a3f 5963 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
f92363d1
SR
5964 return -EAGAIN;
5965 }
5966 ioc->base_cmds.status = MPT3_CMD_PENDING;
5967 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5968 ioc->base_cmds.smid = smid;
5969 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
5970 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5971 mpi_request->VF_ID = 0; /* TODO */
5972 mpi_request->VP_ID = 0;
5973 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
5974 mpi_request->EventMasks[i] =
5975 cpu_to_le32(ioc->event_masks[i]);
5976 init_completion(&ioc->base_cmds.done);
40114bde 5977 mpt3sas_base_put_smid_default(ioc, smid);
8bbb1cf6 5978 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
f92363d1 5979 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
919d8a3f 5980 ioc_err(ioc, "%s: timeout\n", __func__);
f92363d1
SR
5981 _debug_dump_mf(mpi_request,
5982 sizeof(Mpi2EventNotificationRequest_t)/4);
5983 if (ioc->base_cmds.status & MPT3_CMD_RESET)
5984 r = -EFAULT;
5985 else
5986 r = -ETIME;
5987 } else
919d8a3f 5988 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
f92363d1
SR
5989 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5990 return r;
5991}
5992
5993/**
5994 * mpt3sas_base_validate_event_type - validating event types
5995 * @ioc: per adapter object
4beb4867 5996 * @event_type: firmware event type mask requested by the application
f92363d1
SR
5997 *
 5998 * This will turn on firmware event notification when the application
 5999 * asks for that event. We don't mask events that are already enabled.
6000 */
6001void
6002mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6003{
6004 int i, j;
6005 u32 event_mask, desired_event;
6006 u8 send_update_to_fw;
6007
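	/* walk all four event mask words and unmask any event the
	 * application requested that isn't already enabled
	 */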
6008 for (i = 0, send_update_to_fw = 0; i <
6009 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6010 event_mask = ~event_type[i];
6011 desired_event = 1;
6012 for (j = 0; j < 32; j++) {
6013 if (!(event_mask & desired_event) &&
6014 (ioc->event_masks[i] & desired_event)) {
6015 ioc->event_masks[i] &= ~desired_event;
6016 send_update_to_fw = 1;
6017 }
6018 desired_event = (desired_event << 1);
6019 }
6020 }
6021
6022 if (!send_update_to_fw)
6023 return;
6024
6025 mutex_lock(&ioc->base_cmds.mutex);
98c56ad3 6026 _base_event_notification(ioc);
f92363d1
SR
6027 mutex_unlock(&ioc->base_cmds.mutex);
6028}
6029
6030/**
6031 * _base_diag_reset - the "big hammer" start of day reset
6032 * @ioc: per adapter object
f92363d1 6033 *
4beb4867 6034 * Return: 0 for success, non-zero for failure.
f92363d1
SR
6035 */
6036static int
98c56ad3 6037_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
6038{
6039 u32 host_diagnostic;
6040 u32 ioc_state;
6041 u32 count;
6042 u32 hcb_size;
6043
919d8a3f 6044 ioc_info(ioc, "sending diag reset !!\n");
f92363d1 6045
919d8a3f 6046 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
f92363d1
SR
6047
6048 count = 0;
6049 do {
6050 /* Write magic sequence to WriteSequence register
6051 * Loop until in diagnostic mode
6052 */
919d8a3f 6053 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
f92363d1
SR
6054 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6055 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6056 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6057 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6058 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6059 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6060 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6061
6062 /* wait 100 msec */
98c56ad3 6063 msleep(100);
f92363d1
SR
6064
6065 if (count++ > 20)
6066 goto out;
6067
306eaf27 6068 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
919d8a3f
JP
6069 drsprintk(ioc,
6070 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6071 count, host_diagnostic));
f92363d1
SR
6072
6073 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6074
306eaf27 6075 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
f92363d1 6076
919d8a3f 6077 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
f92363d1
SR
6078 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6079 &ioc->chip->HostDiagnostic);
6080
b453ff84 6081 /*This delay allows the chip PCIe hardware time to finish reset tasks*/
98c56ad3 6082 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
f92363d1 6083
b453ff84
SR
6084 /* Approximately 300 second max wait */
6085 for (count = 0; count < (300000000 /
6086 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
f92363d1 6087
306eaf27 6088 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
f92363d1
SR
6089
6090 if (host_diagnostic == 0xFFFFFFFF)
6091 goto out;
6092 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6093 break;
6094
98c56ad3 6095 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
f92363d1
SR
6096 }
6097
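	/*
	 * If the reset left the adapter in HCB mode, select the HCDW as the
	 * boot device and re-enable the HCB region before restarting the IOC.
	 */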
6098 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6099
919d8a3f
JP
6100 drsprintk(ioc,
6101 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
f92363d1
SR
6102 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6103 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6104 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6105
919d8a3f 6106 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
f92363d1
SR
6107 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6108 &ioc->chip->HCBSize);
6109 }
6110
919d8a3f 6111 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
f92363d1
SR
6112 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6113 &ioc->chip->HostDiagnostic);
6114
919d8a3f
JP
6115 drsprintk(ioc,
6116 ioc_info(ioc, "disable writes to the diagnostic register\n"));
f92363d1
SR
6117 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6118
919d8a3f 6119 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
98c56ad3 6120 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
f92363d1 6121 if (ioc_state) {
919d8a3f
JP
6122 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6123 __func__, ioc_state);
f92363d1
SR
6124 goto out;
6125 }
6126
919d8a3f 6127 ioc_info(ioc, "diag reset: SUCCESS\n");
f92363d1
SR
6128 return 0;
6129
6130 out:
919d8a3f 6131 ioc_err(ioc, "diag reset: FAILED\n");
f92363d1
SR
6132 return -EFAULT;
6133}
6134
6135/**
6136 * _base_make_ioc_ready - put controller in READY state
6137 * @ioc: per adapter object
f92363d1
SR
6138 * @type: FORCE_BIG_HAMMER or SOFT_RESET
6139 *
4beb4867 6140 * Return: 0 for success, non-zero for failure.
f92363d1
SR
6141 */
6142static int
98c56ad3 6143_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
f92363d1
SR
6144{
6145 u32 ioc_state;
6146 int rc;
6147 int count;
6148
919d8a3f 6149 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
6150
6151 if (ioc->pci_error_recovery)
6152 return 0;
6153
6154 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
919d8a3f
JP
6155 dhsprintk(ioc,
6156 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6157 __func__, ioc_state));
f92363d1
SR
6158
6159 /* if in RESET state, it should move to READY state shortly */
6160 count = 0;
6161 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
6162 while ((ioc_state & MPI2_IOC_STATE_MASK) !=
6163 MPI2_IOC_STATE_READY) {
6164 if (count++ == 10) {
919d8a3f
JP
6165 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6166 __func__, ioc_state);
f92363d1
SR
6167 return -EFAULT;
6168 }
98c56ad3 6169 ssleep(1);
f92363d1
SR
6170 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6171 }
6172 }
6173
6174 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
6175 return 0;
6176
6177 if (ioc_state & MPI2_DOORBELL_USED) {
919d8a3f 6178 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
f92363d1
SR
6179 goto issue_diag_reset;
6180 }
6181
6182 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6183 mpt3sas_base_fault_info(ioc, ioc_state &
6184 MPI2_DOORBELL_DATA_MASK);
6185 goto issue_diag_reset;
6186 }
6187
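	/*
	 * Prefer the lighter message unit reset when the IOC is operational;
	 * fall back to a full diagnostic reset when it fails, when the IOC is
	 * in another state, or when the caller asked for the big hammer.
	 */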
6188 if (type == FORCE_BIG_HAMMER)
6189 goto issue_diag_reset;
6190
6191 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6192 if (!(_base_send_ioc_reset(ioc,
98c56ad3 6193 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
f92363d1
SR
6194 return 0;
6195 }
6196
6197 issue_diag_reset:
98c56ad3 6198 rc = _base_diag_reset(ioc);
f92363d1
SR
6199 return rc;
6200}
6201
6202/**
6203 * _base_make_ioc_operational - put controller in OPERATIONAL state
6204 * @ioc: per adapter object
f92363d1 6205 *
4beb4867 6206 * Return: 0 for success, non-zero for failure.
f92363d1
SR
6207 */
6208static int
98c56ad3 6209_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
f92363d1 6210{
5ec8a175 6211 int r, i, index;
f92363d1
SR
6212 unsigned long flags;
6213 u32 reply_address;
6214 u16 smid;
6215 struct _tr_list *delayed_tr, *delayed_tr_next;
fd0331b3
SS
6216 struct _sc_list *delayed_sc, *delayed_sc_next;
6217 struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
7786ab6a 6218 u8 hide_flag;
f92363d1 6219 struct adapter_reply_queue *reply_q;
5ec8a175 6220 Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
f92363d1 6221
919d8a3f 6222 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
6223
6224 /* clean the delayed target reset list */
6225 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6226 &ioc->delayed_tr_list, list) {
6227 list_del(&delayed_tr->list);
6228 kfree(delayed_tr);
6229 }
6230
6231
6232 list_for_each_entry_safe(delayed_tr, delayed_tr_next,
6233 &ioc->delayed_tr_volume_list, list) {
6234 list_del(&delayed_tr->list);
6235 kfree(delayed_tr);
6236 }
6237
fd0331b3
SS
6238 list_for_each_entry_safe(delayed_sc, delayed_sc_next,
6239 &ioc->delayed_sc_list, list) {
6240 list_del(&delayed_sc->list);
6241 kfree(delayed_sc);
6242 }
6243
6244 list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
6245 &ioc->delayed_event_ack_list, list) {
6246 list_del(&delayed_event_ack->list);
6247 kfree(delayed_event_ack);
6248 }
6249
f92363d1 6250 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
f92363d1
SR
6251
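	/* rebuild the free lists of high-priority and internal request
	 * trackers under the scsi_lookup_lock
	 */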
6252 /* hi-priority queue */
6253 INIT_LIST_HEAD(&ioc->hpr_free_list);
6254 smid = ioc->hi_priority_smid;
6255 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
6256 ioc->hpr_lookup[i].cb_idx = 0xFF;
6257 ioc->hpr_lookup[i].smid = smid;
6258 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
6259 &ioc->hpr_free_list);
6260 }
6261
6262 /* internal queue */
6263 INIT_LIST_HEAD(&ioc->internal_free_list);
6264 smid = ioc->internal_smid;
6265 for (i = 0; i < ioc->internal_depth; i++, smid++) {
6266 ioc->internal_lookup[i].cb_idx = 0xFF;
6267 ioc->internal_lookup[i].smid = smid;
6268 list_add_tail(&ioc->internal_lookup[i].tracker_list,
6269 &ioc->internal_free_list);
6270 }
6271
f92363d1
SR
6272 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6273
6274 /* initialize Reply Free Queue */
6275 for (i = 0, reply_address = (u32)ioc->reply_dma ;
6276 i < ioc->reply_free_queue_depth ; i++, reply_address +=
b4472d71 6277 ioc->reply_sz) {
f92363d1 6278 ioc->reply_free[i] = cpu_to_le32(reply_address);
b4472d71
SPS
6279 if (ioc->is_mcpu_endpoint)
6280 _base_clone_reply_to_sys_mem(ioc,
cf6bf971 6281 reply_address, i);
b4472d71 6282 }
f92363d1
SR
6283
6284 /* initialize reply queues */
6285 if (ioc->is_driver_loading)
6286 _base_assign_reply_queues(ioc);
6287
6288 /* initialize Reply Post Free Queue */
5ec8a175
CO
6289 index = 0;
6290 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
f92363d1 6291 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
5ec8a175
CO
6292 /*
6293 * If RDPQ is enabled, switch to the next allocation.
6294 * Otherwise advance within the contiguous region.
6295 */
6296 if (ioc->rdpq_array_enable) {
6297 reply_q->reply_post_free =
6298 ioc->reply_post[index++].reply_post_free;
6299 } else {
6300 reply_q->reply_post_free = reply_post_free_contig;
6301 reply_post_free_contig += ioc->reply_post_queue_depth;
6302 }
6303
f92363d1 6304 reply_q->reply_post_host_index = 0;
f92363d1
SR
6305 for (i = 0; i < ioc->reply_post_queue_depth; i++)
6306 reply_q->reply_post_free[i].Words =
6307 cpu_to_le64(ULLONG_MAX);
6308 if (!_base_is_controller_msix_enabled(ioc))
6309 goto skip_init_reply_post_free_queue;
f92363d1
SR
6310 }
6311 skip_init_reply_post_free_queue:
6312
98c56ad3 6313 r = _base_send_ioc_init(ioc);
f92363d1
SR
6314 if (r)
6315 return r;
6316
6317 /* initialize reply free host index */
6318 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
6319 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
6320
6321 /* initialize reply post host index */
6322 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
0bb337c9 6323 if (ioc->combined_reply_queue)
fb77bb53
SR
6324 writel((reply_q->msix_index & 7)<<
6325 MPI2_RPHI_MSIX_INDEX_SHIFT,
6326 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
6327 else
6328 writel(reply_q->msix_index <<
6329 MPI2_RPHI_MSIX_INDEX_SHIFT,
6330 &ioc->chip->ReplyPostHostIndex);
6331
f92363d1
SR
6332 if (!_base_is_controller_msix_enabled(ioc))
6333 goto skip_init_reply_post_host_index;
6334 }
6335
6336 skip_init_reply_post_host_index:
6337
6338 _base_unmask_interrupts(ioc);
3d29ed85
C
6339
6340 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6341 r = _base_display_fwpkg_version(ioc);
6342 if (r)
6343 return r;
6344 }
6345
6346 _base_static_config_pages(ioc);
98c56ad3 6347 r = _base_event_notification(ioc);
f92363d1
SR
6348 if (r)
6349 return r;
6350
f92363d1 6351 if (ioc->is_driver_loading) {
7786ab6a
SR
6352
6353 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
6354 == 0x80) {
6355 hide_flag = (u8) (
6356 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
6357 MFG_PAGE10_HIDE_SSDS_MASK);
6358 if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
6359 ioc->mfg_pg10_hide_flag = hide_flag;
6360 }
6361
f92363d1
SR
6362 ioc->wait_for_discovery_to_complete =
6363 _base_determine_wait_on_discovery(ioc);
6364
6365 return r; /* scan_start and scan_finished support */
6366 }
6367
98c56ad3 6368 r = _base_send_port_enable(ioc);
f92363d1
SR
6369 if (r)
6370 return r;
6371
6372 return r;
6373}
6374
6375/**
 6376 * mpt3sas_base_free_resources - free controller resources
6377 * @ioc: per adapter object
f92363d1
SR
6378 */
6379void
6380mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
6381{
919d8a3f 6382 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1 6383
08c4d550
SR
6384 /* synchronizing freeing resource with pci_access_mutex lock */
6385 mutex_lock(&ioc->pci_access_mutex);
cf9bd21a
JL
6386 if (ioc->chip_phys && ioc->chip) {
6387 _base_mask_interrupts(ioc);
6388 ioc->shost_recovery = 1;
98c56ad3 6389 _base_make_ioc_ready(ioc, SOFT_RESET);
cf9bd21a
JL
6390 ioc->shost_recovery = 0;
6391 }
6392
580d4e31 6393 mpt3sas_base_unmap_resources(ioc);
08c4d550 6394 mutex_unlock(&ioc->pci_access_mutex);
f92363d1
SR
6395 return;
6396}
6397
6398/**
6399 * mpt3sas_base_attach - attach controller instance
6400 * @ioc: per adapter object
6401 *
4beb4867 6402 * Return: 0 for success, non-zero for failure.
f92363d1
SR
6403 */
6404int
6405mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
6406{
6407 int r, i;
6408 int cpu_id, last_cpu_id = 0;
6409
919d8a3f 6410 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
6411
6412 /* setup cpu_msix_table */
6413 ioc->cpu_count = num_online_cpus();
6414 for_each_online_cpu(cpu_id)
6415 last_cpu_id = cpu_id;
6416 ioc->cpu_msix_table_sz = last_cpu_id + 1;
6417 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
6418 ioc->reply_queue_count = 1;
6419 if (!ioc->cpu_msix_table) {
919d8a3f
JP
6420 dfailprintk(ioc,
6421 ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
f92363d1
SR
6422 r = -ENOMEM;
6423 goto out_free_resources;
6424 }
6425
7786ab6a
SR
6426 if (ioc->is_warpdrive) {
6427 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
6428 sizeof(resource_size_t *), GFP_KERNEL);
6429 if (!ioc->reply_post_host_index) {
919d8a3f
JP
6430 dfailprintk(ioc,
6431 ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
7786ab6a
SR
6432 r = -ENOMEM;
6433 goto out_free_resources;
6434 }
6435 }
6436
9b05c91a
SR
6437 ioc->rdpq_array_enable_assigned = 0;
6438 ioc->dma_mask = 0;
b8992029
SP
6439 if (ioc->is_aero_ioc)
6440 ioc->base_readl = &_base_readl_aero;
6441 else
6442 ioc->base_readl = &_base_readl;
f92363d1
SR
6443 r = mpt3sas_base_map_resources(ioc);
6444 if (r)
6445 goto out_free_resources;
6446
f92363d1 6447 pci_set_drvdata(ioc->pdev, ioc->shost);
98c56ad3 6448 r = _base_get_ioc_facts(ioc);
f92363d1
SR
6449 if (r)
6450 goto out_free_resources;
6451
471ef9d4
SR
6452 switch (ioc->hba_mpi_version_belonged) {
6453 case MPI2_VERSION:
6454 ioc->build_sg_scmd = &_base_build_sg_scmd;
6455 ioc->build_sg = &_base_build_sg;
6456 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
6457 break;
6458 case MPI25_VERSION:
b130b0d5 6459 case MPI26_VERSION:
471ef9d4
SR
6460 /*
6461 * In SAS3.0,
6462 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
 6463 * Target Status - all require the IEEE formatted scatter gather
6464 * elements.
6465 */
6466 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
6467 ioc->build_sg = &_base_build_sg_ieee;
aff39e61 6468 ioc->build_nvme_prp = &_base_build_nvme_prp;
471ef9d4
SR
6469 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
6470 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
81c16f83 6471
471ef9d4
SR
6472 break;
6473 }
f92363d1 6474
40114bde
SP
6475 if (ioc->is_mcpu_endpoint)
6476 ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
6477 else
6478 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
81c16f83 6479
f92363d1
SR
6480 /*
 6481 * These function pointers are for other requests that don't
 6482 * require IEEE scatter gather elements.
6483 *
6484 * For example Configuration Pages and SAS IOUNIT Control don't.
6485 */
6486 ioc->build_sg_mpi = &_base_build_sg;
6487 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
6488
98c56ad3 6489 r = _base_make_ioc_ready(ioc, SOFT_RESET);
f92363d1
SR
6490 if (r)
6491 goto out_free_resources;
6492
6493 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
6494 sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
6495 if (!ioc->pfacts) {
6496 r = -ENOMEM;
6497 goto out_free_resources;
6498 }
6499
6500 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
98c56ad3 6501 r = _base_get_port_facts(ioc, i);
f92363d1
SR
6502 if (r)
6503 goto out_free_resources;
6504 }
6505
98c56ad3 6506 r = _base_allocate_memory_pools(ioc);
f92363d1
SR
6507 if (r)
6508 goto out_free_resources;
6509
6510 init_waitqueue_head(&ioc->reset_wq);
6511
6512 /* allocate memory pd handle bitmask list */
6513 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
6514 if (ioc->facts.MaxDevHandle % 8)
6515 ioc->pd_handles_sz++;
6516 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
6517 GFP_KERNEL);
6518 if (!ioc->pd_handles) {
6519 r = -ENOMEM;
6520 goto out_free_resources;
6521 }
6522 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
6523 GFP_KERNEL);
6524 if (!ioc->blocking_handles) {
6525 r = -ENOMEM;
6526 goto out_free_resources;
6527 }
6528
c696f7b8
SPS
6529 /* allocate memory for pending OS device add list */
6530 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
6531 if (ioc->facts.MaxDevHandle % 8)
6532 ioc->pend_os_device_add_sz++;
6533 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
6534 GFP_KERNEL);
6535 if (!ioc->pend_os_device_add)
6536 goto out_free_resources;
6537
6538 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
6539 ioc->device_remove_in_progress =
6540 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
6541 if (!ioc->device_remove_in_progress)
6542 goto out_free_resources;
6543
f92363d1
SR
6544 ioc->fwfault_debug = mpt3sas_fwfault_debug;
6545
6546 /* base internal command bits */
6547 mutex_init(&ioc->base_cmds.mutex);
6548 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6549 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6550
6551 /* port_enable command bits */
6552 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6553 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6554
6555 /* transport internal command bits */
6556 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6557 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
6558 mutex_init(&ioc->transport_cmds.mutex);
6559
6560 /* scsih internal command bits */
6561 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6562 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
6563 mutex_init(&ioc->scsih_cmds.mutex);
6564
6565 /* task management internal command bits */
6566 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6567 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
6568 mutex_init(&ioc->tm_cmds.mutex);
6569
6570 /* config page internal command bits */
6571 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6572 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
6573 mutex_init(&ioc->config_cmds.mutex);
6574
6575 /* ctl module internal command bits */
6576 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
6577 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
6578 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
6579 mutex_init(&ioc->ctl_cmds.mutex);
6580
a5dd7efd
CJ
6581 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
6582 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
6583 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
6584 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
f92363d1
SR
6585 r = -ENOMEM;
6586 goto out_free_resources;
6587 }
6588
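/* start with every firmware event masked (all bits set), then unmask only
 * the events this driver handles
 */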
6589 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6590 ioc->event_masks[i] = -1;
6591
6592 /* here we enable the events we care about */
6593 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
6594 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
6595 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
6596 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
6597 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
6598 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
6599 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
6600 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
6601 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
6602 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
2d8ce8c9 6603 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
b99b1993 6604 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
95540b8e 6605 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
4318c734
SPS
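/* SAS 3.5 (gen35) controllers support NVMe devices, so additionally unmask
 * the PCIe device status, enumeration and topology events
 */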
6606 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
6607 if (ioc->is_gen35_ioc) {
6608 _base_unmask_events(ioc,
6609 MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
6610 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
6611 _base_unmask_events(ioc,
6612 MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
6613 }
6614 }
98c56ad3 6615 r = _base_make_ioc_operational(ioc);
f92363d1
SR
6616 if (r)
6617 goto out_free_resources;
6618
16e179bd 6619 ioc->non_operational_loop = 0;
459325c4 6620 ioc->got_task_abort_from_ioctl = 0;
f92363d1
SR
6621 return 0;
6622
6623 out_free_resources:
6624
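/* initialization failed: flag the host for removal and undo all of the
 * setup performed above
 */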
6625 ioc->remove_host = 1;
6626
6627 mpt3sas_base_free_resources(ioc);
6628 _base_release_memory_pools(ioc);
6629 pci_set_drvdata(ioc->pdev, NULL);
6630 kfree(ioc->cpu_msix_table);
7786ab6a
SR
6631 if (ioc->is_warpdrive)
6632 kfree(ioc->reply_post_host_index);
f92363d1
SR
6633 kfree(ioc->pd_handles);
6634 kfree(ioc->blocking_handles);
c696f7b8
SPS
6635 kfree(ioc->device_remove_in_progress);
6636 kfree(ioc->pend_os_device_add);
f92363d1
SR
6637 kfree(ioc->tm_cmds.reply);
6638 kfree(ioc->transport_cmds.reply);
6639 kfree(ioc->scsih_cmds.reply);
6640 kfree(ioc->config_cmds.reply);
6641 kfree(ioc->base_cmds.reply);
6642 kfree(ioc->port_enable_cmds.reply);
6643 kfree(ioc->ctl_cmds.reply);
6644 kfree(ioc->ctl_cmds.sense);
6645 kfree(ioc->pfacts);
6646 ioc->ctl_cmds.reply = NULL;
6647 ioc->base_cmds.reply = NULL;
6648 ioc->tm_cmds.reply = NULL;
6649 ioc->scsih_cmds.reply = NULL;
6650 ioc->transport_cmds.reply = NULL;
6651 ioc->config_cmds.reply = NULL;
6652 ioc->pfacts = NULL;
6653 return r;
6654}
6655
6656
6657/**
6658 * mpt3sas_base_detach - remove controller instance
6659 * @ioc: per adapter object
f92363d1
SR
6660 */
6661void
6662mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
6663{
919d8a3f 6664 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
f92363d1
SR
6665
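/* stop the fault-polling watchdog, release PCI/interrupt and DMA resources,
 * then free the remaining per-adapter allocations
 */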
6666 mpt3sas_base_stop_watchdog(ioc);
6667 mpt3sas_base_free_resources(ioc);
6668 _base_release_memory_pools(ioc);
22a923c3 6669 mpt3sas_free_enclosure_list(ioc);
f92363d1
SR
6670 pci_set_drvdata(ioc->pdev, NULL);
6671 kfree(ioc->cpu_msix_table);
7786ab6a
SR
6672 if (ioc->is_warpdrive)
6673 kfree(ioc->reply_post_host_index);
f92363d1
SR
6674 kfree(ioc->pd_handles);
6675 kfree(ioc->blocking_handles);
c696f7b8
SPS
6676 kfree(ioc->device_remove_in_progress);
6677 kfree(ioc->pend_os_device_add);
f92363d1
SR
6678 kfree(ioc->pfacts);
6679 kfree(ioc->ctl_cmds.reply);
6680 kfree(ioc->ctl_cmds.sense);
6681 kfree(ioc->base_cmds.reply);
6682 kfree(ioc->port_enable_cmds.reply);
6683 kfree(ioc->tm_cmds.reply);
6684 kfree(ioc->transport_cmds.reply);
6685 kfree(ioc->scsih_cmds.reply);
6686 kfree(ioc->config_cmds.reply);
6687}
6688
6689/**
c7a35705 6690 * _base_pre_reset_handler - pre reset handler
f92363d1 6691 * @ioc: per adapter object
f92363d1 6692 */
c7a35705 6693static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
f92363d1 6694{
c7a35705
BVA
6695 mpt3sas_scsih_pre_reset_handler(ioc);
6696 mpt3sas_ctl_pre_reset_handler(ioc);
919d8a3f 6697 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
c7a35705
BVA
6698}
6699
6700/**
6701 * _base_after_reset_handler - after reset handler
6702 * @ioc: per adapter object
6703 */
6704static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
6705{
6706 mpt3sas_scsih_after_reset_handler(ioc);
6707 mpt3sas_ctl_after_reset_handler(ioc);
919d8a3f 6708 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
c7a35705
BVA
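/* terminate internal commands that were outstanding when the reset
 * occurred: mark them MPT3_CMD_RESET, release their smids and unblock
 * their waiters
 */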
6709 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
6710 ioc->transport_cmds.status |= MPT3_CMD_RESET;
6711 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
6712 complete(&ioc->transport_cmds.done);
6713 }
6714 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6715 ioc->base_cmds.status |= MPT3_CMD_RESET;
6716 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
6717 complete(&ioc->base_cmds.done);
6718 }
6719 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6720 ioc->port_enable_failed = 1;
6721 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
6722 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
6723 if (ioc->is_driver_loading) {
6724 ioc->start_scan_failed =
6725 MPI2_IOCSTATUS_INTERNAL_ERROR;
6726 ioc->start_scan = 0;
6727 ioc->port_enable_cmds.status =
6728 MPT3_CMD_NOT_USED;
6729 } else {
6730 complete(&ioc->port_enable_cmds.done);
f92363d1 6731 }
f92363d1 6732 }
c7a35705
BVA
6733 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
6734 ioc->config_cmds.status |= MPT3_CMD_RESET;
6735 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
6736 ioc->config_cmds.smid = USHRT_MAX;
6737 complete(&ioc->config_cmds.done);
6738 }
6739}
6740
6741/**
6742 * _base_reset_done_handler - reset done handler
6743 * @ioc: per adapter object
6744 */
6745static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
6746{
6747 mpt3sas_scsih_reset_done_handler(ioc);
6748 mpt3sas_ctl_reset_done_handler(ioc);
919d8a3f 6749 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
f92363d1
SR
6750}
6751
6752/**
c666d3be 6753 * mpt3sas_wait_for_commands_to_complete - wait for pending I/O to complete
f92363d1 6754 * @ioc: Pointer to MPT3SAS_ADAPTER structure
f92363d1 6755 *
272e253c 6756 * This function waits for up to 10 seconds for all pending commands to complete
f92363d1
SR
6757 * prior to putting the controller into reset.
6758 */
c666d3be
SR
6759void
6760mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
f92363d1
SR
6761{
6762 u32 ioc_state;
f92363d1
SR
6763
6764 ioc->pending_io_count = 0;
f92363d1
SR
6765
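/* if the IOC is not in the operational state, outstanding commands will
 * never be completed by the firmware, so do not wait for them
 */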
6766 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6767 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
6768 return;
6769
6770 /* pending command count */
c84b023a 6771 ioc->pending_io_count = scsi_host_busy(ioc->shost);
f92363d1
SR
6772
6773 if (!ioc->pending_io_count)
6774 return;
6775
6776 /* wait for pending commands to complete */
6777 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
6778}
6779
6780/**
6781 * mpt3sas_base_hard_reset_handler - reset controller
6782 * @ioc: Pointer to MPT_ADAPTER structure
f92363d1
SR
6783 * @type: FORCE_BIG_HAMMER or SOFT_RESET
6784 *
4beb4867 6785 * Return: 0 for success, non-zero for failure.
f92363d1
SR
6786 */
6787int
98c56ad3 6788mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
f92363d1
SR
6789 enum reset_type type)
6790{
6791 int r;
6792 unsigned long flags;
6793 u32 ioc_state;
6794 u8 is_fault = 0, is_trigger = 0;
6795
919d8a3f 6796 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
f92363d1
SR
6797
6798 if (ioc->pci_error_recovery) {
919d8a3f 6799 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
f92363d1
SR
6800 r = 0;
6801 goto out_unlocked;
6802 }
6803
6804 if (mpt3sas_fwfault_debug)
6805 mpt3sas_halt_firmware(ioc);
6806
f92363d1 6807 /* wait for an active reset in progress to complete */
982ea6f9 6808 mutex_lock(&ioc->reset_in_progress_mutex);
f92363d1
SR
6809
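/* raise shost_recovery so the driver stops accepting new I/O while the
 * reset is in progress
 */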
6810 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6811 ioc->shost_recovery = 1;
6812 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6813
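/* if a trace diag buffer is registered and not yet released, note whether
 * this reset was provoked by a firmware fault so the matching master
 * trigger can be fired once the reset completes
 */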
6814 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6815 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
6816 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
6817 MPT3_DIAG_BUFFER_IS_RELEASED))) {
6818 is_trigger = 1;
6819 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6820 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
6821 is_fault = 1;
6822 }
c7a35705 6823 _base_pre_reset_handler(ioc);
c666d3be 6824 mpt3sas_wait_for_commands_to_complete(ioc);
f92363d1 6825 _base_mask_interrupts(ioc);
98c56ad3 6826 r = _base_make_ioc_ready(ioc, type);
f92363d1
SR
6827 if (r)
6828 goto out;
c7a35705 6829 _base_after_reset_handler(ioc);
f92363d1
SR
6830
6831 /* If this hard reset interrupted an active port enable during driver
6832 * load, there is no reason to make the IOC operational again.
6833 */
6834 if (ioc->is_driver_loading && ioc->port_enable_failed) {
6835 ioc->remove_host = 1;
6836 r = -EFAULT;
6837 goto out;
6838 }
98c56ad3 6839 r = _base_get_ioc_facts(ioc);
f92363d1
SR
6840 if (r)
6841 goto out;
9b05c91a
SR
6842
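/* the reply descriptor post queues were allocated in RDPQ mode; if the
 * (possibly reflashed) firmware no longer reports RDPQ support, the
 * existing layout is unusable and the only safe action is to panic
 */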
6843 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
6844 panic("%s: Issue occurred with flashing controller firmware."
6845 " Please reboot the system and ensure that the correct"
6846 " firmware version is running\n", ioc->name);
6847
98c56ad3 6848 r = _base_make_ioc_operational(ioc);
f92363d1 6849 if (!r)
c7a35705 6850 _base_reset_done_handler(ioc);
f92363d1
SR
6851
6852 out:
919d8a3f
JP
6853 dtmprintk(ioc,
6854 ioc_info(ioc, "%s: %s\n",
6855 __func__, r == 0 ? "SUCCESS" : "FAILED"));
f92363d1
SR
6856
6857 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
f92363d1
SR
6858 ioc->shost_recovery = 0;
6859 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6860 ioc->ioc_reset_count++;
6861 mutex_unlock(&ioc->reset_in_progress_mutex);
6862
6863 out_unlocked:
6864 if ((r == 0) && is_trigger) {
6865 if (is_fault)
6866 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
6867 else
6868 mpt3sas_trigger_master(ioc,
6869 MASTER_TRIGGER_ADAPTER_RESET);
6870 }
919d8a3f 6871 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
f92363d1
SR
6872 return r;
6873}