]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/scsi/mpi3mr/mpi3mr_os.c
91a031b1abd20ad675b0abf9083fd6b6cfde2f9c
[mirror_ubuntu-jammy-kernel.git] / drivers / scsi / mpi3mr / mpi3mr_os.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for Broadcom MPI3 Storage Controllers
4 *
5 * Copyright (C) 2017-2021 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7 *
8 */
9
10 #include "mpi3mr.h"
11
/* global driver scope variables */
LIST_HEAD(mrioc_list);
/* protects mrioc_list */
DEFINE_SPINLOCK(mrioc_list_lock);
/* monotonically increasing id assigned to each new adapter instance */
static int mrioc_ids;
static int warn_non_secure_ctlr;

MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
MODULE_VERSION(MPI3MR_DRIVER_VERSION);

/* Module parameters */
int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");

static int prot_guard_mask = 3;
module_param(prot_guard_mask, int, 0);
MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
	" bits for enabling additional logging info (default=0)");

/* Forward declarations */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
39
40 /**
41 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
42 * @mrioc: Adapter instance reference
43 * @scmd: SCSI command reference
44 *
45 * Calculate the host tag based on block tag for a given scmd.
46 *
47 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
48 */
49 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
50 struct scsi_cmnd *scmd)
51 {
52 struct scmd_priv *priv = NULL;
53 u32 unique_tag;
54 u16 host_tag, hw_queue;
55
56 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
57
58 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
59 if (hw_queue >= mrioc->num_op_reply_q)
60 return MPI3MR_HOSTTAG_INVALID;
61 host_tag = blk_mq_unique_tag_to_tag(unique_tag);
62
63 if (WARN_ON(host_tag >= mrioc->max_host_ios))
64 return MPI3MR_HOSTTAG_INVALID;
65
66 priv = scsi_cmd_priv(scmd);
67 /*host_tag 0 is invalid hence incrementing by 1*/
68 priv->host_tag = host_tag + 1;
69 priv->scmd = scmd;
70 priv->in_lld_scope = 1;
71 priv->req_q_idx = hw_queue;
72 priv->meta_chain_idx = -1;
73 priv->chain_idx = -1;
74 priv->meta_sg_valid = 0;
75 return priv->host_tag;
76 }
77
78 /**
79 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
80 * @mrioc: Adapter instance reference
81 * @host_tag: Host tag
82 * @qidx: Operational queue index
83 *
84 * Identify the block tag from the host tag and queue index and
85 * retrieve associated scsi command using scsi_host_find_tag().
86 *
87 * Return: SCSI command reference or NULL.
88 */
89 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
90 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
91 {
92 struct scsi_cmnd *scmd = NULL;
93 struct scmd_priv *priv = NULL;
94 u32 unique_tag = host_tag - 1;
95
96 if (WARN_ON(host_tag > mrioc->max_host_ios))
97 goto out;
98
99 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
100
101 scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
102 if (scmd) {
103 priv = scsi_cmd_priv(scmd);
104 if (!priv->in_lld_scope)
105 scmd = NULL;
106 }
107 out:
108 return scmd;
109 }
110
/**
 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private data
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 *
 * Invalidate the SCSI command private data to mark the command
 * as not in LLD scope anymore and release any chain frames that
 * were reserved for it.
 *
 * Return: Nothing.
 */
static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	struct scmd_priv *priv = NULL;

	priv = scsi_cmd_priv(scmd);

	/* Clearing a command that was never in LLD scope indicates a bug */
	if (WARN_ON(priv->in_lld_scope == 0))
		return;
	priv->host_tag = MPI3MR_HOSTTAG_INVALID;
	priv->req_q_idx = 0xFFFF;
	priv->scmd = NULL;
	priv->in_lld_scope = 0;
	priv->meta_sg_valid = 0;
	/* Return the data chain frame, if one was reserved, to the pool */
	if (priv->chain_idx >= 0) {
		clear_bit(priv->chain_idx, mrioc->chain_bitmap);
		priv->chain_idx = -1;
	}
	/* Return the meta (protection) chain frame, if any, to the pool */
	if (priv->meta_chain_idx >= 0) {
		clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
		priv->meta_chain_idx = -1;
	}
}
144
/* Forward declarations for routines defined later in this file */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
static void mpi3mr_fwevt_worker(struct work_struct *work);
148
/**
 * mpi3mr_fwevt_free - firmware event memory deallocator
 * @r: k reference pointer of the firmware event
 *
 * Free firmware event memory when no reference.
 */
static void mpi3mr_fwevt_free(struct kref *r)
{
	kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
}
159
/**
 * mpi3mr_fwevt_get - k reference incrementor
 * @fwevt: Firmware event reference
 *
 * Increment firmware event reference count.
 */
static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
{
	kref_get(&fwevt->ref_count);
}
170
/**
 * mpi3mr_fwevt_put - k reference decrementor
 * @fwevt: Firmware event reference
 *
 * Decrement firmware event reference count; frees the event via
 * mpi3mr_fwevt_free() when the count drops to zero.
 */
static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
{
	kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
}
181
182 /**
183 * mpi3mr_alloc_fwevt - Allocate firmware event
184 * @len: length of firmware event data to allocate
185 *
186 * Allocate firmware event with required length and initialize
187 * the reference counter.
188 *
189 * Return: firmware event reference.
190 */
191 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
192 {
193 struct mpi3mr_fwevt *fwevt;
194
195 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
196 if (!fwevt)
197 return NULL;
198
199 kref_init(&fwevt->ref_count);
200 return fwevt;
201 }
202
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list and
 * queue it to the firmware event worker thread. The event is
 * silently dropped (not queued) when the worker thread does not
 * exist.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	/* No worker thread (e.g. during teardown): nothing can consume it */
	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
231
/**
 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Delete the given firmware event from the firmware event list,
 * if it is still linked, and drop the list's reference on it.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* list_empty() on the node itself: true once list_del_init() ran */
	if (!list_empty(&fwevt->list)) {
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
257
/**
 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
 * @mrioc: Adapter instance reference
 *
 * Dequeue the first firmware event from the firmware event list,
 * dropping the list's reference on it. The caller still holds
 * the remaining references (work-queue and/or initial one).
 *
 * Return: firmware event or NULL if the list is empty.
 */
static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
	struct mpi3mr_ioc *mrioc)
{
	unsigned long flags;
	struct mpi3mr_fwevt *fwevt = NULL;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	if (!list_empty(&mrioc->fwevt_list)) {
		fwevt = list_first_entry(&mrioc->fwevt_list,
		    struct mpi3mr_fwevt, list);
		list_del_init(&fwevt->list);
		/*
		 * Put fwevt reference count after
		 * removing it from fwevt_list
		 */
		mpi3mr_fwevt_put(fwevt);
	}
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);

	return fwevt;
}
287
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Cancel the event's work item. If the work had not started yet,
 * drop both outstanding references (the work-queue one and the
 * kref_init one) so the event is freed here.
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
316
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list, including the event currently being processed when it is
 * safe to cancel it.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	/* Nothing queued and nothing in flight, or no worker to flush */
	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	/* Drain every queued event, canceling its work item */
	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() API for the
		 * fwevt work if the controller reset is
		 * get called as part of processing the
		 * same fwevt work (or) when worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			/* Mark it so the worker abandons it when it resumes */
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}
355
356 /**
357 * mpi3mr_invalidate_devhandles -Invalidate device handles
358 * @mrioc: Adapter instance reference
359 *
360 * Invalidate the device handles in the target device structures
361 * . Called post reset prior to reinitializing the controller.
362 *
363 * Return: Nothing.
364 */
365 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
366 {
367 struct mpi3mr_tgt_dev *tgtdev;
368 struct mpi3mr_stgt_priv_data *tgt_priv;
369
370 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
371 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
372 if (tgtdev->starget && tgtdev->starget->hostdata) {
373 tgt_priv = tgtdev->starget->hostdata;
374 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
375 }
376 }
377 }
378
379 /**
380 * mpi3mr_print_scmd - print individual SCSI command
381 * @rq: Block request
382 * @data: Adapter instance reference
383 * @reserved: N/A. Currently not used
384 *
385 * Print the SCSI command details if it is in LLD scope.
386 *
387 * Return: true always.
388 */
389 static bool mpi3mr_print_scmd(struct request *rq,
390 void *data, bool reserved)
391 {
392 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
393 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
394 struct scmd_priv *priv = NULL;
395
396 if (scmd) {
397 priv = scsi_cmd_priv(scmd);
398 if (!priv->in_lld_scope)
399 goto out;
400
401 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
402 __func__, priv->host_tag, priv->req_q_idx + 1);
403 scsi_print_command(scmd);
404 }
405
406 out:
407 return(true);
408 }
409
/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 * @reserved: N/A. Currently not used
 *
 * Return the SCSI command to the upper layers with DID_RESET if
 * it is in LLD scope, unmapping its DMA buffers and clearing its
 * private data first.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq,
	void *data, bool reserved)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		/* Unmap protection-information SG list before the data SG */
		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		/* Completed with DID_RESET so the midlayer retries it */
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scmd->scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return(true);
}
448
449 /**
450 * mpi3mr_count_dev_pending - Count commands pending for a lun
451 * @rq: Block request
452 * @data: SCSI device reference
453 * @reserved: Unused
454 *
455 * This is an iterator function called for each SCSI command in
456 * a host and if the command is pending in the LLD for the
457 * specific device(lun) then device specific pending I/O counter
458 * is updated in the device structure.
459 *
460 * Return: true always.
461 */
462
463 static bool mpi3mr_count_dev_pending(struct request *rq,
464 void *data, bool reserved)
465 {
466 struct scsi_device *sdev = (struct scsi_device *)data;
467 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
468 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
469 struct scmd_priv *priv;
470
471 if (scmd) {
472 priv = scsi_cmd_priv(scmd);
473 if (!priv->in_lld_scope)
474 goto out;
475 if (scmd->device == sdev)
476 sdev_priv_data->pend_count++;
477 }
478
479 out:
480 return true;
481 }
482
483 /**
484 * mpi3mr_count_tgt_pending - Count commands pending for target
485 * @rq: Block request
486 * @data: SCSI target reference
487 * @reserved: Unused
488 *
489 * This is an iterator function called for each SCSI command in
490 * a host and if the command is pending in the LLD for the
491 * specific target then target specific pending I/O counter is
492 * updated in the target structure.
493 *
494 * Return: true always.
495 */
496
497 static bool mpi3mr_count_tgt_pending(struct request *rq,
498 void *data, bool reserved)
499 {
500 struct scsi_target *starget = (struct scsi_target *)data;
501 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
502 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
503 struct scmd_priv *priv;
504
505 if (scmd) {
506 priv = scsi_cmd_priv(scmd);
507 if (!priv->in_lld_scope)
508 goto out;
509 if (scmd->device && (scsi_target(scmd->device) == starget))
510 stgt_priv_data->pend_count++;
511 }
512
513 out:
514 return true;
515 }
516
517 /**
518 * mpi3mr_flush_host_io - Flush host I/Os
519 * @mrioc: Adapter instance reference
520 *
521 * Flush all of the pending I/Os by calling
522 * blk_mq_tagset_busy_iter() for each possible tag. This is
523 * executed post controller reset
524 *
525 * Return: Nothing.
526 */
527 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
528 {
529 struct Scsi_Host *shost = mrioc->shost;
530
531 mrioc->flush_io_count = 0;
532 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
533 blk_mq_tagset_busy_iter(&shost->tag_set,
534 mpi3mr_flush_scmd, (void *)mrioc);
535 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
536 mrioc->flush_io_count);
537 }
538
539 /**
540 * mpi3mr_alloc_tgtdev - target device allocator
541 *
542 * Allocate target device instance and initialize the reference
543 * count
544 *
545 * Return: target device instance.
546 */
547 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
548 {
549 struct mpi3mr_tgt_dev *tgtdev;
550
551 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
552 if (!tgtdev)
553 return NULL;
554 kref_init(&tgtdev->ref_count);
555 return tgtdev;
556 }
557
558 /**
559 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list
560 * @mrioc: Adapter instance reference
561 * @tgtdev: Target device
562 *
563 * Add the target device to the target device list
564 *
565 * Return: Nothing.
566 */
567 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
568 struct mpi3mr_tgt_dev *tgtdev)
569 {
570 unsigned long flags;
571
572 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
573 mpi3mr_tgtdev_get(tgtdev);
574 INIT_LIST_HEAD(&tgtdev->list);
575 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
576 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
577 }
578
579 /**
580 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list
581 * @mrioc: Adapter instance reference
582 * @tgtdev: Target device
583 *
584 * Remove the target device from the target device list
585 *
586 * Return: Nothing.
587 */
588 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
589 struct mpi3mr_tgt_dev *tgtdev)
590 {
591 unsigned long flags;
592
593 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
594 if (!list_empty(&tgtdev->list)) {
595 list_del_init(&tgtdev->list);
596 mpi3mr_tgtdev_put(tgtdev);
597 }
598 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
599 }
600
601 /**
602 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
603 * @mrioc: Adapter instance reference
604 * @handle: Device handle
605 *
606 * Accessor to retrieve target device from the device handle.
607 * Non Lock version
608 *
609 * Return: Target device reference.
610 */
611 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
612 struct mpi3mr_ioc *mrioc, u16 handle)
613 {
614 struct mpi3mr_tgt_dev *tgtdev;
615
616 assert_spin_locked(&mrioc->tgtdev_lock);
617 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
618 if (tgtdev->dev_handle == handle)
619 goto found_tgtdev;
620 return NULL;
621
622 found_tgtdev:
623 mpi3mr_tgtdev_get(tgtdev);
624 return tgtdev;
625 }
626
627 /**
628 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
629 * @mrioc: Adapter instance reference
630 * @handle: Device handle
631 *
632 * Accessor to retrieve target device from the device handle.
633 * Lock version
634 *
635 * Return: Target device reference.
636 */
637 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
638 struct mpi3mr_ioc *mrioc, u16 handle)
639 {
640 struct mpi3mr_tgt_dev *tgtdev;
641 unsigned long flags;
642
643 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
644 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
645 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
646 return tgtdev;
647 }
648
649 /**
650 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID
651 * @mrioc: Adapter instance reference
652 * @persist_id: Persistent ID
653 *
654 * Accessor to retrieve target device from the Persistent ID.
655 * Non Lock version
656 *
657 * Return: Target device reference.
658 */
659 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
660 struct mpi3mr_ioc *mrioc, u16 persist_id)
661 {
662 struct mpi3mr_tgt_dev *tgtdev;
663
664 assert_spin_locked(&mrioc->tgtdev_lock);
665 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
666 if (tgtdev->perst_id == persist_id)
667 goto found_tgtdev;
668 return NULL;
669
670 found_tgtdev:
671 mpi3mr_tgtdev_get(tgtdev);
672 return tgtdev;
673 }
674
675 /**
676 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID
677 * @mrioc: Adapter instance reference
678 * @persist_id: Persistent ID
679 *
680 * Accessor to retrieve target device from the Persistent ID.
681 * Lock version
682 *
683 * Return: Target device reference.
684 */
685 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
686 struct mpi3mr_ioc *mrioc, u16 persist_id)
687 {
688 struct mpi3mr_tgt_dev *tgtdev;
689 unsigned long flags;
690
691 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
692 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
693 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
694 return tgtdev;
695 }
696
697 /**
698 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private
699 * @mrioc: Adapter instance reference
700 * @tgt_priv: Target private data
701 *
702 * Accessor to return target device from the target private
703 * data. Non Lock version
704 *
705 * Return: Target device reference.
706 */
707 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
708 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
709 {
710 struct mpi3mr_tgt_dev *tgtdev;
711
712 assert_spin_locked(&mrioc->tgtdev_lock);
713 tgtdev = tgt_priv->tgt_dev;
714 if (tgtdev)
715 mpi3mr_tgtdev_get(tgtdev);
716 return tgtdev;
717 }
718
/**
 * mpi3mr_print_device_event_notice - print notice related to post processing of
 * device event after controller reset.
 *
 * @mrioc: Adapter instance reference
 * @device_add: true for device add event and false for device removal event
 *
 * Return: None.
 */
static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
	bool device_add)
{
	ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
	    (device_add ? "addition" : "removal"));
	ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
	ioc_notice(mrioc, "are matched with attached devices for correctness\n");
}
736
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target().
 *
 * Return: Nothing.
 */
static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (tgtdev->starget) {
		/*
		 * pending_at_sml tells mpi3mr_cleanup_fwevt_list() not to
		 * cancel_work_sync() this event while it waits in the SCSI
		 * midlayer, which would deadlock.
		 */
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_remove_target(&tgtdev->starget->dev);
		tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			/* a reset discarded this event while we were at SML */
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, false);
				return;
			}
		}
	}
	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
776
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not exposed yet, expose it to the upper layers by
 * calling scsi_scan_target().
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	/* hidden devices (non end-devices, offline VDs, ...) stay unexposed */
	if (tgtdev->is_hidden) {
		retval = -1;
		goto out;
	}
	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
		tgtdev->host_exposed = 1;
		/*
		 * pending_at_sml tells mpi3mr_cleanup_fwevt_list() not to
		 * cancel_work_sync() this event while the scan is in the
		 * SCSI midlayer, which would deadlock.
		 */
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
		    tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		/* scan did not attach a starget: roll back the exposed flag */
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			/* a reset discarded this event while we were at SML */
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	}
out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
826
827 /**
828 * mpi3mr_change_queue_depth- Change QD callback handler
829 * @sdev: SCSI device reference
830 * @q_depth: Queue depth
831 *
832 * Validate and limit QD and call scsi_change_queue_depth.
833 *
834 * Return: return value of scsi_change_queue_depth
835 */
836 static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
837 int q_depth)
838 {
839 struct scsi_target *starget = scsi_target(sdev);
840 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
841 int retval = 0;
842
843 if (!sdev->tagged_supported)
844 q_depth = 1;
845 if (q_depth > shost->can_queue)
846 q_depth = shost->can_queue;
847 else if (!q_depth)
848 q_depth = MPI3MR_DEFAULT_SDEV_QD;
849 retval = scsi_change_queue_depth(sdev, q_depth);
850
851 return retval;
852 }
853
/**
 * mpi3mr_update_sdev - Update SCSI device information
 * @sdev: SCSI device reference
 * @data: target device reference
 *
 * This is an iterator function called for each SCSI device in a
 * target to update the target specific information into each
 * SCSI device.
 *
 * Return: Nothing.
 */
static void
mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
{
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = (struct mpi3mr_tgt_dev *)data;
	if (!tgtdev)
		return;

	mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/*The block layer hw sector size = 512*/
		if ((tgtdev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			/* mdts is in bytes; convert to 512-byte sectors */
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgtdev->dev_spec.pcie_inf.mdts / 512);
			/*
			 * Constrain SG elements to the NVMe device page size
			 * (fall back to the driver default when pgsz is 0).
			 */
			if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}
}
895
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */

void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	/*
	 * Safe iteration is required: devices whose handle stayed invalid
	 * after reset are unlinked (and possibly freed) inside the loop.
	 */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
		    tgtdev->host_exposed) {
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	/* Expose surviving, non-hidden devices that are not yet exposed */
	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}
928
/**
 * mpi3mr_update_tgtdev - Update cached target device information
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device internal structure
 * @dev_pg0: New device page0
 *
 * Update the information from the device page0 into the driver
 * cached target device structure.
 *
 * Return: Nothing.
 */
static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
{
	u16 flags = 0;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	/* NOTE(review): this local shadows the module parameter prot_mask */
	u8 prot_mask = 0;

	tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
	tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
	tgtdev->dev_type = dev_pg0->device_form;
	tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
	tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
	tgtdev->slot = le16_to_cpu(dev_pg0->slot);
	tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
	tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);

	flags = le16_to_cpu(dev_pg0->flags);
	tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);

	/* keep the SCSI target's private data in sync with the new page0 */
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
		scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
		scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
	}

	/* any access status other than these usable states hides the device */
	switch (dev_pg0->access_status) {
	case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
	case MPI3_DEVICE0_ASTATUS_PREPARE:
	case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
	case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
		break;
	default:
		tgtdev->is_hidden = 1;
		break;
	}

	/* device-form specific fields */
	switch (tgtdev->dev_type) {
	case MPI3_DEVICE_DEVFORM_SAS_SATA:
	{
		struct mpi3_device0_sas_sata_format *sasinf =
		    &dev_pg0->device_specific.sas_sata_format;
		u16 dev_info = le16_to_cpu(sasinf->device_info);

		tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
		tgtdev->dev_spec.sas_sata_inf.sas_address =
		    le64_to_cpu(sasinf->sas_address);
		/* only SSP/STP-SATA end devices are exposed */
		if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
		    MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
			tgtdev->is_hidden = 1;
		else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
		    MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
			tgtdev->is_hidden = 1;
		break;
	}
	case MPI3_DEVICE_DEVFORM_PCIE:
	{
		struct mpi3_device0_pcie_format *pcieinf =
		    &dev_pg0->device_specific.pcie_format;
		u16 dev_info = le16_to_cpu(pcieinf->device_info);

		tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
		tgtdev->dev_spec.pcie_inf.capb =
		    le32_to_cpu(pcieinf->capabilities);
		tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
		/* 2^12 = 4096 */
		tgtdev->dev_spec.pcie_inf.pgsz = 12;
		/* page0 transfer/timeout fields are valid only without errors */
		if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
			tgtdev->dev_spec.pcie_inf.mdts =
			    le32_to_cpu(pcieinf->maximum_data_transfer_size);
			tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
			tgtdev->dev_spec.pcie_inf.reset_to =
			    max_t(u8, pcieinf->controller_reset_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
			tgtdev->dev_spec.pcie_inf.abort_to =
			    max_t(u8, pcieinf->nvme_abort_to,
			    MPI3MR_INTADMCMD_TIMEOUT);
		}
		/* cap max data transfer size at 1MiB */
		if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
			tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
		/* only NVMe and PCIe-SCSI device types are exposed */
		if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
		    ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
			tgtdev->is_hidden = 1;
		if (!mrioc->shost)
			break;
		prot_mask = scsi_host_get_prot(mrioc->shost);
		if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
			scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
			ioc_info(mrioc,
			    "%s : Disabling DIX0 prot capability\n", __func__);
			ioc_info(mrioc,
			    "because HBA does not support DIX0 operation on NVME drives\n");
		}
		break;
	}
	case MPI3_DEVICE_DEVFORM_VD:
	{
		struct mpi3_device0_vd_format *vdinf =
		    &dev_pg0->device_specific.vd_format;

		tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
		/* offline virtual disks are not exposed */
		if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
			tgtdev->is_hidden = 1;
		break;
	}
	default:
		break;
	}
}
1052
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	/* actions decoded from the reason code */
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	/* takes a reference that is released at 'out' */
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		if (delete)
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	}
	if (cleanup) {
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		/*
		 * del_from_list dropped the list's reference; this put
		 * releases the remaining (allocation-time) reference. The
		 * put at 'out' below releases the lookup reference.
		 */
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
1115
1116 /**
1117 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1118 * @mrioc: Adapter instance reference
1119 * @dev_pg0: New device page0
1120 *
1121 * Process Device Info Change event and based on device's new
1122 * information, either expose the device to the upper layers, or
1123 * remove the device from upper layers or update the details of
1124 * the device.
1125 *
1126 * Return: Nothing.
1127 */
1128 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
1129 struct mpi3_device_page0 *dev_pg0)
1130 {
1131 struct mpi3mr_tgt_dev *tgtdev = NULL;
1132 u16 dev_handle = 0, perst_id = 0;
1133
1134 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1135 dev_handle = le16_to_cpu(dev_pg0->dev_handle);
1136 ioc_info(mrioc,
1137 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1138 __func__, dev_handle, perst_id);
1139 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1140 if (!tgtdev)
1141 goto out;
1142 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1143 if (!tgtdev->is_hidden && !tgtdev->host_exposed)
1144 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1145 if (tgtdev->is_hidden && tgtdev->host_exposed)
1146 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1147 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
1148 starget_for_each_device(tgtdev->starget, (void *)tgtdev,
1149 mpi3mr_update_sdev);
1150 out:
1151 if (tgtdev)
1152 mpi3mr_tgtdev_put(tgtdev);
1153 }
1154
1155 /**
1156 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
1157 * @mrioc: Adapter instance reference
1158 * @event_data: SAS topology change list event data
1159 *
1160 * Prints information about the SAS topology change event.
1161 *
1162 * Return: Nothing.
1163 */
1164 static void
1165 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1166 struct mpi3_event_data_sas_topology_change_list *event_data)
1167 {
1168 int i;
1169 u16 handle;
1170 u8 reason_code, phy_number;
1171 char *status_str = NULL;
1172 u8 link_rate, prev_link_rate;
1173
1174 switch (event_data->exp_status) {
1175 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1176 status_str = "remove";
1177 break;
1178 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1179 status_str = "responding";
1180 break;
1181 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1182 status_str = "remove delay";
1183 break;
1184 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1185 status_str = "direct attached";
1186 break;
1187 default:
1188 status_str = "unknown status";
1189 break;
1190 }
1191 ioc_info(mrioc, "%s :sas topology change: (%s)\n",
1192 __func__, status_str);
1193 ioc_info(mrioc,
1194 "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
1195 __func__, le16_to_cpu(event_data->expander_dev_handle),
1196 le16_to_cpu(event_data->enclosure_handle),
1197 event_data->start_phy_num, event_data->num_entries);
1198 for (i = 0; i < event_data->num_entries; i++) {
1199 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1200 if (!handle)
1201 continue;
1202 phy_number = event_data->start_phy_num + i;
1203 reason_code = event_data->phy_entry[i].status &
1204 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1205 switch (reason_code) {
1206 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1207 status_str = "target remove";
1208 break;
1209 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1210 status_str = "delay target remove";
1211 break;
1212 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1213 status_str = "link status change";
1214 break;
1215 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1216 status_str = "link status no change";
1217 break;
1218 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1219 status_str = "target responding";
1220 break;
1221 default:
1222 status_str = "unknown";
1223 break;
1224 }
1225 link_rate = event_data->phy_entry[i].link_rate >> 4;
1226 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1227 ioc_info(mrioc,
1228 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1229 __func__, phy_number, handle, status_str, link_rate,
1230 prev_link_rate);
1231 }
1232 }
1233
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Stop processing if the event was discarded mid-flight
		 * (checked each iteration so a reset can interrupt us).
		 */
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): this put presumably drops the
			 * reference held by the driver's tgtdev list;
			 * confirm against mpi3mr_tgtdev_del_from_list().
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			/* Balances the get_tgtdev_by_handle() lookup. */
			mpi3mr_tgtdev_put(tgtdev);
	}
}
1284
1285 /**
1286 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1287 * @mrioc: Adapter instance reference
1288 * @event_data: PCIe topology change list event data
1289 *
1290 * Prints information about the PCIe topology change event.
1291 *
1292 * Return: Nothing.
1293 */
1294 static void
1295 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1296 struct mpi3_event_data_pcie_topology_change_list *event_data)
1297 {
1298 int i;
1299 u16 handle;
1300 u16 reason_code;
1301 u8 port_number;
1302 char *status_str = NULL;
1303 u8 link_rate, prev_link_rate;
1304
1305 switch (event_data->switch_status) {
1306 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1307 status_str = "remove";
1308 break;
1309 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1310 status_str = "responding";
1311 break;
1312 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1313 status_str = "remove delay";
1314 break;
1315 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1316 status_str = "direct attached";
1317 break;
1318 default:
1319 status_str = "unknown status";
1320 break;
1321 }
1322 ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1323 __func__, status_str);
1324 ioc_info(mrioc,
1325 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1326 __func__, le16_to_cpu(event_data->switch_dev_handle),
1327 le16_to_cpu(event_data->enclosure_handle),
1328 event_data->start_port_num, event_data->num_entries);
1329 for (i = 0; i < event_data->num_entries; i++) {
1330 handle =
1331 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1332 if (!handle)
1333 continue;
1334 port_number = event_data->start_port_num + i;
1335 reason_code = event_data->port_entry[i].port_status;
1336 switch (reason_code) {
1337 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1338 status_str = "target remove";
1339 break;
1340 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1341 status_str = "delay target remove";
1342 break;
1343 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1344 status_str = "link status change";
1345 break;
1346 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1347 status_str = "link status no change";
1348 break;
1349 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1350 status_str = "target responding";
1351 break;
1352 default:
1353 status_str = "unknown";
1354 break;
1355 }
1356 link_rate = event_data->port_entry[i].current_port_info &
1357 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1358 prev_link_rate = event_data->port_entry[i].previous_port_info &
1359 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1360 ioc_info(mrioc,
1361 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1362 __func__, port_number, handle, status_str, link_rate,
1363 prev_link_rate);
1364 }
1365 }
1366
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Stop processing if the event was discarded mid-flight
		 * (checked each iteration so a reset can interrupt us).
		 */
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): this put presumably drops the
			 * reference held by the driver's tgtdev list;
			 * confirm against mpi3mr_tgtdev_del_from_list().
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			/* Balances the get_tgtdev_by_handle() lookup. */
			mpi3mr_tgtdev_put(tgtdev);
	}
}
1417
1418 /**
1419 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1420 * @mrioc: Adapter instance reference
1421 * @fwevt: Firmware event reference
1422 *
1423 * Identifies the firmware event and calls corresponding bottomg
1424 * half handler and sends event acknowledgment if required.
1425 *
1426 * Return: Nothing.
1427 */
1428 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1429 struct mpi3mr_fwevt *fwevt)
1430 {
1431 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1432 mrioc->current_event = fwevt;
1433
1434 if (mrioc->stop_drv_processing)
1435 goto out;
1436
1437 if (!fwevt->process_evt)
1438 goto evt_ack;
1439
1440 switch (fwevt->event_id) {
1441 case MPI3_EVENT_DEVICE_ADDED:
1442 {
1443 struct mpi3_device_page0 *dev_pg0 =
1444 (struct mpi3_device_page0 *)fwevt->event_data;
1445 mpi3mr_report_tgtdev_to_host(mrioc,
1446 le16_to_cpu(dev_pg0->persistent_id));
1447 break;
1448 }
1449 case MPI3_EVENT_DEVICE_INFO_CHANGED:
1450 {
1451 mpi3mr_devinfochg_evt_bh(mrioc,
1452 (struct mpi3_device_page0 *)fwevt->event_data);
1453 break;
1454 }
1455 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1456 {
1457 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1458 break;
1459 }
1460 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1461 {
1462 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1463 break;
1464 }
1465 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1466 {
1467 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1468 break;
1469 }
1470 default:
1471 break;
1472 }
1473
1474 evt_ack:
1475 if (fwevt->send_ack)
1476 mpi3mr_process_event_ack(mrioc, fwevt->event_id,
1477 fwevt->evt_ctx);
1478 out:
1479 /* Put fwevt reference count to neutralize kref_init increment */
1480 mpi3mr_fwevt_put(fwevt);
1481 mrioc->current_event = NULL;
1482 }
1483
1484 /**
1485 * mpi3mr_fwevt_worker - Firmware event worker
1486 * @work: Work struct containing firmware event
1487 *
1488 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
1489 *
1490 * Return: Nothing.
1491 */
1492 static void mpi3mr_fwevt_worker(struct work_struct *work)
1493 {
1494 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
1495 work);
1496 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
1497 /*
1498 * Put fwevt reference count after
1499 * dequeuing it from worker queue
1500 */
1501 mpi3mr_fwevt_put(fwevt);
1502 }
1503
1504 /**
1505 * mpi3mr_create_tgtdev - Create and add a target device
1506 * @mrioc: Adapter instance reference
1507 * @dev_pg0: Device Page 0 data
1508 *
1509 * If the device specified by the device page 0 data is not
1510 * present in the driver's internal list, allocate the memory
1511 * for the device, populate the data and add to the list, else
1512 * update the device data. The key is persistent ID.
1513 *
1514 * Return: 0 on success, -ENOMEM on memory allocation failure
1515 */
1516 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
1517 struct mpi3_device_page0 *dev_pg0)
1518 {
1519 int retval = 0;
1520 struct mpi3mr_tgt_dev *tgtdev = NULL;
1521 u16 perst_id = 0;
1522
1523 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1524 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
1525 if (tgtdev) {
1526 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1527 mpi3mr_tgtdev_put(tgtdev);
1528 } else {
1529 tgtdev = mpi3mr_alloc_tgtdev();
1530 if (!tgtdev)
1531 return -ENOMEM;
1532 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1533 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
1534 }
1535
1536 return retval;
1537 }
1538
1539 /**
1540 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
1541 * @mrioc: Adapter instance reference
1542 *
1543 * Flush pending commands in the delayed lists due to a
1544 * controller reset or driver removal as a cleanup.
1545 *
1546 * Return: Nothing
1547 */
1548 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
1549 {
1550 struct delayed_dev_rmhs_node *_rmhs_node;
1551 struct delayed_evt_ack_node *_evtack_node;
1552
1553 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
1554 while (!list_empty(&mrioc->delayed_rmhs_list)) {
1555 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
1556 struct delayed_dev_rmhs_node, list);
1557 list_del(&_rmhs_node->list);
1558 kfree(_rmhs_node);
1559 }
1560 dprint_reset(mrioc, "flushing delayed event ack commands\n");
1561 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1562 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
1563 struct delayed_evt_ack_node, list);
1564 list_del(&_evtack_node->list);
1565 kfree(_evtack_node);
1566 }
1567 }
1568
/**
 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retry the removal handshake sequence
 * based on the IOU control request IOC status.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* Firmware failed the IOU control: retry the whole removal
		 * handshake (TM + IOU control) with the same tracker, up to
		 * MPI3MR_DEV_RMHS_RETRY_COUNT attempts.
		 */
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Handshake done: the handle is no longer removal-pending. */
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	/* If any removal handshakes were postponed for lack of a free
	 * command slot, start the oldest one now, reusing this tracker.
	 */
	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}

	/* Nothing pending: reset the tracker and release the slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}
1632
/**
 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or issue IO unit control request as
 * part of device removal or hidden acknowledgment handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	struct mpi3_iounit_control_request iou_ctrl;
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval;

	/* Reply payload is only meaningful if the firmware supplied one. */
	if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
		tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	if (tm_reply)
		pr_info(IOCNAME
		    "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
		    mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
		    drv_cmd->ioc_loginfo,
		    le32_to_cpu(tm_reply->termination_count));

	pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
	    mrioc->name, drv_cmd->dev_handle, cmd_idx);

	memset(&iou_ctrl, 0, sizeof(iou_ctrl));

	/* Chain the handshake: reuse this tracker for the follow-up IO
	 * unit control request, completed by mpi3mr_dev_rmhs_complete_iou().
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
	iou_ctrl.operation = drv_cmd->iou_rc;
	iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
	iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
	iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;

	retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
	    1);
	if (retval) {
		pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
		    mrioc->name);
		goto out_failed;
	}

	return;
out_failed:
	/* Abandon the handshake: reset the tracker and free the slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}
1691
/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or add it to a pend
 * list as part of device removal or hidden acknowledgment
 * handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* A caller-supplied tracker means a retry or a chained/delayed
	 * handshake; reuse it instead of reserving a fresh slot.
	 */
	if (drv_cmd)
		goto issue_cmd;
	do {
		/* Reserve a free dev-removal command slot;
		 * test_and_set_bit closes the race with concurrent
		 * reservers between the find and the set.
		 */
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		/* No free slot: queue the handshake; it is replayed from
		 * mpi3mr_dev_rmhs_complete_iou() when a slot frees up.
		 * GFP_ATOMIC since this may run in a non-sleepable context;
		 * on allocation failure the handshake is silently dropped.
		 */
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* Non-blocking TM: completion continues the handshake via
	 * mpi3mr_dev_rmhs_complete_tm().
	 */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle removal-pending before posting; cleared when
	 * the handshake completes successfully.
	 */
	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Abandon the handshake: reset the tracker and free the slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}
1781
1782 /**
1783 * mpi3mr_complete_evt_ack - event ack request completion
1784 * @mrioc: Adapter instance reference
1785 * @drv_cmd: Internal command tracker
1786 *
1787 * This is the completion handler for non blocking event
1788 * acknowledgment sent to the firmware and this will issue any
1789 * pending event acknowledgment request.
1790 *
1791 * Return: Nothing
1792 */
1793 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
1794 struct mpi3mr_drv_cmd *drv_cmd)
1795 {
1796 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
1797 struct delayed_evt_ack_node *delayed_evtack = NULL;
1798
1799 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
1800 dprint_event_th(mrioc,
1801 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
1802 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1803 drv_cmd->ioc_loginfo);
1804 }
1805
1806 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1807 delayed_evtack =
1808 list_entry(mrioc->delayed_evtack_cmds_list.next,
1809 struct delayed_evt_ack_node, list);
1810 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
1811 delayed_evtack->event_ctx);
1812 list_del(&delayed_evtack->list);
1813 kfree(delayed_evtack);
1814 return;
1815 }
1816 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1817 drv_cmd->callback = NULL;
1818 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
1819 }
1820
/**
 * mpi3mr_send_event_ack - Issue event acknwoledgment request
 * @mrioc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: event context
 *
 * Issues event acknowledgment request to the firmware if there
 * is a free command to send the event ack else it to a pend
 * list so that it will be processed on a completion of a prior
 * event acknowledgment .
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	/* A caller-supplied tracker means a previously postponed ack is
	 * being replayed from mpi3mr_complete_evt_ack(); reuse it.
	 */
	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
	do {
		/* Reserve a free event-ack command slot; test_and_set_bit
		 * closes the race with concurrent reservers.
		 */
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		/* No free slot: queue the ack; it is issued later from
		 * mpi3mr_complete_evt_ack(). GFP_ATOMIC since this may run
		 * in a non-sleepable context; on allocation failure the
		 * acknowledgment is silently dropped.
		 */
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	/* Non-blocking request; completed by mpi3mr_complete_evt_ack(). */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		dprint_event_th(mrioc,
		    "posting event ack request is failed\n");
		goto out_failed;
	}

	dprint_event_th(mrioc,
	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
	    event, event_ctx);
out:
	return;
out_failed:
	/* Reset the tracker and release the command slot. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}
1916
/**
 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * PCIe devices.
 *
 * Return: Nothing
 */
static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_pcie_topology_change_list *topo_evt =
	    (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->port_entry[i].port_status;
		scsi_tgt_priv_data = NULL;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		/* Target private data exists only for host-attached devs. */
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			/* Device is gone: flag removal, lift any I/O block
			 * and kick off the removal handshake.
			 */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while waiting. */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			/* Balances the get_tgtdev_by_handle() lookup. */
			mpi3mr_tgtdev_put(tgtdev);
	}
}
1982
/**
 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove with the firmware for
 * SAS/SATA devices.
 *
 * Return: Nothing
 */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_sas_topology_change_list *topo_evt =
	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		reason_code = topo_evt->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		scsi_tgt_priv_data = NULL;
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		/* Target private data exists only for host-attached devs. */
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/* Device is gone: flag removal, lift any I/O block
			 * and kick off the removal handshake.
			 */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while waiting. */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device returned: undo the delayed-removal block. */
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			/* Balances the get_tgtdev_by_handle() lookup. */
			mpi3mr_tgtdev_put(tgtdev);
	}
}
2049
2050 /**
2051 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
2052 * @mrioc: Adapter instance reference
2053 * @event_reply: event data
2054 *
2055 * Checks for the reason code and based on that either block I/O
2056 * to device, or unblock I/O to the device, or start the device
2057 * removal handshake with reason as remove/hide acknowledgment
2058 * with the firmware.
2059 *
2060 * Return: Nothing
2061 */
2062 static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
2063 struct mpi3_event_notification_reply *event_reply)
2064 {
2065 u16 dev_handle = 0;
2066 u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
2067 struct mpi3mr_tgt_dev *tgtdev = NULL;
2068 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
2069 struct mpi3_event_data_device_status_change *evtdata =
2070 (struct mpi3_event_data_device_status_change *)event_reply->event_data;
2071
2072 if (mrioc->stop_drv_processing)
2073 goto out;
2074
2075 dev_handle = le16_to_cpu(evtdata->dev_handle);
2076
2077 switch (evtdata->reason_code) {
2078 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
2079 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
2080 block = 1;
2081 break;
2082 case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
2083 delete = 1;
2084 hide = 1;
2085 break;
2086 case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
2087 delete = 1;
2088 remove = 1;
2089 break;
2090 case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
2091 case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
2092 ublock = 1;
2093 break;
2094 default:
2095 break;
2096 }
2097
2098 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
2099 if (!tgtdev)
2100 goto out;
2101 if (hide)
2102 tgtdev->is_hidden = hide;
2103 if (tgtdev->starget && tgtdev->starget->hostdata) {
2104 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
2105 tgtdev->starget->hostdata;
2106 if (block)
2107 atomic_inc(&scsi_tgt_priv_data->block_io);
2108 if (delete)
2109 scsi_tgt_priv_data->dev_removed = 1;
2110 if (ublock)
2111 atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
2112 }
2113 if (remove)
2114 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2115 MPI3_CTRL_OP_REMOVE_DEVICE);
2116 if (hide)
2117 mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
2118 MPI3_CTRL_OP_HIDDEN_ACK);
2119
2120 out:
2121 if (tgtdev)
2122 mpi3mr_tgtdev_put(tgtdev);
2123 }
2124
2125 /**
2126 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2127 * @mrioc: Adapter instance reference
2128 * @event_reply: event data
2129 *
2130 * Blocks and unblocks host level I/O based on the reason code
2131 *
2132 * Return: Nothing
2133 */
2134 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2135 struct mpi3_event_notification_reply *event_reply)
2136 {
2137 struct mpi3_event_data_prepare_for_reset *evtdata =
2138 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2139
2140 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2141 dprint_event_th(mrioc,
2142 "prepare for reset event top half with rc=start\n");
2143 if (mrioc->prepare_for_reset)
2144 return;
2145 mrioc->prepare_for_reset = 1;
2146 mrioc->prepare_for_reset_timeout_counter = 0;
2147 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2148 dprint_event_th(mrioc,
2149 "prepare for reset top half with rc=abort\n");
2150 mrioc->prepare_for_reset = 0;
2151 mrioc->prepare_for_reset_timeout_counter = 0;
2152 }
2153 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2154 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2155 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2156 le32_to_cpu(event_reply->event_context));
2157 }
2158
2159 /**
2160 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2161 * @mrioc: Adapter instance reference
2162 * @event_reply: event data
2163 *
2164 * Identifies the new shutdown timeout value and update.
2165 *
2166 * Return: Nothing
2167 */
2168 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2169 struct mpi3_event_notification_reply *event_reply)
2170 {
2171 struct mpi3_event_data_energy_pack_change *evtdata =
2172 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2173 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2174
2175 if (shutdown_timeout <= 0) {
2176 ioc_warn(mrioc,
2177 "%s :Invalid Shutdown Timeout received = %d\n",
2178 __func__, shutdown_timeout);
2179 return;
2180 }
2181
2182 ioc_info(mrioc,
2183 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2184 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2185 mrioc->facts.shutdown_timeout = shutdown_timeout;
2186 }
2187
2188 /**
2189 * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf
2190 * @mrioc: Adapter instance reference
2191 * @event_reply: event data
2192 *
2193 * Displays temperature threshold event details and fault code
2194 * if any is hit due to temperature exceeding threshold.
2195 *
2196 * Return: Nothing
2197 */
2198 static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc,
2199 struct mpi3_event_notification_reply *event_reply)
2200 {
2201 struct mpi3_event_data_temp_threshold *evtdata =
2202 (struct mpi3_event_data_temp_threshold *)event_reply->event_data;
2203
2204 ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n",
2205 (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ",
2206 (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ",
2207 (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num,
2208 le16_to_cpu(evtdata->current_temperature));
2209 mpi3mr_print_fault_info(mrioc);
2210 }
2211
2212 /**
2213 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2214 * @mrioc: Adapter instance reference
2215 * @event_reply: event data
2216 *
2217 * Displays Cable manegemt event details.
2218 *
2219 * Return: Nothing
2220 */
2221 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2222 struct mpi3_event_notification_reply *event_reply)
2223 {
2224 struct mpi3_event_data_cable_management *evtdata =
2225 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2226
2227 switch (evtdata->status) {
2228 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2229 {
2230 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2231 "Devices connected to this cable are not detected.\n"
2232 "This cable requires %d mW of power.\n",
2233 evtdata->receptacle_id,
2234 le32_to_cpu(evtdata->active_cable_power_requirement));
2235 break;
2236 }
2237 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2238 {
2239 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2240 evtdata->receptacle_id);
2241 break;
2242 }
2243 default:
2244 break;
2245 }
2246 }
2247
/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identifies whether the event has to be handled and/or
 * acknowledged, processes the event in the top half as needed,
 * and schedules a bottom half through mpi3mr_fwevt_worker() when
 * further (possibly sleeping) processing or an ack is required.
 *
 * Return: Nothing
 */
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 evt_type, sz;
	struct mpi3mr_fwevt *fwevt = NULL;
	bool ack_req = 0, process_evt_bh = 0;

	/* Ignore events while the driver is shutting down */
	if (mrioc->stop_drv_processing)
		return;

	/* Firmware may request an explicit acknowledgment for this event */
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->event;

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *dev_pg0 =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		/*
		 * The target device object is created here in the top half;
		 * the bottom half only exposes it to the SCSI midlayer, so
		 * skip the bottom half if creation failed.
		 */
		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
			ioc_err(mrioc,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		/* Top half blocks/unblocks I/O; bottom half finishes removal */
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The top-half handler issues the ack itself */
		mpi3mr_preparereset_evt_th(mrioc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		/* No top-half work; handled entirely in the bottom half */
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_TEMP_THRESHOLD:
	{
		mpi3mr_tempthreshold_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
		break;
	}
	/* Events that are intentionally ignored (no processing needed) */
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
		    __func__, evt_type);
		break;
	}
	if (process_evt_bh || ack_req) {
		/* event_data_length is in dwords; copy the payload for the
		 * bottom half / deferred acknowledgment.
		 */
		sz = event_reply->event_data_length * 4;
		fwevt = mpi3mr_alloc_fwevt(sz);
		if (!fwevt) {
			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
			    __func__, __FILE__, __LINE__, __func__);
			return;
		}

		memcpy(fwevt->event_data, event_reply->event_data, sz);
		fwevt->mrioc = mrioc;
		fwevt->event_id = evt_type;
		fwevt->send_ack = ack_req;
		fwevt->process_evt = process_evt_bh;
		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
	}
}
2361
/**
 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * Identifies the protection information flags from the SCSI
 * command and sets the appropriate EEDP flags, reference tag and
 * user data size in the MPI3 SCSI IO request.
 *
 * Return: Nothing
 */
static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	u16 eedp_flags = 0;
	unsigned char prot_op = scsi_get_prot_op(scmd);

	/*
	 * Map the midlayer protection operation to the MPI3 EEDP
	 * operation. METASGL_VALID is set whenever a protection
	 * (meta) SGL must accompany the data SGL.
	 */
	switch (prot_op) {
	case SCSI_PROT_NORMAL:
		/* No protection information: nothing to set up */
		return;
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_READ_PASS:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_PASS:
		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
			/* Host uses IP checksum guard: regenerate CRC on the
			 * way out and pass the app tag through unchanged.
			 */
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
			    0xffff;
		} else
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;

		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
			MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
		/* Reference tag travels big-endian in the CDB overlay */
		scsiio_req->cdb.eedp32.primary_reference_tag =
			cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;

	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;

	/* Encode the protection interval (user data size per PI block) */
	switch (scsi_prot_interval(scmd)) {
	case 512:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
		break;
	case 520:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
		break;
	case 4080:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
		break;
	case 4088:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
		break;
	case 4096:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
		break;
	case 4104:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
		break;
	case 4160:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
		break;
	default:
		break;
	}

	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
	/* sgl[0] is consumed as the extended EEDP element */
	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
}
2462
2463 /**
2464 * mpi3mr_build_sense_buffer - Map sense information
2465 * @desc: Sense type
2466 * @buf: Sense buffer to populate
2467 * @key: Sense key
2468 * @asc: Additional sense code
2469 * @ascq: Additional sense code qualifier
2470 *
2471 * Maps the given sense information into either descriptor or
2472 * fixed format sense data.
2473 *
2474 * Return: Nothing
2475 */
2476 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
2477 u8 asc, u8 ascq)
2478 {
2479 if (desc) {
2480 buf[0] = 0x72; /* descriptor, current */
2481 buf[1] = key;
2482 buf[2] = asc;
2483 buf[3] = ascq;
2484 buf[7] = 0;
2485 } else {
2486 buf[0] = 0x70; /* fixed, current */
2487 buf[2] = key;
2488 buf[7] = 0xa;
2489 buf[12] = asc;
2490 buf[13] = ascq;
2491 }
2492 }
2493
2494 /**
2495 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
2496 * @scmd: SCSI command reference
2497 * @ioc_status: status of MPI3 request
2498 *
2499 * Maps the EEDP error status of the SCSI IO request to sense
2500 * data.
2501 *
2502 * Return: Nothing
2503 */
2504 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
2505 u16 ioc_status)
2506 {
2507 u8 ascq = 0;
2508
2509 switch (ioc_status) {
2510 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
2511 ascq = 0x01;
2512 break;
2513 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
2514 ascq = 0x02;
2515 break;
2516 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
2517 ascq = 0x03;
2518 break;
2519 default:
2520 ascq = 0x00;
2521 break;
2522 }
2523
2524 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
2525 0x10, ascq);
2526 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
2527 }
2528
/**
 * mpi3mr_process_op_reply_desc - reply descriptor handler
 * @mrioc: Adapter instance reference
 * @reply_desc: Operational reply descriptor
 * @reply_dma: place holder for reply DMA address
 * @qidx: Operational queue index
 *
 * Processes the operational reply descriptor, identifies the
 * descriptor type and, based on it, maps the MPI3 request status
 * to a SCSI command status and invokes the scsi_done callback.
 * For address replies, *@reply_dma is filled so the caller can
 * repost the reply frame.
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;

	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	/* Extract status fields according to the descriptor flavor */
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full SCSI IO reply frame lives in host memory at the
		 * DMA address carried by the descriptor.
		 */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);
	if (success_desc) {
		/* Success descriptor: complete with DID_OK, skip mapping */
		scmd->result = DID_OK << 16;
		goto out_success;
	}

	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
	/* A zero-transfer underrun with BUSY/conflict/queue-full scsi
	 * status is treated as plain success so the scsi status stands.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}

	/* Map the MPI3 IOC status to a midlayer result code */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		/* Valid sense overrides any underrun-specific handling */
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		/* Protection info errors become ILLEGAL REQUEST sense */
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	/* Log failures except for ATA passthrough, which can legitimately
	 * complete with check conditions.
	 */
	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16)) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	if (priv->meta_sg_valid) {
		/* Protection SGL was mapped separately; unmap it here */
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scmd->scsi_done(scmd);
out:
	/* sense_buf is only non-NULL for address replies, so scsi_reply
	 * is valid here whenever sense_buf is.
	 */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
2736
2737 /**
2738 * mpi3mr_get_chain_idx - get free chain buffer index
2739 * @mrioc: Adapter instance reference
2740 *
2741 * Try to get a free chain buffer index from the free pool.
2742 *
2743 * Return: -1 on failure or the free chain buffer index
2744 */
2745 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
2746 {
2747 u8 retry_count = 5;
2748 int cmd_idx = -1;
2749
2750 do {
2751 spin_lock(&mrioc->chain_buf_lock);
2752 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
2753 mrioc->chain_buf_count);
2754 if (cmd_idx < mrioc->chain_buf_count) {
2755 set_bit(cmd_idx, mrioc->chain_bitmap);
2756 spin_unlock(&mrioc->chain_buf_lock);
2757 break;
2758 }
2759 spin_unlock(&mrioc->chain_buf_lock);
2760 cmd_idx = -1;
2761 } while (retry_count--);
2762 return cmd_idx;
2763 }
2764
/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps the SCSI command's data (or, when the
 * request carries MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI,
 * protection) scatterlist to MPI request SGEs. If the SGEs do
 * not fit in the request frame, a chain buffer is used for the
 * remainder.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 *	(note: returns -1, not -ENOMEM, when no chain buffer is
 *	available — TODO confirm callers treat any non-zero as
 *	failure)
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	/* Non-zero when this invocation builds the protection (meta) SGL */
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	/* Meta SGL occupies a fixed slot; data SGL starts at sgl[0] */
	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		/* No data transfer: a zero-length SGE is still required */
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	/* How many simple SGEs fit into the request frame itself */
	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	/* The meta SGL slot holds exactly one element */
	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	chain_idx = mpi3mr_get_chain_idx(mrioc);
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	/* Remember the chain index so completion can release it */
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	/* Last in-frame element points at the chain buffer */
	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining SGEs; the final one gets END_OF_LIST */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}
2905
2906 /**
2907 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
2908 * @mrioc: Adapter instance reference
2909 * @scmd: SCSI command reference
2910 * @scsiio_req: MPI3 SCSI IO request
2911 *
2912 * This function calls mpi3mr_prepare_sg_scmd for constructing
2913 * both data SGEs and protection information SGEs in the MPI
2914 * format from the SCSI Command as appropriate .
2915 *
2916 * Return: return value of mpi3mr_prepare_sg_scmd.
2917 */
2918 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
2919 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2920 {
2921 int ret;
2922
2923 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2924 if (ret)
2925 return ret;
2926
2927 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
2928 /* There is a valid meta sg */
2929 scsiio_req->flags |=
2930 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
2931 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2932 }
2933
2934 return ret;
2935 }
2936
2937 /**
2938 * mpi3mr_tm_response_name - get TM response as a string
2939 * @resp_code: TM response code
2940 *
2941 * Convert known task management response code as a readable
2942 * string.
2943 *
2944 * Return: response code string.
2945 */
2946 static const char *mpi3mr_tm_response_name(u8 resp_code)
2947 {
2948 char *desc;
2949
2950 switch (resp_code) {
2951 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
2952 desc = "task management request completed";
2953 break;
2954 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
2955 desc = "invalid frame";
2956 break;
2957 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
2958 desc = "task management request not supported";
2959 break;
2960 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
2961 desc = "task management request failed";
2962 break;
2963 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
2964 desc = "task management request succeeded";
2965 break;
2966 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
2967 desc = "invalid LUN";
2968 break;
2969 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
2970 desc = "overlapped tag attempted";
2971 break;
2972 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
2973 desc = "task queued, however not sent to target";
2974 break;
2975 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
2976 desc = "task management request denied by NVMe device";
2977 break;
2978 default:
2979 desc = "unknown";
2980 break;
2981 }
2982
2983 return desc;
2984 }
2985
2986 inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
2987 {
2988 int i;
2989 int num_of_reply_queues =
2990 mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
2991
2992 for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
2993 mpi3mr_process_op_reply_q(mrioc,
2994 mrioc->intr_info[i].op_reply_q);
2995 }
2996
/**
 * mpi3mr_issue_tm - Issue Task Management request
 * @mrioc: Adapter instance reference
 * @tm_type: Task Management type (MPI3_SCSITASKMGMT_TASKTYPE_*)
 * @handle: Device handle
 * @lun: lun ID
 * @htag: Host tag of the TM request
 * @timeout: TM timeout value in seconds (may be overridden for PCIe devices)
 * @drv_cmd: Internal command tracker (provides the mutex, completion
 *           and reply buffer used to serialize and track the TM)
 * @resp_code: Response code place holder
 * @scmd: SCSI command (NULL for TMs not tied to a specific command)
 *
 * Issues a Task Management Request to the controller for a
 * specified target, lun and command and wait for its completion
 * and check TM response. Recover the TM if it timed out by
 * issuing controller reset.
 *
 * Return: 0 on success, non-zero on errors
 */
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scsi_cmnd *scmd)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct scmd_priv *cmd_priv = NULL;
	struct scsi_device *sdev = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	/* No point issuing a TM once the controller is declared dead */
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	/*
	 * drv_cmd is a shared tracker; the mutex plus the PENDING state
	 * flag guarantee only one TM uses it at a time.
	 */
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);

	/*
	 * Resolve the target private data either from the scmd's device
	 * (command-scoped TM) or from the target device lookup above.
	 */
	if (scmd) {
		sdev = scmd->device;
		sdev_priv_data = sdev->hostdata;
		scsi_tgt_priv_data = ((sdev_priv_data) ?
		    sdev_priv_data->tgt_priv_data : NULL);
	} else {
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
	}

	/* Block new I/O to the target while the TM is outstanding */
	if (scsi_tgt_priv_data)
		atomic_inc(&scsi_tgt_priv_data->block_io);

	/*
	 * PCIe (NVMe) devices advertise their own TM timeouts.
	 * NOTE(review): cmd_priv is initialized to NULL and never
	 * assigned in this function, so the abort_to branch is dead and
	 * PCIe devices always take reset_to here — confirm whether
	 * cmd_priv was meant to be derived from scmd for abort TMs.
	 */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	/* Timed-out TM: the only recovery is a controller soft reset */
	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			dprint_tm(mrioc,
			    "task management request timed out after %ld seconds\n",
			    timeout);
			if (mrioc->logging_level & MPI3_DEBUG_TM)
				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
		dprint_tm(mrioc, "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}

	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	/* Map IOC status to a TM response code */
	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		*resp_code = le32_to_cpu(tm_reply->response_data) &
		    MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* IOC terminated the I/O itself; treat the TM as complete */
		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		dprint_tm(mrioc,
		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (*resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* "queued on IOC" is only a success for query-task TMs */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	dprint_tm(mrioc,
	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count),
	    mpi3mr_tm_response_name(*resp_code), *resp_code);

	/*
	 * Drain outstanding completions so the pend_count refresh below
	 * sees an up-to-date view; done once with interrupts masked and
	 * once again after re-enabling them.
	 */
	if (!retval) {
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
	}
	/* Recount commands still pending against the reset scope */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (!scsi_tgt_priv_data)
			break;
		scsi_tgt_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_tgt_pending,
		    (void *)scsi_tgt_priv_data->starget);
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		if (!sdev_priv_data)
			break;
		sdev_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	/* Unblock the target and drop the device reference taken above */
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}
3189
3190 /**
3191 * mpi3mr_bios_param - BIOS param callback
3192 * @sdev: SCSI device reference
3193 * @bdev: Block device reference
3194 * @capacity: Capacity in logical sectors
3195 * @params: Parameter array
3196 *
3197 * Just the parameters with heads/secots/cylinders.
3198 *
3199 * Return: 0 always
3200 */
3201 static int mpi3mr_bios_param(struct scsi_device *sdev,
3202 struct block_device *bdev, sector_t capacity, int params[])
3203 {
3204 int heads;
3205 int sectors;
3206 sector_t cylinders;
3207 ulong dummy;
3208
3209 heads = 64;
3210 sectors = 32;
3211
3212 dummy = heads * sectors;
3213 cylinders = capacity;
3214 sector_div(cylinders, dummy);
3215
3216 if ((ulong)capacity >= 0x200000) {
3217 heads = 255;
3218 sectors = 63;
3219 dummy = heads * sectors;
3220 cylinders = capacity;
3221 sector_div(cylinders, dummy);
3222 }
3223
3224 params[0] = heads;
3225 params[1] = sectors;
3226 params[2] = cylinders;
3227 return 0;
3228 }
3229
3230 /**
3231 * mpi3mr_map_queues - Map queues callback handler
3232 * @shost: SCSI host reference
3233 *
3234 * Maps default and poll queues.
3235 *
3236 * Return: return zero.
3237 */
3238 static int mpi3mr_map_queues(struct Scsi_Host *shost)
3239 {
3240 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3241 int i, qoff, offset;
3242 struct blk_mq_queue_map *map = NULL;
3243
3244 offset = mrioc->op_reply_q_offset;
3245
3246 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3247 map = &shost->tag_set.map[i];
3248
3249 map->nr_queues = 0;
3250
3251 if (i == HCTX_TYPE_DEFAULT)
3252 map->nr_queues = mrioc->default_qcount;
3253 else if (i == HCTX_TYPE_POLL)
3254 map->nr_queues = mrioc->active_poll_qcount;
3255
3256 if (!map->nr_queues) {
3257 BUG_ON(i == HCTX_TYPE_DEFAULT);
3258 continue;
3259 }
3260
3261 /*
3262 * The poll queue(s) doesn't have an IRQ (and hence IRQ
3263 * affinity), so use the regular blk-mq cpu mapping
3264 */
3265 map->queue_offset = qoff;
3266 if (i != HCTX_TYPE_POLL)
3267 blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3268 else
3269 blk_mq_map_queues(map);
3270
3271 qoff += map->nr_queues;
3272 offset += map->nr_queues;
3273 }
3274
3275 return 0;
3276
3277 }
3278
3279 /**
3280 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3281 * @mrioc: Adapter instance reference
3282 *
3283 * Calculate the pending I/Os for the controller and return.
3284 *
3285 * Return: Number of pending I/Os
3286 */
3287 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3288 {
3289 u16 i;
3290 uint pend_ios = 0;
3291
3292 for (i = 0; i < mrioc->num_op_reply_q; i++)
3293 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3294 return pend_ios;
3295 }
3296
3297 /**
3298 * mpi3mr_print_pending_host_io - print pending I/Os
3299 * @mrioc: Adapter instance reference
3300 *
3301 * Print number of pending I/Os and each I/O details prior to
3302 * reset for debug purpose.
3303 *
3304 * Return: Nothing
3305 */
3306 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3307 {
3308 struct Scsi_Host *shost = mrioc->shost;
3309
3310 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3311 __func__, mpi3mr_get_fw_pending_ios(mrioc));
3312 blk_mq_tagset_busy_iter(&shost->tag_set,
3313 mpi3mr_print_scmd, (void *)mrioc);
3314 }
3315
3316 /**
3317 * mpi3mr_wait_for_host_io - block for I/Os to complete
3318 * @mrioc: Adapter instance reference
3319 * @timeout: time out in seconds
3320 * Waits for pending I/Os for the given adapter to complete or
3321 * to hit the timeout.
3322 *
3323 * Return: Nothing
3324 */
3325 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3326 {
3327 enum mpi3mr_iocstate iocstate;
3328 int i = 0;
3329
3330 iocstate = mpi3mr_get_iocstate(mrioc);
3331 if (iocstate != MRIOC_STATE_READY)
3332 return;
3333
3334 if (!mpi3mr_get_fw_pending_ios(mrioc))
3335 return;
3336 ioc_info(mrioc,
3337 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3338 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3339
3340 for (i = 0; i < timeout; i++) {
3341 if (!mpi3mr_get_fw_pending_ios(mrioc))
3342 break;
3343 iocstate = mpi3mr_get_iocstate(mrioc);
3344 if (iocstate != MRIOC_STATE_READY)
3345 break;
3346 msleep(1000);
3347 }
3348
3349 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3350 mpi3mr_get_fw_pending_ios(mrioc));
3351 }
3352
3353 /**
3354 * mpi3mr_eh_host_reset - Host reset error handling callback
3355 * @scmd: SCSI command reference
3356 *
3357 * Issue controller reset if the scmd is for a Physical Device,
3358 * if the scmd is for RAID volume, then wait for
3359 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and checke whether any
3360 * pending I/Os prior to issuing reset to the controller.
3361 *
3362 * Return: SUCCESS of successful reset else FAILED
3363 */
3364 static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
3365 {
3366 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3367 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3368 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3369 u8 dev_type = MPI3_DEVICE_DEVFORM_VD;
3370 int retval = FAILED, ret;
3371
3372 sdev_priv_data = scmd->device->hostdata;
3373 if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
3374 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3375 dev_type = stgt_priv_data->dev_type;
3376 }
3377
3378 if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
3379 mpi3mr_wait_for_host_io(mrioc,
3380 MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
3381 if (!mpi3mr_get_fw_pending_ios(mrioc)) {
3382 retval = SUCCESS;
3383 goto out;
3384 }
3385 }
3386
3387 mpi3mr_print_pending_host_io(mrioc);
3388 ret = mpi3mr_soft_reset_handler(mrioc,
3389 MPI3MR_RESET_FROM_EH_HOS, 1);
3390 if (ret)
3391 goto out;
3392
3393 retval = SUCCESS;
3394 out:
3395 sdev_printk(KERN_INFO, scmd->device,
3396 "Host reset is %s for scmd(%p)\n",
3397 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3398
3399 return retval;
3400 }
3401
3402 /**
3403 * mpi3mr_eh_target_reset - Target reset error handling callback
3404 * @scmd: SCSI command reference
3405 *
3406 * Issue Target reset Task Management and verify the scmd is
3407 * terminated successfully and return status accordingly.
3408 *
3409 * Return: SUCCESS of successful termination of the scmd else
3410 * FAILED
3411 */
3412 static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
3413 {
3414 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3415 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3416 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3417 u16 dev_handle;
3418 u8 resp_code = 0;
3419 int retval = FAILED, ret = 0;
3420
3421 sdev_printk(KERN_INFO, scmd->device,
3422 "Attempting Target Reset! scmd(%p)\n", scmd);
3423 scsi_print_command(scmd);
3424
3425 sdev_priv_data = scmd->device->hostdata;
3426 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
3427 sdev_printk(KERN_INFO, scmd->device,
3428 "SCSI device is not available\n");
3429 retval = SUCCESS;
3430 goto out;
3431 }
3432
3433 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3434 dev_handle = stgt_priv_data->dev_handle;
3435 if (stgt_priv_data->dev_removed) {
3436 sdev_printk(KERN_INFO, scmd->device,
3437 "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
3438 mrioc->name, dev_handle);
3439 retval = FAILED;
3440 goto out;
3441 }
3442 sdev_printk(KERN_INFO, scmd->device,
3443 "Target Reset is issued to handle(0x%04x)\n",
3444 dev_handle);
3445
3446 ret = mpi3mr_issue_tm(mrioc,
3447 MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
3448 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
3449 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
3450
3451 if (ret)
3452 goto out;
3453
3454 if (stgt_priv_data->pend_count) {
3455 sdev_printk(KERN_INFO, scmd->device,
3456 "%s: target has %d pending commands, target reset is failed\n",
3457 mrioc->name, sdev_priv_data->pend_count);
3458 goto out;
3459 }
3460
3461 retval = SUCCESS;
3462 out:
3463 sdev_printk(KERN_INFO, scmd->device,
3464 "%s: target reset is %s for scmd(%p)\n", mrioc->name,
3465 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3466
3467 return retval;
3468 }
3469
3470 /**
3471 * mpi3mr_eh_dev_reset- Device reset error handling callback
3472 * @scmd: SCSI command reference
3473 *
3474 * Issue lun reset Task Management and verify the scmd is
3475 * terminated successfully and return status accordingly.
3476 *
3477 * Return: SUCCESS of successful termination of the scmd else
3478 * FAILED
3479 */
3480 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
3481 {
3482 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3483 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3484 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3485 u16 dev_handle;
3486 u8 resp_code = 0;
3487 int retval = FAILED, ret = 0;
3488
3489 sdev_printk(KERN_INFO, scmd->device,
3490 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
3491 scsi_print_command(scmd);
3492
3493 sdev_priv_data = scmd->device->hostdata;
3494 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
3495 sdev_printk(KERN_INFO, scmd->device,
3496 "SCSI device is not available\n");
3497 retval = SUCCESS;
3498 goto out;
3499 }
3500
3501 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3502 dev_handle = stgt_priv_data->dev_handle;
3503 if (stgt_priv_data->dev_removed) {
3504 sdev_printk(KERN_INFO, scmd->device,
3505 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
3506 mrioc->name, dev_handle);
3507 retval = FAILED;
3508 goto out;
3509 }
3510 sdev_printk(KERN_INFO, scmd->device,
3511 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
3512
3513 ret = mpi3mr_issue_tm(mrioc,
3514 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
3515 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
3516 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
3517
3518 if (ret)
3519 goto out;
3520
3521 if (sdev_priv_data->pend_count) {
3522 sdev_printk(KERN_INFO, scmd->device,
3523 "%s: device has %d pending commands, device(LUN) reset is failed\n",
3524 mrioc->name, sdev_priv_data->pend_count);
3525 goto out;
3526 }
3527 retval = SUCCESS;
3528 out:
3529 sdev_printk(KERN_INFO, scmd->device,
3530 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
3531 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3532
3533 return retval;
3534 }
3535
3536 /**
3537 * mpi3mr_scan_start - Scan start callback handler
3538 * @shost: SCSI host reference
3539 *
3540 * Issue port enable request asynchronously.
3541 *
3542 * Return: Nothing
3543 */
3544 static void mpi3mr_scan_start(struct Scsi_Host *shost)
3545 {
3546 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3547
3548 mrioc->scan_started = 1;
3549 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
3550 if (mpi3mr_issue_port_enable(mrioc, 1)) {
3551 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
3552 mrioc->scan_started = 0;
3553 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3554 }
3555 }
3556
3557 /**
3558 * mpi3mr_scan_finished - Scan finished callback handler
3559 * @shost: SCSI host reference
3560 * @time: Jiffies from the scan start
3561 *
3562 * Checks whether the port enable is completed or timedout or
3563 * failed and set the scan status accordingly after taking any
3564 * recovery if required.
3565 *
3566 * Return: 1 on scan finished or timed out, 0 for in progress
3567 */
3568 static int mpi3mr_scan_finished(struct Scsi_Host *shost,
3569 unsigned long time)
3570 {
3571 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3572 u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
3573 u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
3574
3575 if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
3576 (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
3577 ioc_err(mrioc, "port enable failed due to fault or reset\n");
3578 mpi3mr_print_fault_info(mrioc);
3579 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3580 mrioc->scan_started = 0;
3581 mrioc->init_cmds.is_waiting = 0;
3582 mrioc->init_cmds.callback = NULL;
3583 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3584 }
3585
3586 if (time >= (pe_timeout * HZ)) {
3587 ioc_err(mrioc, "port enable failed due to time out\n");
3588 mpi3mr_check_rh_fault_ioc(mrioc,
3589 MPI3MR_RESET_FROM_PE_TIMEOUT);
3590 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3591 mrioc->scan_started = 0;
3592 mrioc->init_cmds.is_waiting = 0;
3593 mrioc->init_cmds.callback = NULL;
3594 mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
3595 }
3596
3597 if (mrioc->scan_started)
3598 return 0;
3599
3600 if (mrioc->scan_failed) {
3601 ioc_err(mrioc,
3602 "port enable failed with status=0x%04x\n",
3603 mrioc->scan_failed);
3604 } else
3605 ioc_info(mrioc, "port enable is successfully completed\n");
3606
3607 mpi3mr_start_watchdog(mrioc);
3608 mrioc->is_driver_loading = 0;
3609 return 1;
3610 }
3611
3612 /**
3613 * mpi3mr_slave_destroy - Slave destroy callback handler
3614 * @sdev: SCSI device reference
3615 *
3616 * Cleanup and free per device(lun) private data.
3617 *
3618 * Return: Nothing.
3619 */
3620 static void mpi3mr_slave_destroy(struct scsi_device *sdev)
3621 {
3622 struct Scsi_Host *shost;
3623 struct mpi3mr_ioc *mrioc;
3624 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3625 struct mpi3mr_tgt_dev *tgt_dev;
3626 unsigned long flags;
3627 struct scsi_target *starget;
3628
3629 if (!sdev->hostdata)
3630 return;
3631
3632 starget = scsi_target(sdev);
3633 shost = dev_to_shost(&starget->dev);
3634 mrioc = shost_priv(shost);
3635 scsi_tgt_priv_data = starget->hostdata;
3636
3637 scsi_tgt_priv_data->num_luns--;
3638
3639 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3640 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
3641 if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
3642 tgt_dev->starget = NULL;
3643 if (tgt_dev)
3644 mpi3mr_tgtdev_put(tgt_dev);
3645 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3646
3647 kfree(sdev->hostdata);
3648 sdev->hostdata = NULL;
3649 }
3650
/**
 * mpi3mr_target_destroy - Target destroy callback handler
 * @starget: SCSI target reference
 *
 * Cleanup and free per target private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	if (!starget->hostdata)
		return;

	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
	/* Break the association only if the device still points at us */
	if (tgt_dev && (tgt_dev->starget == starget) &&
	    (tgt_dev->perst_id == starget->id))
		tgt_dev->starget = NULL;
	if (tgt_dev) {
		scsi_tgt_priv_data->tgt_dev = NULL;
		scsi_tgt_priv_data->perst_id = 0;
		/*
		 * Two puts: one balances the lookup reference taken just
		 * above, the second presumably drops the reference cached
		 * in scsi_tgt_priv_data->tgt_dev by mpi3mr_target_alloc()
		 * -- NOTE(review): confirm against the refcount semantics
		 * of __mpi3mr_get_tgtdev_from_tgtpriv()/target_alloc().
		 */
		mpi3mr_tgtdev_put(tgt_dev);
		mpi3mr_tgtdev_put(tgt_dev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(starget->hostdata);
	starget->hostdata = NULL;
}
3690
3691 /**
3692 * mpi3mr_slave_configure - Slave configure callback handler
3693 * @sdev: SCSI device reference
3694 *
3695 * Configure queue depth, max hardware sectors and virt boundary
3696 * as required
3697 *
3698 * Return: 0 always.
3699 */
3700 static int mpi3mr_slave_configure(struct scsi_device *sdev)
3701 {
3702 struct scsi_target *starget;
3703 struct Scsi_Host *shost;
3704 struct mpi3mr_ioc *mrioc;
3705 struct mpi3mr_tgt_dev *tgt_dev;
3706 unsigned long flags;
3707 int retval = 0;
3708
3709 starget = scsi_target(sdev);
3710 shost = dev_to_shost(&starget->dev);
3711 mrioc = shost_priv(shost);
3712
3713 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3714 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
3715 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3716 if (!tgt_dev)
3717 return -ENXIO;
3718
3719 mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
3720 switch (tgt_dev->dev_type) {
3721 case MPI3_DEVICE_DEVFORM_PCIE:
3722 /*The block layer hw sector size = 512*/
3723 if ((tgt_dev->dev_spec.pcie_inf.dev_info &
3724 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
3725 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
3726 blk_queue_max_hw_sectors(sdev->request_queue,
3727 tgt_dev->dev_spec.pcie_inf.mdts / 512);
3728 if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
3729 blk_queue_virt_boundary(sdev->request_queue,
3730 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
3731 else
3732 blk_queue_virt_boundary(sdev->request_queue,
3733 ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
3734 }
3735 break;
3736 default:
3737 break;
3738 }
3739
3740 mpi3mr_tgtdev_put(tgt_dev);
3741
3742 return retval;
3743 }
3744
3745 /**
3746 * mpi3mr_slave_alloc -Slave alloc callback handler
3747 * @sdev: SCSI device reference
3748 *
3749 * Allocate per device(lun) private data and initialize it.
3750 *
3751 * Return: 0 on success -ENOMEM on memory allocation failure.
3752 */
3753 static int mpi3mr_slave_alloc(struct scsi_device *sdev)
3754 {
3755 struct Scsi_Host *shost;
3756 struct mpi3mr_ioc *mrioc;
3757 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
3758 struct mpi3mr_tgt_dev *tgt_dev;
3759 struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
3760 unsigned long flags;
3761 struct scsi_target *starget;
3762 int retval = 0;
3763
3764 starget = scsi_target(sdev);
3765 shost = dev_to_shost(&starget->dev);
3766 mrioc = shost_priv(shost);
3767 scsi_tgt_priv_data = starget->hostdata;
3768
3769 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
3770 tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
3771
3772 if (tgt_dev) {
3773 if (tgt_dev->starget == NULL)
3774 tgt_dev->starget = starget;
3775 mpi3mr_tgtdev_put(tgt_dev);
3776 retval = 0;
3777 } else {
3778 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3779 return -ENXIO;
3780 }
3781
3782 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
3783
3784 scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
3785 if (!scsi_dev_priv_data)
3786 return -ENOMEM;
3787
3788 scsi_dev_priv_data->lun_id = sdev->lun;
3789 scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
3790 sdev->hostdata = scsi_dev_priv_data;
3791
3792 scsi_tgt_priv_data->num_luns++;
3793
3794 return retval;
3795 }
3796
/**
 * mpi3mr_target_alloc - Target alloc callback handler
 * @starget: SCSI target reference
 *
 * Allocate per target private data and initialize it.
 *
 * Return: 0 on success -ENOMEM on memory allocation failure.
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	if (tgt_dev && !tgt_dev->is_hidden) {
		/*
		 * Cache the device handle/type and cross-link starget and
		 * tgt_dev. Unlike slave_configure/slave_destroy, no
		 * mpi3mr_tgtdev_put() follows the lookup here: the
		 * reference is presumably retained in
		 * scsi_tgt_priv_data->tgt_dev and released by the double
		 * put in mpi3mr_target_destroy() -- NOTE(review): confirm.
		 */
		scsi_tgt_priv_data->starget = starget;
		scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
		scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
		scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
		scsi_tgt_priv_data->tgt_dev = tgt_dev;
		tgt_dev->starget = starget;
		atomic_set(&scsi_tgt_priv_data->block_io, 0);
		retval = 0;
	} else
		/*
		 * NOTE(review): on this -ENXIO path starget->hostdata is
		 * left allocated and a hidden tgt_dev's lookup reference
		 * is not dropped -- presumably harmless because the core
		 * skips target_destroy after a failed alloc; verify.
		 */
		retval = -ENXIO;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}
3837
/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives, this routine checks those and return true
 * and completes the SCSI command with proper status and sense
 * data.
 *
 * Return: TRUE for not allowed unmap, FALSE otherwise.
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	/* UNMAP CDB bytes 7-8 carry the parameter list length */
	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);

	/*
	 * Non-zero PCI revision: newer controller stepping. Only round
	 * the parameter list length down to a 16-byte descriptor
	 * boundary (rewriting the CDB in place) and let the command
	 * through.
	 */
	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	/*
	 * Revision-0 controller: validate the parameter list strictly
	 * and fail malformed UNMAPs in the driver. A zero length is a
	 * no-op per SBC, so complete it successfully.
	 */
	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scmd->scsi_done(scmd);
		return true;
	}

	/* Shorter than header (8) + one 16-byte block descriptor */
	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		/* ILLEGAL REQUEST, ASC 0x1A: parameter list length error */
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scmd->scsi_done(scmd);
		return true;
	}
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scmd->scsi_done(scmd);
		return true;
	}
	/* Copy the parameter list out to inspect the descriptor length */
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		/* ASC/ASCQ 0x55/0x03: insufficient resources */
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scmd->scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	/* Bytes 2-3 of the parameter header: block descriptor data length */
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		/* ASC 0x26: invalid field in parameter list */
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scmd->scsi_done(scmd);
		kfree(buf);
		return true;
	}

	/* Clamp the CDB length to what the descriptors actually cover */
	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}
3941
3942 /**
3943 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
3944 * @scmd: SCSI Command reference
3945 *
3946 * Checks whether a cdb is allowed during shutdown or not.
3947 *
3948 * Return: TRUE for allowed commands, FALSE otherwise.
3949 */
3950
3951 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
3952 {
3953 switch (scmd->cmnd[0]) {
3954 case SYNCHRONIZE_CACHE:
3955 case START_STOP:
3956 return true;
3957 default:
3958 return false;
3959 }
3960 }
3961
/**
 * mpi3mr_qcmd - I/O request despatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 *         request is completed with failure.
 *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;

	/* No private data means the device is (being) torn down */
	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* During driver shutdown only whitelisted CDBs go to firmware */
	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* Reset in flight: ask the midlayer to retry later */
	if (mrioc->reset_in_progress) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;

	dev_handle = stgt_priv_data->dev_handle;
	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/*
	 * block_io is raised while a TM or device transition is in
	 * progress; fail the I/O on shutdown, otherwise ask for retry.
	 */
	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	/*
	 * Work around SAS4116 rev-0 UNMAP limitations for NVMe devices;
	 * mpi3mr_check_return_unmap() completes disallowed commands.
	 */
	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	/* Propagate RT I/O priority as command priority when enabled */
	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	/* Build the MPI3 SCSI IO request in the preallocated frame */
	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	/* SGL build or queue post failure: undo priv setup and retry */
	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

out:
	return retval;
}
4098
/*
 * SCSI mid-layer host template. .can_queue and .max_id are
 * placeholders here; mpi3mr_probe() overwrites them (along with
 * nr_hw_queues/nr_maps) from controller facts before scsi_add_host().
 */
static struct scsi_host_template mpi3mr_driver_template = {
	.module = THIS_MODULE,
	.name = "MPI3 Storage Controller",
	.proc_name = MPI3MR_DRIVER_NAME,
	.queuecommand = mpi3mr_qcmd,
	.target_alloc = mpi3mr_target_alloc,
	.slave_alloc = mpi3mr_slave_alloc,
	.slave_configure = mpi3mr_slave_configure,
	.target_destroy = mpi3mr_target_destroy,
	.slave_destroy = mpi3mr_slave_destroy,
	.scan_finished = mpi3mr_scan_finished,
	.scan_start = mpi3mr_scan_start,
	.change_queue_depth = mpi3mr_change_queue_depth,
	.eh_device_reset_handler = mpi3mr_eh_dev_reset,
	.eh_target_reset_handler = mpi3mr_eh_target_reset,
	.eh_host_reset_handler = mpi3mr_eh_host_reset,
	.bios_param = mpi3mr_bios_param,
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
	.can_queue = 1, /* placeholder; set to mrioc->max_host_ios in probe */
	.this_id = -1,
	.sg_tablesize = MPI3MR_SG_DEPTH,
	/* max xfer supported is 1M (2K in 512 byte sized sectors)
	 */
	.max_sectors = 2048,
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct scmd_priv), /* per-command private data */
};
4130
4131 /**
4132 * mpi3mr_init_drv_cmd - Initialize internal command tracker
4133 * @cmdptr: Internal command tracker
4134 * @host_tag: Host tag used for the specific command
4135 *
4136 * Initialize the internal command tracker structure with
4137 * specified host tag.
4138 *
4139 * Return: Nothing.
4140 */
4141 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4142 u16 host_tag)
4143 {
4144 mutex_init(&cmdptr->mutex);
4145 cmdptr->reply = NULL;
4146 cmdptr->state = MPI3MR_CMD_NOTUSED;
4147 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4148 cmdptr->host_tag = host_tag;
4149 }
4150
4151 /**
4152 * osintfc_mrioc_security_status -Check controller secure status
4153 * @pdev: PCI device instance
4154 *
4155 * Read the Device Serial Number capability from PCI config
4156 * space and decide whether the controller is secure or not.
4157 *
4158 * Return: 0 on success, non-zero on failure.
4159 */
4160 static int
4161 osintfc_mrioc_security_status(struct pci_dev *pdev)
4162 {
4163 u32 cap_data;
4164 int base;
4165 u32 ctlr_status;
4166 u32 debug_status;
4167 int retval = 0;
4168
4169 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4170 if (!base) {
4171 dev_err(&pdev->dev,
4172 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4173 return -1;
4174 }
4175
4176 pci_read_config_dword(pdev, base + 4, &cap_data);
4177
4178 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4179 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
4180
4181 switch (ctlr_status) {
4182 case MPI3MR_INVALID_DEVICE:
4183 dev_err(&pdev->dev,
4184 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4185 __func__, pdev->device, pdev->subsystem_vendor,
4186 pdev->subsystem_device);
4187 retval = -1;
4188 break;
4189 case MPI3MR_CONFIG_SECURE_DEVICE:
4190 if (!debug_status)
4191 dev_info(&pdev->dev,
4192 "%s: Config secure ctlr is detected\n",
4193 __func__);
4194 break;
4195 case MPI3MR_HARD_SECURE_DEVICE:
4196 break;
4197 case MPI3MR_TAMPERED_DEVICE:
4198 dev_err(&pdev->dev,
4199 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4200 __func__, pdev->device, pdev->subsystem_vendor,
4201 pdev->subsystem_device);
4202 retval = -1;
4203 break;
4204 default:
4205 retval = -1;
4206 break;
4207 }
4208
4209 if (!retval && debug_status) {
4210 dev_err(&pdev->dev,
4211 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4212 __func__, pdev->device, pdev->subsystem_vendor,
4213 pdev->subsystem_device);
4214 retval = -1;
4215 }
4216
4217 return retval;
4218 }
4219
/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * Controller initialization routine. Checks the security status
 * of the controller and if it is invalid or tampered return the
 * probe without initializing the controller. Otherwise,
 * allocate per adapter instance through shost_priv and
 * initialize controller specific data structures, initialize
 * the controller hardware, add shost to the SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */

static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	/* Refuse to manage non-secure controllers; remember it so the
	 * module exit path can warn about it.
	 */
	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	/* NOTE(review): mrioc_ids is a plain int with no locking; this
	 * assumes probes are serialized - confirm for async probing.
	 */
	mrioc->id = mrioc_ids++;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);

	mutex_init(&mrioc->reset_mutex);
	/* Internal command trackers bound to their fixed host tags */
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	/* Segmented queues enabled for all non-zero PCI revisions;
	 * presumably revision 0 parts do not support them - confirm.
	 */
	if (pdev->revision)
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	/* Placeholder; refined from controller facts after IOC init below */
	shost->max_id = 0xFFFFFFFF;

	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
	/* Ordered workqueue: firmware events are processed one at a time */
	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	shost->nr_hw_queues = mrioc->num_op_reply_q;
	/* 3 queue maps when poll queues are active - presumably
	 * default/read/poll; see mpi3mr_map_queues
	 */
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = MPI3MR_SG_DEPTH;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	return retval;

	/* Error unwind: each label undoes the setup performed before the
	 * corresponding failure point, in reverse order.
	 */
addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
	scsi_host_put(shost);
shost_failed:
	return retval;
}
4383
/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial driver load to finish
	 * before tearing the controller down.
	 */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Detach the firmware-event workqueue under the fwevt lock so no
	 * new work can be queued, then destroy it outside the lock.
	 */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	scsi_remove_host(shost);

	/* Remove every tracked target device and drop its reference */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}
4436
/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial driver load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Detach the firmware-event workqueue under the fwevt lock, then
	 * destroy it outside the lock.
	 */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}
4473
4474 #ifdef CONFIG_PM
/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @pdev: PCI device instance
 * @state: New power state
 *
 * Change the power state to the given value and cleanup the IOC
 * by issuing MUR and shutdown notification
 *
 * Return: 0 always.
 */
static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial driver load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Block new I/O from the mid-layer before shutting the IOC down;
	 * unblocked again in mpi3mr_resume().
	 */
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	device_state = pci_choose_state(pdev, state);
	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	pci_save_state(pdev);
	pci_set_power_state(pdev, device_state);
	mpi3mr_cleanup_resources(mrioc);

	return 0;
}
4512
/**
 * mpi3mr_resume - PCI power management resume callback
 * @pdev: PCI device instance
 *
 * Restore the power state to D0 and reinitialize the controller
 * and resume I/O operations to the target devices
 *
 * Return: 0 on success, non-zero on failure
 */
static int mpi3mr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	/* Clear host buffers and re-run controller initialization; the
	 * second argument of 1 presumably flags the resume path - confirm
	 * against mpi3mr_reinit_ioc().
	 */
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	/* Resume mid-layer I/O blocked in mpi3mr_suspend() */
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
4560 #endif
4561
/* PCI IDs claimed by this driver: Broadcom SAS4116, any subsystem IDs */
static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
4570
/* PCI driver callbacks; suspend/resume are wired only when power
 * management support is built in.
 */
static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
#ifdef CONFIG_PM
	.suspend = mpi3mr_suspend,
	.resume = mpi3mr_resume,
#endif
};
4582
4583 static int __init mpi3mr_init(void)
4584 {
4585 int ret_val;
4586
4587 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
4588 MPI3MR_DRIVER_VERSION);
4589
4590 ret_val = pci_register_driver(&mpi3mr_pci_driver);
4591
4592 return ret_val;
4593 }
4594
4595 static void __exit mpi3mr_exit(void)
4596 {
4597 if (warn_non_secure_ctlr)
4598 pr_warn(
4599 "Unloading %s version %s while managing a non secure controller\n",
4600 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
4601 else
4602 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
4603 MPI3MR_DRIVER_VERSION);
4604
4605 pci_unregister_driver(&mpi3mr_pci_driver);
4606 }
4607
4608 module_init(mpi3mr_init);
4609 module_exit(mpi3mr_exit);