]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blob - drivers/scsi/mpi3mr/mpi3mr_os.c
scsi: mpi3mr: Fix hibernation issue
[mirror_ubuntu-jammy-kernel.git] / drivers / scsi / mpi3mr / mpi3mr_os.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Driver for Broadcom MPI3 Storage Controllers
4 *
5 * Copyright (C) 2017-2021 Broadcom Inc.
6 * (mailto: mpi3mr-linuxdrv.pdl@broadcom.com)
7 *
8 */
9
10 #include "mpi3mr.h"
11
12 /* global driver scope variables */
13 LIST_HEAD(mrioc_list);
14 DEFINE_SPINLOCK(mrioc_list_lock);
15 static int mrioc_ids;
16 static int warn_non_secure_ctlr;
17
18 MODULE_AUTHOR(MPI3MR_DRIVER_AUTHOR);
19 MODULE_DESCRIPTION(MPI3MR_DRIVER_DESC);
20 MODULE_LICENSE(MPI3MR_DRIVER_LICENSE);
21 MODULE_VERSION(MPI3MR_DRIVER_VERSION);
22
23 /* Module parameters*/
24 int prot_mask = -1;
25 module_param(prot_mask, int, 0);
26 MODULE_PARM_DESC(prot_mask, "Host protection capabilities mask, def=0x07");
27
28 static int prot_guard_mask = 3;
29 module_param(prot_guard_mask, int, 0);
30 MODULE_PARM_DESC(prot_guard_mask, " Host protection guard mask, def=3");
31 static int logging_level;
32 module_param(logging_level, int, 0);
33 MODULE_PARM_DESC(logging_level,
34 " bits for enabling additional logging info (default=0)");
35
36 /* Forward declarations*/
37 static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
38 struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
39
40 /**
41 * mpi3mr_host_tag_for_scmd - Get host tag for a scmd
42 * @mrioc: Adapter instance reference
43 * @scmd: SCSI command reference
44 *
45 * Calculate the host tag based on block tag for a given scmd.
46 *
47 * Return: Valid host tag or MPI3MR_HOSTTAG_INVALID.
48 */
49 static u16 mpi3mr_host_tag_for_scmd(struct mpi3mr_ioc *mrioc,
50 struct scsi_cmnd *scmd)
51 {
52 struct scmd_priv *priv = NULL;
53 u32 unique_tag;
54 u16 host_tag, hw_queue;
55
56 unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
57
58 hw_queue = blk_mq_unique_tag_to_hwq(unique_tag);
59 if (hw_queue >= mrioc->num_op_reply_q)
60 return MPI3MR_HOSTTAG_INVALID;
61 host_tag = blk_mq_unique_tag_to_tag(unique_tag);
62
63 if (WARN_ON(host_tag >= mrioc->max_host_ios))
64 return MPI3MR_HOSTTAG_INVALID;
65
66 priv = scsi_cmd_priv(scmd);
67 /*host_tag 0 is invalid hence incrementing by 1*/
68 priv->host_tag = host_tag + 1;
69 priv->scmd = scmd;
70 priv->in_lld_scope = 1;
71 priv->req_q_idx = hw_queue;
72 priv->meta_chain_idx = -1;
73 priv->chain_idx = -1;
74 priv->meta_sg_valid = 0;
75 return priv->host_tag;
76 }
77
78 /**
79 * mpi3mr_scmd_from_host_tag - Get SCSI command from host tag
80 * @mrioc: Adapter instance reference
81 * @host_tag: Host tag
82 * @qidx: Operational queue index
83 *
84 * Identify the block tag from the host tag and queue index and
85 * retrieve associated scsi command using scsi_host_find_tag().
86 *
87 * Return: SCSI command reference or NULL.
88 */
89 static struct scsi_cmnd *mpi3mr_scmd_from_host_tag(
90 struct mpi3mr_ioc *mrioc, u16 host_tag, u16 qidx)
91 {
92 struct scsi_cmnd *scmd = NULL;
93 struct scmd_priv *priv = NULL;
94 u32 unique_tag = host_tag - 1;
95
96 if (WARN_ON(host_tag > mrioc->max_host_ios))
97 goto out;
98
99 unique_tag |= (qidx << BLK_MQ_UNIQUE_TAG_BITS);
100
101 scmd = scsi_host_find_tag(mrioc->shost, unique_tag);
102 if (scmd) {
103 priv = scsi_cmd_priv(scmd);
104 if (!priv->in_lld_scope)
105 scmd = NULL;
106 }
107 out:
108 return scmd;
109 }
110
111 /**
112 * mpi3mr_clear_scmd_priv - Cleanup SCSI command private date
113 * @mrioc: Adapter instance reference
114 * @scmd: SCSI command reference
115 *
116 * Invalidate the SCSI command private data to mark the command
117 * is not in LLD scope anymore.
118 *
119 * Return: Nothing.
120 */
121 static void mpi3mr_clear_scmd_priv(struct mpi3mr_ioc *mrioc,
122 struct scsi_cmnd *scmd)
123 {
124 struct scmd_priv *priv = NULL;
125
126 priv = scsi_cmd_priv(scmd);
127
128 if (WARN_ON(priv->in_lld_scope == 0))
129 return;
130 priv->host_tag = MPI3MR_HOSTTAG_INVALID;
131 priv->req_q_idx = 0xFFFF;
132 priv->scmd = NULL;
133 priv->in_lld_scope = 0;
134 priv->meta_sg_valid = 0;
135 if (priv->chain_idx >= 0) {
136 clear_bit(priv->chain_idx, mrioc->chain_bitmap);
137 priv->chain_idx = -1;
138 }
139 if (priv->meta_chain_idx >= 0) {
140 clear_bit(priv->meta_chain_idx, mrioc->chain_bitmap);
141 priv->meta_chain_idx = -1;
142 }
143 }
144
145 static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
146 struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc);
147 static void mpi3mr_fwevt_worker(struct work_struct *work);
148
149 /**
150 * mpi3mr_fwevt_free - firmware event memory dealloctor
151 * @r: k reference pointer of the firmware event
152 *
153 * Free firmware event memory when no reference.
154 */
155 static void mpi3mr_fwevt_free(struct kref *r)
156 {
157 kfree(container_of(r, struct mpi3mr_fwevt, ref_count));
158 }
159
160 /**
161 * mpi3mr_fwevt_get - k reference incrementor
162 * @fwevt: Firmware event reference
163 *
164 * Increment firmware event reference count.
165 */
166 static void mpi3mr_fwevt_get(struct mpi3mr_fwevt *fwevt)
167 {
168 kref_get(&fwevt->ref_count);
169 }
170
171 /**
172 * mpi3mr_fwevt_put - k reference decrementor
173 * @fwevt: Firmware event reference
174 *
175 * decrement firmware event reference count.
176 */
177 static void mpi3mr_fwevt_put(struct mpi3mr_fwevt *fwevt)
178 {
179 kref_put(&fwevt->ref_count, mpi3mr_fwevt_free);
180 }
181
182 /**
183 * mpi3mr_alloc_fwevt - Allocate firmware event
184 * @len: length of firmware event data to allocate
185 *
186 * Allocate firmware event with required length and initialize
187 * the reference counter.
188 *
189 * Return: firmware event reference.
190 */
191 static struct mpi3mr_fwevt *mpi3mr_alloc_fwevt(int len)
192 {
193 struct mpi3mr_fwevt *fwevt;
194
195 fwevt = kzalloc(sizeof(*fwevt) + len, GFP_ATOMIC);
196 if (!fwevt)
197 return NULL;
198
199 kref_init(&fwevt->ref_count);
200 return fwevt;
201 }
202
/**
 * mpi3mr_fwevt_add_to_list - Add firmware event to the list
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Add the given firmware event to the firmware event list and
 * queue its work item on the firmware event worker thread. Two
 * extra references are taken on the event: one for membership in
 * fwevt_list and one for the queued work item; each is dropped
 * when the corresponding owner releases the event.
 *
 * Return: Nothing.
 */
static void mpi3mr_fwevt_add_to_list(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	unsigned long flags;

	/* no worker thread to service events (e.g. during teardown) */
	if (!mrioc->fwevt_worker_thread)
		return;

	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	/* get fwevt reference count while adding it to fwevt_list */
	mpi3mr_fwevt_get(fwevt);
	INIT_LIST_HEAD(&fwevt->list);
	list_add_tail(&fwevt->list, &mrioc->fwevt_list);
	INIT_WORK(&fwevt->work, mpi3mr_fwevt_worker);
	/* get fwevt reference count while enqueueing it to worker queue */
	mpi3mr_fwevt_get(fwevt);
	queue_work(mrioc->fwevt_worker_thread, &fwevt->work);
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
}
231
232 /**
233 * mpi3mr_fwevt_del_from_list - Delete firmware event from list
234 * @mrioc: Adapter instance reference
235 * @fwevt: Firmware event reference
236 *
237 * Delete the given firmware event from the firmware event list.
238 *
239 * Return: Nothing.
240 */
241 static void mpi3mr_fwevt_del_from_list(struct mpi3mr_ioc *mrioc,
242 struct mpi3mr_fwevt *fwevt)
243 {
244 unsigned long flags;
245
246 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
247 if (!list_empty(&fwevt->list)) {
248 list_del_init(&fwevt->list);
249 /*
250 * Put fwevt reference count after
251 * removing it from fwevt_list
252 */
253 mpi3mr_fwevt_put(fwevt);
254 }
255 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
256 }
257
258 /**
259 * mpi3mr_dequeue_fwevt - Dequeue firmware event from the list
260 * @mrioc: Adapter instance reference
261 *
262 * Dequeue a firmware event from the firmware event list.
263 *
264 * Return: firmware event.
265 */
266 static struct mpi3mr_fwevt *mpi3mr_dequeue_fwevt(
267 struct mpi3mr_ioc *mrioc)
268 {
269 unsigned long flags;
270 struct mpi3mr_fwevt *fwevt = NULL;
271
272 spin_lock_irqsave(&mrioc->fwevt_lock, flags);
273 if (!list_empty(&mrioc->fwevt_list)) {
274 fwevt = list_first_entry(&mrioc->fwevt_list,
275 struct mpi3mr_fwevt, list);
276 list_del_init(&fwevt->list);
277 /*
278 * Put fwevt reference count after
279 * removing it from fwevt_list
280 */
281 mpi3mr_fwevt_put(fwevt);
282 }
283 spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
284
285 return fwevt;
286 }
287
/**
 * mpi3mr_cancel_work - cancel firmware event
 * @fwevt: fwevt object which needs to be canceled
 *
 * Cancel (or wait out) the event's work item. If the work was
 * still pending, the references held for the work queue and for
 * the initial kref_init() are both dropped here, freeing the
 * event. If the work already ran, the handler owns those puts.
 *
 * Return: Nothing.
 */
static void mpi3mr_cancel_work(struct mpi3mr_fwevt *fwevt)
{
	/*
	 * Wait on the fwevt to complete. If this returns 1, then
	 * the event was never executed.
	 *
	 * If it did execute, we wait for it to finish, and the put will
	 * happen from mpi3mr_process_fwevt()
	 */
	if (cancel_work_sync(&fwevt->work)) {
		/*
		 * Put fwevt reference count after
		 * dequeuing it from worker queue
		 */
		mpi3mr_fwevt_put(fwevt);
		/*
		 * Put fwevt reference count to neutralize
		 * kref_init increment
		 */
		mpi3mr_fwevt_put(fwevt);
	}
}
316
/**
 * mpi3mr_cleanup_fwevt_list - Cleanup firmware event list
 * @mrioc: Adapter instance reference
 *
 * Flush all pending firmware events from the firmware event
 * list, then deal with the event currently being processed (if
 * any). Called in controller reset paths.
 *
 * Return: Nothing.
 */
void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_fwevt *fwevt = NULL;

	/* nothing queued and nothing in flight, or no worker at all */
	if ((list_empty(&mrioc->fwevt_list) && !mrioc->current_event) ||
	    !mrioc->fwevt_worker_thread)
		return;

	/* drain every event still sitting on the list */
	while ((fwevt = mpi3mr_dequeue_fwevt(mrioc)))
		mpi3mr_cancel_work(fwevt);

	if (mrioc->current_event) {
		fwevt = mrioc->current_event;
		/*
		 * Don't call cancel_work_sync() API for the
		 * fwevt work if the controller reset is
		 * get called as part of processing the
		 * same fwevt work (or) when worker thread is
		 * waiting for device add/remove APIs to complete.
		 * Otherwise we will see deadlock.
		 */
		if (current_work() == &fwevt->work || fwevt->pending_at_sml) {
			/* ask the in-flight handler to bail out instead */
			fwevt->discard = 1;
			return;
		}

		mpi3mr_cancel_work(fwevt);
	}
}
355
356 /**
357 * mpi3mr_invalidate_devhandles -Invalidate device handles
358 * @mrioc: Adapter instance reference
359 *
360 * Invalidate the device handles in the target device structures
361 * . Called post reset prior to reinitializing the controller.
362 *
363 * Return: Nothing.
364 */
365 void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
366 {
367 struct mpi3mr_tgt_dev *tgtdev;
368 struct mpi3mr_stgt_priv_data *tgt_priv;
369
370 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
371 tgtdev->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
372 if (tgtdev->starget && tgtdev->starget->hostdata) {
373 tgt_priv = tgtdev->starget->hostdata;
374 tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
375 }
376 }
377 }
378
379 /**
380 * mpi3mr_print_scmd - print individual SCSI command
381 * @rq: Block request
382 * @data: Adapter instance reference
383 * @reserved: N/A. Currently not used
384 *
385 * Print the SCSI command details if it is in LLD scope.
386 *
387 * Return: true always.
388 */
389 static bool mpi3mr_print_scmd(struct request *rq,
390 void *data, bool reserved)
391 {
392 struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
393 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
394 struct scmd_priv *priv = NULL;
395
396 if (scmd) {
397 priv = scsi_cmd_priv(scmd);
398 if (!priv->in_lld_scope)
399 goto out;
400
401 ioc_info(mrioc, "%s :Host Tag = %d, qid = %d\n",
402 __func__, priv->host_tag, priv->req_q_idx + 1);
403 scsi_print_command(scmd);
404 }
405
406 out:
407 return(true);
408 }
409
/**
 * mpi3mr_flush_scmd - Flush individual SCSI command
 * @rq: Block request
 * @data: Adapter instance reference
 * @reserved: N/A. Currently not used
 *
 * Iterator callback: if the command is still in LLD scope,
 * unmap its DMA buffers, clear the driver-private state and
 * complete it back to the upper layers with DID_RESET.
 *
 * Return: true always.
 */

static bool mpi3mr_flush_scmd(struct request *rq,
	void *data, bool reserved)
{
	struct mpi3mr_ioc *mrioc = (struct mpi3mr_ioc *)data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv = NULL;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (!priv->in_lld_scope)
			goto out;

		/* unmap protection-information SG list, if one was mapped */
		if (priv->meta_sg_valid)
			dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
			    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
		/* release driver state and DMA mappings before completion */
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		scsi_dma_unmap(scmd);
		scmd->result = DID_RESET << 16;
		scsi_print_command(scmd);
		scmd->scsi_done(scmd);
		mrioc->flush_io_count++;
	}

out:
	return(true);
}
448
449 /**
450 * mpi3mr_count_dev_pending - Count commands pending for a lun
451 * @rq: Block request
452 * @data: SCSI device reference
453 * @reserved: Unused
454 *
455 * This is an iterator function called for each SCSI command in
456 * a host and if the command is pending in the LLD for the
457 * specific device(lun) then device specific pending I/O counter
458 * is updated in the device structure.
459 *
460 * Return: true always.
461 */
462
463 static bool mpi3mr_count_dev_pending(struct request *rq,
464 void *data, bool reserved)
465 {
466 struct scsi_device *sdev = (struct scsi_device *)data;
467 struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
468 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
469 struct scmd_priv *priv;
470
471 if (scmd) {
472 priv = scsi_cmd_priv(scmd);
473 if (!priv->in_lld_scope)
474 goto out;
475 if (scmd->device == sdev)
476 sdev_priv_data->pend_count++;
477 }
478
479 out:
480 return true;
481 }
482
483 /**
484 * mpi3mr_count_tgt_pending - Count commands pending for target
485 * @rq: Block request
486 * @data: SCSI target reference
487 * @reserved: Unused
488 *
489 * This is an iterator function called for each SCSI command in
490 * a host and if the command is pending in the LLD for the
491 * specific target then target specific pending I/O counter is
492 * updated in the target structure.
493 *
494 * Return: true always.
495 */
496
497 static bool mpi3mr_count_tgt_pending(struct request *rq,
498 void *data, bool reserved)
499 {
500 struct scsi_target *starget = (struct scsi_target *)data;
501 struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
502 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
503 struct scmd_priv *priv;
504
505 if (scmd) {
506 priv = scsi_cmd_priv(scmd);
507 if (!priv->in_lld_scope)
508 goto out;
509 if (scmd->device && (scsi_target(scmd->device) == starget))
510 stgt_priv_data->pend_count++;
511 }
512
513 out:
514 return true;
515 }
516
517 /**
518 * mpi3mr_flush_host_io - Flush host I/Os
519 * @mrioc: Adapter instance reference
520 *
521 * Flush all of the pending I/Os by calling
522 * blk_mq_tagset_busy_iter() for each possible tag. This is
523 * executed post controller reset
524 *
525 * Return: Nothing.
526 */
527 void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc)
528 {
529 struct Scsi_Host *shost = mrioc->shost;
530
531 mrioc->flush_io_count = 0;
532 ioc_info(mrioc, "%s :Flushing Host I/O cmds post reset\n", __func__);
533 blk_mq_tagset_busy_iter(&shost->tag_set,
534 mpi3mr_flush_scmd, (void *)mrioc);
535 ioc_info(mrioc, "%s :Flushed %d Host I/O cmds\n", __func__,
536 mrioc->flush_io_count);
537 }
538
539 /**
540 * mpi3mr_alloc_tgtdev - target device allocator
541 *
542 * Allocate target device instance and initialize the reference
543 * count
544 *
545 * Return: target device instance.
546 */
547 static struct mpi3mr_tgt_dev *mpi3mr_alloc_tgtdev(void)
548 {
549 struct mpi3mr_tgt_dev *tgtdev;
550
551 tgtdev = kzalloc(sizeof(*tgtdev), GFP_ATOMIC);
552 if (!tgtdev)
553 return NULL;
554 kref_init(&tgtdev->ref_count);
555 return tgtdev;
556 }
557
558 /**
559 * mpi3mr_tgtdev_add_to_list -Add tgtdevice to the list
560 * @mrioc: Adapter instance reference
561 * @tgtdev: Target device
562 *
563 * Add the target device to the target device list
564 *
565 * Return: Nothing.
566 */
567 static void mpi3mr_tgtdev_add_to_list(struct mpi3mr_ioc *mrioc,
568 struct mpi3mr_tgt_dev *tgtdev)
569 {
570 unsigned long flags;
571
572 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
573 mpi3mr_tgtdev_get(tgtdev);
574 INIT_LIST_HEAD(&tgtdev->list);
575 list_add_tail(&tgtdev->list, &mrioc->tgtdev_list);
576 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
577 }
578
579 /**
580 * mpi3mr_tgtdev_del_from_list -Delete tgtdevice from the list
581 * @mrioc: Adapter instance reference
582 * @tgtdev: Target device
583 *
584 * Remove the target device from the target device list
585 *
586 * Return: Nothing.
587 */
588 static void mpi3mr_tgtdev_del_from_list(struct mpi3mr_ioc *mrioc,
589 struct mpi3mr_tgt_dev *tgtdev)
590 {
591 unsigned long flags;
592
593 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
594 if (!list_empty(&tgtdev->list)) {
595 list_del_init(&tgtdev->list);
596 mpi3mr_tgtdev_put(tgtdev);
597 }
598 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
599 }
600
601 /**
602 * __mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
603 * @mrioc: Adapter instance reference
604 * @handle: Device handle
605 *
606 * Accessor to retrieve target device from the device handle.
607 * Non Lock version
608 *
609 * Return: Target device reference.
610 */
611 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_handle(
612 struct mpi3mr_ioc *mrioc, u16 handle)
613 {
614 struct mpi3mr_tgt_dev *tgtdev;
615
616 assert_spin_locked(&mrioc->tgtdev_lock);
617 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
618 if (tgtdev->dev_handle == handle)
619 goto found_tgtdev;
620 return NULL;
621
622 found_tgtdev:
623 mpi3mr_tgtdev_get(tgtdev);
624 return tgtdev;
625 }
626
627 /**
628 * mpi3mr_get_tgtdev_by_handle -Get tgtdev from device handle
629 * @mrioc: Adapter instance reference
630 * @handle: Device handle
631 *
632 * Accessor to retrieve target device from the device handle.
633 * Lock version
634 *
635 * Return: Target device reference.
636 */
637 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_handle(
638 struct mpi3mr_ioc *mrioc, u16 handle)
639 {
640 struct mpi3mr_tgt_dev *tgtdev;
641 unsigned long flags;
642
643 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
644 tgtdev = __mpi3mr_get_tgtdev_by_handle(mrioc, handle);
645 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
646 return tgtdev;
647 }
648
649 /**
650 * __mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persist ID
651 * @mrioc: Adapter instance reference
652 * @persist_id: Persistent ID
653 *
654 * Accessor to retrieve target device from the Persistent ID.
655 * Non Lock version
656 *
657 * Return: Target device reference.
658 */
659 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_by_perst_id(
660 struct mpi3mr_ioc *mrioc, u16 persist_id)
661 {
662 struct mpi3mr_tgt_dev *tgtdev;
663
664 assert_spin_locked(&mrioc->tgtdev_lock);
665 list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list)
666 if (tgtdev->perst_id == persist_id)
667 goto found_tgtdev;
668 return NULL;
669
670 found_tgtdev:
671 mpi3mr_tgtdev_get(tgtdev);
672 return tgtdev;
673 }
674
675 /**
676 * mpi3mr_get_tgtdev_by_perst_id -Get tgtdev from persistent ID
677 * @mrioc: Adapter instance reference
678 * @persist_id: Persistent ID
679 *
680 * Accessor to retrieve target device from the Persistent ID.
681 * Lock version
682 *
683 * Return: Target device reference.
684 */
685 static struct mpi3mr_tgt_dev *mpi3mr_get_tgtdev_by_perst_id(
686 struct mpi3mr_ioc *mrioc, u16 persist_id)
687 {
688 struct mpi3mr_tgt_dev *tgtdev;
689 unsigned long flags;
690
691 spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
692 tgtdev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, persist_id);
693 spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
694 return tgtdev;
695 }
696
697 /**
698 * __mpi3mr_get_tgtdev_from_tgtpriv -Get tgtdev from tgt private
699 * @mrioc: Adapter instance reference
700 * @tgt_priv: Target private data
701 *
702 * Accessor to return target device from the target private
703 * data. Non Lock version
704 *
705 * Return: Target device reference.
706 */
707 static struct mpi3mr_tgt_dev *__mpi3mr_get_tgtdev_from_tgtpriv(
708 struct mpi3mr_ioc *mrioc, struct mpi3mr_stgt_priv_data *tgt_priv)
709 {
710 struct mpi3mr_tgt_dev *tgtdev;
711
712 assert_spin_locked(&mrioc->tgtdev_lock);
713 tgtdev = tgt_priv->tgt_dev;
714 if (tgtdev)
715 mpi3mr_tgtdev_get(tgtdev);
716 return tgtdev;
717 }
718
719 /**
720 * mpi3mr_print_device_event_notice - print notice related to post processing of
721 * device event after controller reset.
722 *
723 * @mrioc: Adapter instance reference
724 * @device_add: true for device add event and false for device removal event
725 *
726 * Return: None.
727 */
728 static void mpi3mr_print_device_event_notice(struct mpi3mr_ioc *mrioc,
729 bool device_add)
730 {
731 ioc_notice(mrioc, "Device %s was in progress before the reset and\n",
732 (device_add ? "addition" : "removal"));
733 ioc_notice(mrioc, "completed after reset, verify whether the exposed devices\n");
734 ioc_notice(mrioc, "are matched with attached devices for correctness\n");
735 }
736
/**
 * mpi3mr_remove_tgtdev_from_host - Remove dev from upper layers
 * @mrioc: Adapter instance reference
 * @tgtdev: Target device structure
 *
 * Checks whether the device is exposed to upper layers and if it
 * is then remove the device from upper layers by calling
 * scsi_remove_target(). While the removal is in flight,
 * pending_at_sml is set on the current event so a concurrent
 * controller reset will not cancel_work_sync() this context.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove_tgtdev_from_host(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_tgt_dev *tgtdev)
{
	struct mpi3mr_stgt_priv_data *tgt_priv;

	ioc_info(mrioc, "%s :Removing handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
	/* invalidate the cached handle in the SCSI target private data */
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		tgt_priv = tgtdev->starget->hostdata;
		tgt_priv->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	}

	if (tgtdev->starget) {
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_remove_target(&tgtdev->starget->dev);
		tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			/* a reset asked this event to be discarded mid-flight */
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, false);
				return;
			}
		}
	}
	ioc_info(mrioc, "%s :Removed handle(0x%04x), wwid(0x%016llx)\n",
	    __func__, tgtdev->dev_handle, (unsigned long long)tgtdev->wwid);
}
776
/**
 * mpi3mr_report_tgtdev_to_host - Expose device to upper layers
 * @mrioc: Adapter instance reference
 * @perst_id: Persistent ID of the device
 *
 * Checks whether the device can be exposed to upper layers and
 * if it is not then expose the device to upper layers by
 * calling scsi_scan_target(). While the scan is in flight,
 * pending_at_sml is set on the current event so a concurrent
 * controller reset will not cancel_work_sync() this context.
 *
 * Return: 0 on success, non zero on failure.
 */
static int mpi3mr_report_tgtdev_to_host(struct mpi3mr_ioc *mrioc,
	u16 perst_id)
{
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev;

	tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
	if (!tgtdev) {
		retval = -1;
		goto out;
	}
	/* hidden devices are never exposed to the SCSI mid-layer */
	if (tgtdev->is_hidden) {
		retval = -1;
		goto out;
	}
	if (!tgtdev->host_exposed && !mrioc->reset_in_progress) {
		tgtdev->host_exposed = 1;
		if (mrioc->current_event)
			mrioc->current_event->pending_at_sml = 1;
		scsi_scan_target(&mrioc->shost->shost_gendev, 0,
		    tgtdev->perst_id,
		    SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
		/* scan did not attach a SCSI target: roll back the flag */
		if (!tgtdev->starget)
			tgtdev->host_exposed = 0;
		if (mrioc->current_event) {
			mrioc->current_event->pending_at_sml = 0;
			/* a reset asked this event to be discarded mid-flight */
			if (mrioc->current_event->discard) {
				mpi3mr_print_device_event_notice(mrioc, true);
				goto out;
			}
		}
	}
out:
	/* balances the get from mpi3mr_get_tgtdev_by_perst_id() */
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);

	return retval;
}
826
827 /**
828 * mpi3mr_change_queue_depth- Change QD callback handler
829 * @sdev: SCSI device reference
830 * @q_depth: Queue depth
831 *
832 * Validate and limit QD and call scsi_change_queue_depth.
833 *
834 * Return: return value of scsi_change_queue_depth
835 */
836 static int mpi3mr_change_queue_depth(struct scsi_device *sdev,
837 int q_depth)
838 {
839 struct scsi_target *starget = scsi_target(sdev);
840 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
841 int retval = 0;
842
843 if (!sdev->tagged_supported)
844 q_depth = 1;
845 if (q_depth > shost->can_queue)
846 q_depth = shost->can_queue;
847 else if (!q_depth)
848 q_depth = MPI3MR_DEFAULT_SDEV_QD;
849 retval = scsi_change_queue_depth(sdev, q_depth);
850
851 return retval;
852 }
853
854 /**
855 * mpi3mr_update_sdev - Update SCSI device information
856 * @sdev: SCSI device reference
857 * @data: target device reference
858 *
859 * This is an iterator function called for each SCSI device in a
860 * target to update the target specific information into each
861 * SCSI device.
862 *
863 * Return: Nothing.
864 */
865 static void
866 mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
867 {
868 struct mpi3mr_tgt_dev *tgtdev;
869
870 tgtdev = (struct mpi3mr_tgt_dev *)data;
871 if (!tgtdev)
872 return;
873
874 mpi3mr_change_queue_depth(sdev, tgtdev->q_depth);
875 switch (tgtdev->dev_type) {
876 case MPI3_DEVICE_DEVFORM_PCIE:
877 /*The block layer hw sector size = 512*/
878 if ((tgtdev->dev_spec.pcie_inf.dev_info &
879 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
880 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
881 blk_queue_max_hw_sectors(sdev->request_queue,
882 tgtdev->dev_spec.pcie_inf.mdts / 512);
883 if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
884 blk_queue_virt_boundary(sdev->request_queue,
885 ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
886 else
887 blk_queue_virt_boundary(sdev->request_queue,
888 ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
889 }
890 break;
891 default:
892 break;
893 }
894 }
895
/**
 * mpi3mr_rfresh_tgtdevs - Refresh target device exposure
 * @mrioc: Adapter instance reference
 *
 * This is executed post controller reset to identify any
 * missing devices during reset and remove from the upper layers
 * or expose any newly detected device to the upper layers.
 *
 * Return: Nothing.
 */

void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	/*
	 * _safe variant is required here: entries with an invalid
	 * handle are unlinked and released while iterating.
	 */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
		    tgtdev->host_exposed) {
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			mpi3mr_tgtdev_put(tgtdev);
		}
	}

	/* expose surviving, non-hidden devices that are not yet visible */
	tgtdev = NULL;
	list_for_each_entry(tgtdev, &mrioc->tgtdev_list, list) {
		if ((tgtdev->dev_handle != MPI3MR_INVALID_DEV_HANDLE) &&
		    !tgtdev->is_hidden && !tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
}
928
929 /**
930 * mpi3mr_update_tgtdev - DevStatusChange evt bottomhalf
931 * @mrioc: Adapter instance reference
932 * @tgtdev: Target device internal structure
933 * @dev_pg0: New device page0
934 *
935 * Update the information from the device page0 into the driver
936 * cached target device structure.
937 *
938 * Return: Nothing.
939 */
940 static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
941 struct mpi3mr_tgt_dev *tgtdev, struct mpi3_device_page0 *dev_pg0)
942 {
943 u16 flags = 0;
944 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
945 u8 prot_mask = 0;
946
947 tgtdev->perst_id = le16_to_cpu(dev_pg0->persistent_id);
948 tgtdev->dev_handle = le16_to_cpu(dev_pg0->dev_handle);
949 tgtdev->dev_type = dev_pg0->device_form;
950 tgtdev->encl_handle = le16_to_cpu(dev_pg0->enclosure_handle);
951 tgtdev->parent_handle = le16_to_cpu(dev_pg0->parent_dev_handle);
952 tgtdev->slot = le16_to_cpu(dev_pg0->slot);
953 tgtdev->q_depth = le16_to_cpu(dev_pg0->queue_depth);
954 tgtdev->wwid = le64_to_cpu(dev_pg0->wwid);
955
956 flags = le16_to_cpu(dev_pg0->flags);
957 tgtdev->is_hidden = (flags & MPI3_DEVICE0_FLAGS_HIDDEN);
958
959 if (tgtdev->starget && tgtdev->starget->hostdata) {
960 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
961 tgtdev->starget->hostdata;
962 scsi_tgt_priv_data->perst_id = tgtdev->perst_id;
963 scsi_tgt_priv_data->dev_handle = tgtdev->dev_handle;
964 scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
965 }
966
967 switch (dev_pg0->access_status) {
968 case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
969 case MPI3_DEVICE0_ASTATUS_PREPARE:
970 case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
971 case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
972 break;
973 default:
974 tgtdev->is_hidden = 1;
975 break;
976 }
977
978 switch (tgtdev->dev_type) {
979 case MPI3_DEVICE_DEVFORM_SAS_SATA:
980 {
981 struct mpi3_device0_sas_sata_format *sasinf =
982 &dev_pg0->device_specific.sas_sata_format;
983 u16 dev_info = le16_to_cpu(sasinf->device_info);
984
985 tgtdev->dev_spec.sas_sata_inf.dev_info = dev_info;
986 tgtdev->dev_spec.sas_sata_inf.sas_address =
987 le64_to_cpu(sasinf->sas_address);
988 if ((dev_info & MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK) !=
989 MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE)
990 tgtdev->is_hidden = 1;
991 else if (!(dev_info & (MPI3_SAS_DEVICE_INFO_STP_SATA_TARGET |
992 MPI3_SAS_DEVICE_INFO_SSP_TARGET)))
993 tgtdev->is_hidden = 1;
994 break;
995 }
996 case MPI3_DEVICE_DEVFORM_PCIE:
997 {
998 struct mpi3_device0_pcie_format *pcieinf =
999 &dev_pg0->device_specific.pcie_format;
1000 u16 dev_info = le16_to_cpu(pcieinf->device_info);
1001
1002 tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
1003 tgtdev->dev_spec.pcie_inf.capb =
1004 le32_to_cpu(pcieinf->capabilities);
1005 tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
1006 /* 2^12 = 4096 */
1007 tgtdev->dev_spec.pcie_inf.pgsz = 12;
1008 if (dev_pg0->access_status == MPI3_DEVICE0_ASTATUS_NO_ERRORS) {
1009 tgtdev->dev_spec.pcie_inf.mdts =
1010 le32_to_cpu(pcieinf->maximum_data_transfer_size);
1011 tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
1012 tgtdev->dev_spec.pcie_inf.reset_to =
1013 max_t(u8, pcieinf->controller_reset_to,
1014 MPI3MR_INTADMCMD_TIMEOUT);
1015 tgtdev->dev_spec.pcie_inf.abort_to =
1016 max_t(u8, pcieinf->nvme_abort_to,
1017 MPI3MR_INTADMCMD_TIMEOUT);
1018 }
1019 if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
1020 tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
1021 if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1022 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
1023 ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
1024 MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
1025 tgtdev->is_hidden = 1;
1026 if (!mrioc->shost)
1027 break;
1028 prot_mask = scsi_host_get_prot(mrioc->shost);
1029 if (prot_mask & SHOST_DIX_TYPE0_PROTECTION) {
1030 scsi_host_set_prot(mrioc->shost, prot_mask & 0x77);
1031 ioc_info(mrioc,
1032 "%s : Disabling DIX0 prot capability\n", __func__);
1033 ioc_info(mrioc,
1034 "because HBA does not support DIX0 operation on NVME drives\n");
1035 }
1036 break;
1037 }
1038 case MPI3_DEVICE_DEVFORM_VD:
1039 {
1040 struct mpi3_device0_vd_format *vdinf =
1041 &dev_pg0->device_specific.vd_format;
1042
1043 tgtdev->dev_spec.vol_inf.state = vdinf->vd_state;
1044 if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
1045 tgtdev->is_hidden = 1;
1046 break;
1047 }
1048 default:
1049 break;
1050 }
1051 }
1052
/**
 * mpi3mr_devstatuschg_evt_bh - DevStatusChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event information.
 *
 * Process Device status Change event and based on device's new
 * information, either expose the device to the upper layers, or
 * remove the device from upper layers.
 *
 * Return: Nothing.
 */
static void mpi3mr_devstatuschg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	u16 dev_handle = 0;
	u8 uhide = 0, delete = 0, cleanup = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)fwevt->event_data;

	dev_handle = le16_to_cpu(evtdata->dev_handle);
	ioc_info(mrioc,
	    "%s :device status change: handle(0x%04x): reason code(0x%x)\n",
	    __func__, dev_handle, evtdata->reason_code);
	/* map the firmware reason code to unhide/delete/cleanup actions */
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_NOT_HIDDEN:
		uhide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		cleanup = 1;
		break;
	default:
		ioc_info(mrioc, "%s :Unhandled reason code(0x%x)\n", __func__,
		    evtdata->reason_code);
		break;
	}

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (uhide) {
		tgtdev->is_hidden = 0;
		if (!tgtdev->host_exposed)
			mpi3mr_report_tgtdev_to_host(mrioc, tgtdev->perst_id);
	}
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		if (delete)
			mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
	}
	if (cleanup) {
		/* unlink from the list; this put drops the list's reference */
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}

out:
	/* balances the get from mpi3mr_get_tgtdev_by_handle() */
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
1115
1116 /**
1117 * mpi3mr_devinfochg_evt_bh - DeviceInfoChange evt bottomhalf
1118 * @mrioc: Adapter instance reference
1119 * @dev_pg0: New device page0
1120 *
1121 * Process Device Info Change event and based on device's new
1122 * information, either expose the device to the upper layers, or
1123 * remove the device from upper layers or update the details of
1124 * the device.
1125 *
1126 * Return: Nothing.
1127 */
1128 static void mpi3mr_devinfochg_evt_bh(struct mpi3mr_ioc *mrioc,
1129 struct mpi3_device_page0 *dev_pg0)
1130 {
1131 struct mpi3mr_tgt_dev *tgtdev = NULL;
1132 u16 dev_handle = 0, perst_id = 0;
1133
1134 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1135 dev_handle = le16_to_cpu(dev_pg0->dev_handle);
1136 ioc_info(mrioc,
1137 "%s :Device info change: handle(0x%04x): persist_id(0x%x)\n",
1138 __func__, dev_handle, perst_id);
1139 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
1140 if (!tgtdev)
1141 goto out;
1142 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1143 if (!tgtdev->is_hidden && !tgtdev->host_exposed)
1144 mpi3mr_report_tgtdev_to_host(mrioc, perst_id);
1145 if (tgtdev->is_hidden && tgtdev->host_exposed)
1146 mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
1147 if (!tgtdev->is_hidden && tgtdev->host_exposed && tgtdev->starget)
1148 starget_for_each_device(tgtdev->starget, (void *)tgtdev,
1149 mpi3mr_update_sdev);
1150 out:
1151 if (tgtdev)
1152 mpi3mr_tgtdev_put(tgtdev);
1153 }
1154
1155 /**
1156 * mpi3mr_sastopochg_evt_debug - SASTopoChange details
1157 * @mrioc: Adapter instance reference
1158 * @event_data: SAS topology change list event data
1159 *
1160 * Prints information about the SAS topology change event.
1161 *
1162 * Return: Nothing.
1163 */
1164 static void
1165 mpi3mr_sastopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1166 struct mpi3_event_data_sas_topology_change_list *event_data)
1167 {
1168 int i;
1169 u16 handle;
1170 u8 reason_code, phy_number;
1171 char *status_str = NULL;
1172 u8 link_rate, prev_link_rate;
1173
1174 switch (event_data->exp_status) {
1175 case MPI3_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
1176 status_str = "remove";
1177 break;
1178 case MPI3_EVENT_SAS_TOPO_ES_RESPONDING:
1179 status_str = "responding";
1180 break;
1181 case MPI3_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
1182 status_str = "remove delay";
1183 break;
1184 case MPI3_EVENT_SAS_TOPO_ES_NO_EXPANDER:
1185 status_str = "direct attached";
1186 break;
1187 default:
1188 status_str = "unknown status";
1189 break;
1190 }
1191 ioc_info(mrioc, "%s :sas topology change: (%s)\n",
1192 __func__, status_str);
1193 ioc_info(mrioc,
1194 "%s :\texpander_handle(0x%04x), enclosure_handle(0x%04x) start_phy(%02d), num_entries(%d)\n",
1195 __func__, le16_to_cpu(event_data->expander_dev_handle),
1196 le16_to_cpu(event_data->enclosure_handle),
1197 event_data->start_phy_num, event_data->num_entries);
1198 for (i = 0; i < event_data->num_entries; i++) {
1199 handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
1200 if (!handle)
1201 continue;
1202 phy_number = event_data->start_phy_num + i;
1203 reason_code = event_data->phy_entry[i].status &
1204 MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
1205 switch (reason_code) {
1206 case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
1207 status_str = "target remove";
1208 break;
1209 case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
1210 status_str = "delay target remove";
1211 break;
1212 case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
1213 status_str = "link status change";
1214 break;
1215 case MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE:
1216 status_str = "link status no change";
1217 break;
1218 case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
1219 status_str = "target responding";
1220 break;
1221 default:
1222 status_str = "unknown";
1223 break;
1224 }
1225 link_rate = event_data->phy_entry[i].link_rate >> 4;
1226 prev_link_rate = event_data->phy_entry[i].link_rate & 0xF;
1227 ioc_info(mrioc,
1228 "%s :\tphy(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1229 __func__, phy_number, handle, status_str, link_rate,
1230 prev_link_rate);
1231 }
1232 }
1233
/**
 * mpi3mr_sastopochg_evt_bh - SASTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the SAS topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers and from the driver's internal target list.
 *
 * Return: Nothing.
 */
static void mpi3mr_sastopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_sas_topology_change_list *event_data =
	    (struct mpi3_event_data_sas_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_sastopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Abort processing if this event was discarded meanwhile. */
		if (fwevt->discard)
			return;
		handle = le16_to_cpu(event_data->phy_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		/* Takes a reference on tgtdev; dropped at loop bottom. */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;

		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): two puts execute on this path — this
			 * one presumably releases the device's creation
			 * reference now that it is off the list, and the put
			 * below balances mpi3mr_get_tgtdev_by_handle().
			 * Confirm against the tgtdev kref scheme.
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}
1284
1285 /**
1286 * mpi3mr_pcietopochg_evt_debug - PCIeTopoChange details
1287 * @mrioc: Adapter instance reference
1288 * @event_data: PCIe topology change list event data
1289 *
1290 * Prints information about the PCIe topology change event.
1291 *
1292 * Return: Nothing.
1293 */
1294 static void
1295 mpi3mr_pcietopochg_evt_debug(struct mpi3mr_ioc *mrioc,
1296 struct mpi3_event_data_pcie_topology_change_list *event_data)
1297 {
1298 int i;
1299 u16 handle;
1300 u16 reason_code;
1301 u8 port_number;
1302 char *status_str = NULL;
1303 u8 link_rate, prev_link_rate;
1304
1305 switch (event_data->switch_status) {
1306 case MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
1307 status_str = "remove";
1308 break;
1309 case MPI3_EVENT_PCIE_TOPO_SS_RESPONDING:
1310 status_str = "responding";
1311 break;
1312 case MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
1313 status_str = "remove delay";
1314 break;
1315 case MPI3_EVENT_PCIE_TOPO_SS_NO_PCIE_SWITCH:
1316 status_str = "direct attached";
1317 break;
1318 default:
1319 status_str = "unknown status";
1320 break;
1321 }
1322 ioc_info(mrioc, "%s :pcie topology change: (%s)\n",
1323 __func__, status_str);
1324 ioc_info(mrioc,
1325 "%s :\tswitch_handle(0x%04x), enclosure_handle(0x%04x) start_port(%02d), num_entries(%d)\n",
1326 __func__, le16_to_cpu(event_data->switch_dev_handle),
1327 le16_to_cpu(event_data->enclosure_handle),
1328 event_data->start_port_num, event_data->num_entries);
1329 for (i = 0; i < event_data->num_entries; i++) {
1330 handle =
1331 le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
1332 if (!handle)
1333 continue;
1334 port_number = event_data->start_port_num + i;
1335 reason_code = event_data->port_entry[i].port_status;
1336 switch (reason_code) {
1337 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1338 status_str = "target remove";
1339 break;
1340 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1341 status_str = "delay target remove";
1342 break;
1343 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1344 status_str = "link status change";
1345 break;
1346 case MPI3_EVENT_PCIE_TOPO_PS_NO_CHANGE:
1347 status_str = "link status no change";
1348 break;
1349 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1350 status_str = "target responding";
1351 break;
1352 default:
1353 status_str = "unknown";
1354 break;
1355 }
1356 link_rate = event_data->port_entry[i].current_port_info &
1357 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1358 prev_link_rate = event_data->port_entry[i].previous_port_info &
1359 MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK;
1360 ioc_info(mrioc,
1361 "%s :\tport(%02d), attached_handle(0x%04x): %s: link rate: new(0x%02x), old(0x%02x)\n",
1362 __func__, port_number, handle, status_str, link_rate,
1363 prev_link_rate);
1364 }
1365 }
1366
/**
 * mpi3mr_pcietopochg_evt_bh - PCIeTopologyChange evt bottomhalf
 * @mrioc: Adapter instance reference
 * @fwevt: Firmware event reference
 *
 * Prints information about the PCIe topology change event and
 * for "not responding" event code, removes the device from the
 * upper layers and from the driver's internal target list.
 *
 * Return: Nothing.
 */
static void mpi3mr_pcietopochg_evt_bh(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_fwevt *fwevt)
{
	struct mpi3_event_data_pcie_topology_change_list *event_data =
	    (struct mpi3_event_data_pcie_topology_change_list *)fwevt->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;

	mpi3mr_pcietopochg_evt_debug(mrioc, event_data);

	for (i = 0; i < event_data->num_entries; i++) {
		/* Abort processing if this event was discarded meanwhile. */
		if (fwevt->discard)
			return;
		handle =
		    le16_to_cpu(event_data->port_entry[i].attached_dev_handle);
		if (!handle)
			continue;
		/* Takes a reference on tgtdev; dropped at loop bottom. */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (!tgtdev)
			continue;

		reason_code = event_data->port_entry[i].port_status;

		switch (reason_code) {
		case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			if (tgtdev->host_exposed)
				mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
			mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
			/*
			 * NOTE(review): two puts execute on this path — this
			 * one presumably releases the device's creation
			 * reference now that it is off the list, and the put
			 * below balances mpi3mr_get_tgtdev_by_handle().
			 * Confirm against the tgtdev kref scheme.
			 */
			mpi3mr_tgtdev_put(tgtdev);
			break;
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}
1417
1418 /**
1419 * mpi3mr_fwevt_bh - Firmware event bottomhalf handler
1420 * @mrioc: Adapter instance reference
1421 * @fwevt: Firmware event reference
1422 *
1423 * Identifies the firmware event and calls corresponding bottomg
1424 * half handler and sends event acknowledgment if required.
1425 *
1426 * Return: Nothing.
1427 */
1428 static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
1429 struct mpi3mr_fwevt *fwevt)
1430 {
1431 mpi3mr_fwevt_del_from_list(mrioc, fwevt);
1432 mrioc->current_event = fwevt;
1433
1434 if (mrioc->stop_drv_processing)
1435 goto out;
1436
1437 if (!fwevt->process_evt)
1438 goto evt_ack;
1439
1440 switch (fwevt->event_id) {
1441 case MPI3_EVENT_DEVICE_ADDED:
1442 {
1443 struct mpi3_device_page0 *dev_pg0 =
1444 (struct mpi3_device_page0 *)fwevt->event_data;
1445 mpi3mr_report_tgtdev_to_host(mrioc,
1446 le16_to_cpu(dev_pg0->persistent_id));
1447 break;
1448 }
1449 case MPI3_EVENT_DEVICE_INFO_CHANGED:
1450 {
1451 mpi3mr_devinfochg_evt_bh(mrioc,
1452 (struct mpi3_device_page0 *)fwevt->event_data);
1453 break;
1454 }
1455 case MPI3_EVENT_DEVICE_STATUS_CHANGE:
1456 {
1457 mpi3mr_devstatuschg_evt_bh(mrioc, fwevt);
1458 break;
1459 }
1460 case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1461 {
1462 mpi3mr_sastopochg_evt_bh(mrioc, fwevt);
1463 break;
1464 }
1465 case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1466 {
1467 mpi3mr_pcietopochg_evt_bh(mrioc, fwevt);
1468 break;
1469 }
1470 default:
1471 break;
1472 }
1473
1474 evt_ack:
1475 if (fwevt->send_ack)
1476 mpi3mr_process_event_ack(mrioc, fwevt->event_id,
1477 fwevt->evt_ctx);
1478 out:
1479 /* Put fwevt reference count to neutralize kref_init increment */
1480 mpi3mr_fwevt_put(fwevt);
1481 mrioc->current_event = NULL;
1482 }
1483
1484 /**
1485 * mpi3mr_fwevt_worker - Firmware event worker
1486 * @work: Work struct containing firmware event
1487 *
1488 * Extracts the firmware event and calls mpi3mr_fwevt_bh.
1489 *
1490 * Return: Nothing.
1491 */
1492 static void mpi3mr_fwevt_worker(struct work_struct *work)
1493 {
1494 struct mpi3mr_fwevt *fwevt = container_of(work, struct mpi3mr_fwevt,
1495 work);
1496 mpi3mr_fwevt_bh(fwevt->mrioc, fwevt);
1497 /*
1498 * Put fwevt reference count after
1499 * dequeuing it from worker queue
1500 */
1501 mpi3mr_fwevt_put(fwevt);
1502 }
1503
1504 /**
1505 * mpi3mr_create_tgtdev - Create and add a target device
1506 * @mrioc: Adapter instance reference
1507 * @dev_pg0: Device Page 0 data
1508 *
1509 * If the device specified by the device page 0 data is not
1510 * present in the driver's internal list, allocate the memory
1511 * for the device, populate the data and add to the list, else
1512 * update the device data. The key is persistent ID.
1513 *
1514 * Return: 0 on success, -ENOMEM on memory allocation failure
1515 */
1516 static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
1517 struct mpi3_device_page0 *dev_pg0)
1518 {
1519 int retval = 0;
1520 struct mpi3mr_tgt_dev *tgtdev = NULL;
1521 u16 perst_id = 0;
1522
1523 perst_id = le16_to_cpu(dev_pg0->persistent_id);
1524 tgtdev = mpi3mr_get_tgtdev_by_perst_id(mrioc, perst_id);
1525 if (tgtdev) {
1526 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1527 mpi3mr_tgtdev_put(tgtdev);
1528 } else {
1529 tgtdev = mpi3mr_alloc_tgtdev();
1530 if (!tgtdev)
1531 return -ENOMEM;
1532 mpi3mr_update_tgtdev(mrioc, tgtdev, dev_pg0);
1533 mpi3mr_tgtdev_add_to_list(mrioc, tgtdev);
1534 }
1535
1536 return retval;
1537 }
1538
1539 /**
1540 * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
1541 * @mrioc: Adapter instance reference
1542 *
1543 * Flush pending commands in the delayed lists due to a
1544 * controller reset or driver removal as a cleanup.
1545 *
1546 * Return: Nothing
1547 */
1548 void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
1549 {
1550 struct delayed_dev_rmhs_node *_rmhs_node;
1551 struct delayed_evt_ack_node *_evtack_node;
1552
1553 dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
1554 while (!list_empty(&mrioc->delayed_rmhs_list)) {
1555 _rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
1556 struct delayed_dev_rmhs_node, list);
1557 list_del(&_rmhs_node->list);
1558 kfree(_rmhs_node);
1559 }
1560 dprint_reset(mrioc, "flushing delayed event ack commands\n");
1561 while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1562 _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
1563 struct delayed_evt_ack_node, list);
1564 list_del(&_evtack_node->list);
1565 kfree(_evtack_node);
1566 }
1567 }
1568
/**
 * mpi3mr_dev_rmhs_complete_iou - Device removal IOUC completion
 * @mrioc: Adapter instance reference
 * @drv_cmd: Internal command tracker
 *
 * Issues a target reset TM to the firmware from the device
 * removal TM pend list or retry the removal handshake sequence
 * based on the IOU control request IOC status.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_ioc *mrioc,
	struct mpi3mr_drv_cmd *drv_cmd)
{
	/* Index of this tracker within the dev_rmhs command pool. */
	u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	ioc_info(mrioc,
	    "%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
	    __func__, drv_cmd->dev_handle, drv_cmd->ioc_status,
	    drv_cmd->ioc_loginfo);
	if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
		/* Retry the whole TM + IOU control handshake a few times. */
		if (drv_cmd->retry_count < MPI3MR_DEV_RMHS_RETRY_COUNT) {
			drv_cmd->retry_count++;
			ioc_info(mrioc,
			    "%s :dev_rmhs_iouctrl_complete: handle(0x%04x)retrying handshake retry=%d\n",
			    __func__, drv_cmd->dev_handle,
			    drv_cmd->retry_count);
			mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle,
			    drv_cmd, drv_cmd->iou_rc);
			return;
		}
		ioc_err(mrioc,
		    "%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
	} else {
		ioc_info(mrioc,
		    "%s :dev removal handshake completed successfully: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		/* Handshake done; the handle is no longer removal-pending. */
		clear_bit(drv_cmd->dev_handle, mrioc->removepend_bitmap);
	}

	/*
	 * Reuse this tracker for the oldest postponed removal handshake,
	 * if any, instead of freeing it back to the pool.
	 */
	if (!list_empty(&mrioc->delayed_rmhs_list)) {
		delayed_dev_rmhs = list_entry(mrioc->delayed_rmhs_list.next,
		    struct delayed_dev_rmhs_node, list);
		drv_cmd->dev_handle = delayed_dev_rmhs->handle;
		drv_cmd->retry_count = 0;
		drv_cmd->iou_rc = delayed_dev_rmhs->iou_rc;
		ioc_info(mrioc,
		    "%s :dev_rmhs_iouctrl_complete: processing delayed TM: handle(0x%04x)\n",
		    __func__, drv_cmd->dev_handle);
		mpi3mr_dev_rmhs_send_tm(mrioc, drv_cmd->dev_handle, drv_cmd,
		    drv_cmd->iou_rc);
		list_del(&delayed_dev_rmhs->list);
		kfree(delayed_dev_rmhs);
		return;
	}
	/* Nothing pending: release the tracker back to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->retry_count = 0;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}
1632
1633 /**
1634 * mpi3mr_dev_rmhs_complete_tm - Device removal TM completion
1635 * @mrioc: Adapter instance reference
1636 * @drv_cmd: Internal command tracker
1637 *
1638 * Issues a target reset TM to the firmware from the device
1639 * removal TM pend list or issue IO unit control request as
1640 * part of device removal or hidden acknowledgment handshake.
1641 *
1642 * Return: Nothing
1643 */
1644 static void mpi3mr_dev_rmhs_complete_tm(struct mpi3mr_ioc *mrioc,
1645 struct mpi3mr_drv_cmd *drv_cmd)
1646 {
1647 struct mpi3_iounit_control_request iou_ctrl;
1648 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
1649 struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
1650 int retval;
1651
1652 if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
1653 tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
1654
1655 if (tm_reply)
1656 pr_info(IOCNAME
1657 "dev_rmhs_tr_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x), term_count(%d)\n",
1658 mrioc->name, drv_cmd->dev_handle, drv_cmd->ioc_status,
1659 drv_cmd->ioc_loginfo,
1660 le32_to_cpu(tm_reply->termination_count));
1661
1662 pr_info(IOCNAME "Issuing IOU CTL: handle(0x%04x) dev_rmhs idx(%d)\n",
1663 mrioc->name, drv_cmd->dev_handle, cmd_idx);
1664
1665 memset(&iou_ctrl, 0, sizeof(iou_ctrl));
1666
1667 drv_cmd->state = MPI3MR_CMD_PENDING;
1668 drv_cmd->is_waiting = 0;
1669 drv_cmd->callback = mpi3mr_dev_rmhs_complete_iou;
1670 iou_ctrl.operation = drv_cmd->iou_rc;
1671 iou_ctrl.param16[0] = cpu_to_le16(drv_cmd->dev_handle);
1672 iou_ctrl.host_tag = cpu_to_le16(drv_cmd->host_tag);
1673 iou_ctrl.function = MPI3_FUNCTION_IO_UNIT_CONTROL;
1674
1675 retval = mpi3mr_admin_request_post(mrioc, &iou_ctrl, sizeof(iou_ctrl),
1676 1);
1677 if (retval) {
1678 pr_err(IOCNAME "Issue DevRmHsTMIOUCTL: Admin post failed\n",
1679 mrioc->name);
1680 goto out_failed;
1681 }
1682
1683 return;
1684 out_failed:
1685 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1686 drv_cmd->callback = NULL;
1687 drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
1688 drv_cmd->retry_count = 0;
1689 clear_bit(cmd_idx, mrioc->devrem_bitmap);
1690 }
1691
/**
 * mpi3mr_dev_rmhs_send_tm - Issue TM for device removal
 * @mrioc: Adapter instance reference
 * @handle: Device handle
 * @cmdparam: Internal command tracker
 * @iou_rc: IO unit reason code
 *
 * Issues a target reset TM to the firmware or add it to a pend
 * list as part of device removal or hidden acknowledgment
 * handshake.
 *
 * Return: Nothing
 */
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_ioc *mrioc, u16 handle,
	struct mpi3mr_drv_cmd *cmdparam, u8 iou_rc)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	int retval = 0;
	u16 cmd_idx = MPI3MR_NUM_DEVRMCMD;
	u8 retrycount = 5;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;

	/* Caller supplied a tracker (retry/delayed path): use it as-is. */
	if (drv_cmd)
		goto issue_cmd;
	/*
	 * Claim a free dev_rmhs tracker. find_first_zero_bit() and
	 * test_and_set_bit() can race with other claimants, hence the
	 * bounded retry loop.
	 */
	do {
		cmd_idx = find_first_zero_bit(mrioc->devrem_bitmap,
		    MPI3MR_NUM_DEVRMCMD);
		if (cmd_idx < MPI3MR_NUM_DEVRMCMD) {
			if (!test_and_set_bit(cmd_idx, mrioc->devrem_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_DEVRMCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the request on the delayed list;
	 * a completing handshake will pick it up later.
	 */
	if (cmd_idx >= MPI3MR_NUM_DEVRMCMD) {
		delayed_dev_rmhs = kzalloc(sizeof(*delayed_dev_rmhs),
		    GFP_ATOMIC);
		if (!delayed_dev_rmhs)
			return;
		INIT_LIST_HEAD(&delayed_dev_rmhs->list);
		delayed_dev_rmhs->handle = handle;
		delayed_dev_rmhs->iou_rc = iou_rc;
		list_add_tail(&delayed_dev_rmhs->list,
		    &mrioc->delayed_rmhs_list);
		ioc_info(mrioc, "%s :DevRmHs: tr:handle(0x%04x) is postponed\n",
		    __func__, handle);
		return;
	}
	drv_cmd = &mrioc->dev_rmhs_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
	ioc_info(mrioc,
	    "%s :Issuing TR TM: for devhandle 0x%04x with dev_rmhs %d\n",
	    __func__, handle, cmd_idx);

	memset(&tm_req, 0, sizeof(tm_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		goto out;
	}
	/* Arm the tracker; completion continues in _complete_tm(). */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_dev_rmhs_complete_tm;
	drv_cmd->dev_handle = handle;
	drv_cmd->iou_rc = iou_rc;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	tm_req.task_host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INVALID);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	/* Mark the handle removal-pending before posting the TM. */
	set_bit(handle, mrioc->removepend_bitmap);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue DevRmHsTM: Admin Post failed\n",
		    __func__);
		goto out_failed;
	}
out:
	return;
out_failed:
	/* Posting failed: release the tracker back to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	drv_cmd->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
	drv_cmd->retry_count = 0;
	clear_bit(cmd_idx, mrioc->devrem_bitmap);
}
1781
1782 /**
1783 * mpi3mr_complete_evt_ack - event ack request completion
1784 * @mrioc: Adapter instance reference
1785 * @drv_cmd: Internal command tracker
1786 *
1787 * This is the completion handler for non blocking event
1788 * acknowledgment sent to the firmware and this will issue any
1789 * pending event acknowledgment request.
1790 *
1791 * Return: Nothing
1792 */
1793 static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
1794 struct mpi3mr_drv_cmd *drv_cmd)
1795 {
1796 u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
1797 struct delayed_evt_ack_node *delayed_evtack = NULL;
1798
1799 if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
1800 dprint_event_th(mrioc,
1801 "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
1802 (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
1803 drv_cmd->ioc_loginfo);
1804 }
1805
1806 if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
1807 delayed_evtack =
1808 list_entry(mrioc->delayed_evtack_cmds_list.next,
1809 struct delayed_evt_ack_node, list);
1810 mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
1811 delayed_evtack->event_ctx);
1812 list_del(&delayed_evtack->list);
1813 kfree(delayed_evtack);
1814 return;
1815 }
1816 drv_cmd->state = MPI3MR_CMD_NOTUSED;
1817 drv_cmd->callback = NULL;
1818 clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
1819 }
1820
/**
 * mpi3mr_send_event_ack - Issue event acknowledgment request
 * @mrioc: Adapter instance reference
 * @event: MPI3 event id
 * @cmdparam: Internal command tracker
 * @event_ctx: event context
 *
 * Issues an event acknowledgment request to the firmware if
 * there is a free command tracker, else adds the ack to a pend
 * list so that it will be processed on the completion of a
 * prior event acknowledgment.
 *
 * Return: Nothing
 */
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
	struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
{
	struct mpi3_event_ack_request evtack_req;
	int retval = 0;
	u8 retrycount = 5;
	u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
	struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
	struct delayed_evt_ack_node *delayed_evtack = NULL;

	/* Caller supplied a tracker (delayed-ack path): use it as-is. */
	if (drv_cmd) {
		dprint_event_th(mrioc,
		    "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
		    event, event_ctx);
		goto issue_cmd;
	}
	dprint_event_th(mrioc,
	    "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
	    event, event_ctx);
	/*
	 * Claim a free event-ack tracker. find_first_zero_bit() and
	 * test_and_set_bit() can race with other claimants, hence the
	 * bounded retry loop.
	 */
	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
		    MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
			if (!test_and_set_bit(cmd_idx,
			    mrioc->evtack_cmds_bitmap))
				break;
			cmd_idx = MPI3MR_NUM_EVTACKCMD;
		}
	} while (retrycount--);

	/*
	 * No tracker available: queue the ack on the delayed list;
	 * mpi3mr_complete_evt_ack() will pick it up later.
	 */
	if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
		delayed_evtack = kzalloc(sizeof(*delayed_evtack),
		    GFP_ATOMIC);
		if (!delayed_evtack)
			return;
		INIT_LIST_HEAD(&delayed_evtack->list);
		delayed_evtack->event = event;
		delayed_evtack->event_ctx = event_ctx;
		list_add_tail(&delayed_evtack->list,
		    &mrioc->delayed_evtack_cmds_list);
		dprint_event_th(mrioc,
		    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
		    event, event_ctx);
		return;
	}
	drv_cmd = &mrioc->evtack_cmds[cmd_idx];

issue_cmd:
	cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;

	memset(&evtack_req, 0, sizeof(evtack_req));
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		dprint_event_th(mrioc,
		    "sending event ack failed due to command in use\n");
		goto out;
	}
	/* Arm the tracker; completion continues in _complete_evt_ack(). */
	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 0;
	drv_cmd->callback = mpi3mr_complete_evt_ack;
	evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
	evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
	evtack_req.event = event;
	evtack_req.event_context = cpu_to_le32(event_ctx);
	retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
	    sizeof(evtack_req), 1);
	if (retval) {
		dprint_event_th(mrioc,
		    "posting event ack request is failed\n");
		goto out_failed;
	}

	dprint_event_th(mrioc,
	    "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
	    event, event_ctx);
out:
	return;
out_failed:
	/* Posting failed: release the tracker back to the pool. */
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	drv_cmd->callback = NULL;
	clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
}
1916
1917 /**
1918 * mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
1919 * @mrioc: Adapter instance reference
1920 * @event_reply: event data
1921 *
1922 * Checks for the reason code and based on that either block I/O
1923 * to device, or unblock I/O to the device, or start the device
1924 * removal handshake with reason as remove with the firmware for
1925 * PCIe devices.
1926 *
1927 * Return: Nothing
1928 */
1929 static void mpi3mr_pcietopochg_evt_th(struct mpi3mr_ioc *mrioc,
1930 struct mpi3_event_notification_reply *event_reply)
1931 {
1932 struct mpi3_event_data_pcie_topology_change_list *topo_evt =
1933 (struct mpi3_event_data_pcie_topology_change_list *)event_reply->event_data;
1934 int i;
1935 u16 handle;
1936 u8 reason_code;
1937 struct mpi3mr_tgt_dev *tgtdev = NULL;
1938 struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
1939
1940 for (i = 0; i < topo_evt->num_entries; i++) {
1941 handle = le16_to_cpu(topo_evt->port_entry[i].attached_dev_handle);
1942 if (!handle)
1943 continue;
1944 reason_code = topo_evt->port_entry[i].port_status;
1945 scsi_tgt_priv_data = NULL;
1946 tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
1947 if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
1948 scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
1949 tgtdev->starget->hostdata;
1950 switch (reason_code) {
1951 case MPI3_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
1952 if (scsi_tgt_priv_data) {
1953 scsi_tgt_priv_data->dev_removed = 1;
1954 scsi_tgt_priv_data->dev_removedelay = 0;
1955 atomic_set(&scsi_tgt_priv_data->block_io, 0);
1956 }
1957 mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
1958 MPI3_CTRL_OP_REMOVE_DEVICE);
1959 break;
1960 case MPI3_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
1961 if (scsi_tgt_priv_data) {
1962 scsi_tgt_priv_data->dev_removedelay = 1;
1963 atomic_inc(&scsi_tgt_priv_data->block_io);
1964 }
1965 break;
1966 case MPI3_EVENT_PCIE_TOPO_PS_RESPONDING:
1967 if (scsi_tgt_priv_data &&
1968 scsi_tgt_priv_data->dev_removedelay) {
1969 scsi_tgt_priv_data->dev_removedelay = 0;
1970 atomic_dec_if_positive
1971 (&scsi_tgt_priv_data->block_io);
1972 }
1973 break;
1974 case MPI3_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
1975 default:
1976 break;
1977 }
1978 if (tgtdev)
1979 mpi3mr_tgtdev_put(tgtdev);
1980 }
1981 }
1982
1983 /**
1984 * mpi3mr_sastopochg_evt_th - SASTopologyChange evt tophalf
1985 * @mrioc: Adapter instance reference
1986 * @event_reply: event data
1987 *
1988 * Checks for the reason code and based on that either block I/O
1989 * to device, or unblock I/O to the device, or start the device
1990 * removal handshake with reason as remove with the firmware for
1991 * SAS/SATA devices.
1992 *
1993 * Return: Nothing
1994 */
static void mpi3mr_sastopochg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	struct mpi3_event_data_sas_topology_change_list *topo_evt =
	    (struct mpi3_event_data_sas_topology_change_list *)event_reply->event_data;
	int i;
	u16 handle;
	u8 reason_code;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;

	/* Walk every PHY entry in the topology change list */
	for (i = 0; i < topo_evt->num_entries; i++) {
		handle = le16_to_cpu(topo_evt->phy_entry[i].attached_dev_handle);
		/* Entries with no attached device handle carry no work */
		if (!handle)
			continue;
		reason_code = topo_evt->phy_entry[i].status &
		    MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
		scsi_tgt_priv_data = NULL;
		/* Takes a reference on tgtdev; released at loop bottom */
		tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
		switch (reason_code) {
		case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
			/*
			 * Device is gone: mark removed, drop any pending
			 * delayed-removal I/O block, and start the removal
			 * handshake with the firmware.
			 */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removed = 1;
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_set(&scsi_tgt_priv_data->block_io, 0);
			}
			mpi3mr_dev_rmhs_send_tm(mrioc, handle, NULL,
			    MPI3_CTRL_OP_REMOVE_DEVICE);
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_DELAY_NOT_RESPONDING:
			/* Device may come back: block I/O while waiting */
			if (scsi_tgt_priv_data) {
				scsi_tgt_priv_data->dev_removedelay = 1;
				atomic_inc(&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_RESPONDING:
			/* Device returned: undo the delayed-removal block */
			if (scsi_tgt_priv_data &&
			    scsi_tgt_priv_data->dev_removedelay) {
				scsi_tgt_priv_data->dev_removedelay = 0;
				atomic_dec_if_positive
				    (&scsi_tgt_priv_data->block_io);
			}
			break;
		case MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED:
		default:
			break;
		}
		if (tgtdev)
			mpi3mr_tgtdev_put(tgtdev);
	}
}
2049
/**
 * mpi3mr_devstatuschg_evt_th - DeviceStatusChange evt tophalf
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Checks for the reason code and based on that either block I/O
 * to device, or unblock I/O to the device, or start the device
 * removal handshake with reason as remove/hide acknowledgment
 * with the firmware.
 *
 * Return: Nothing
 */
static void mpi3mr_devstatuschg_evt_th(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 dev_handle = 0;
	u8 ublock = 0, block = 0, hide = 0, delete = 0, remove = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct mpi3_event_data_device_status_change *evtdata =
	    (struct mpi3_event_data_device_status_change *)event_reply->event_data;

	/* Nothing to do once the driver has stopped event processing */
	if (mrioc->stop_drv_processing)
		goto out;

	dev_handle = le16_to_cpu(evtdata->dev_handle);

	/* Translate the firmware reason code into a set of action flags */
	switch (evtdata->reason_code) {
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_STRT:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_STRT:
		block = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_HIDDEN:
		delete = 1;
		hide = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_VD_NOT_RESPONDING:
		delete = 1;
		remove = 1;
		break;
	case MPI3_EVENT_DEV_STAT_RC_INT_DEVICE_RESET_CMP:
	case MPI3_EVENT_DEV_STAT_RC_INT_IT_NEXUS_RESET_CMP:
		ublock = 1;
		break;
	default:
		break;
	}

	/* Takes a reference on tgtdev; dropped at out: */
	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, dev_handle);
	if (!tgtdev)
		goto out;
	if (hide)
		tgtdev->is_hidden = hide;
	if (tgtdev->starget && tgtdev->starget->hostdata) {
		scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
		    tgtdev->starget->hostdata;
		if (block)
			atomic_inc(&scsi_tgt_priv_data->block_io);
		if (delete)
			scsi_tgt_priv_data->dev_removed = 1;
		if (ublock)
			atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	}
	/* Start the removal/hide handshake with the firmware as required */
	if (remove)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_REMOVE_DEVICE);
	if (hide)
		mpi3mr_dev_rmhs_send_tm(mrioc, dev_handle, NULL,
		    MPI3_CTRL_OP_HIDDEN_ACK);

out:
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
}
2124
2125 /**
2126 * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
2127 * @mrioc: Adapter instance reference
2128 * @event_reply: event data
2129 *
2130 * Blocks and unblocks host level I/O based on the reason code
2131 *
2132 * Return: Nothing
2133 */
2134 static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
2135 struct mpi3_event_notification_reply *event_reply)
2136 {
2137 struct mpi3_event_data_prepare_for_reset *evtdata =
2138 (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
2139
2140 if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
2141 dprint_event_th(mrioc,
2142 "prepare for reset event top half with rc=start\n");
2143 if (mrioc->prepare_for_reset)
2144 return;
2145 mrioc->prepare_for_reset = 1;
2146 mrioc->prepare_for_reset_timeout_counter = 0;
2147 } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
2148 dprint_event_th(mrioc,
2149 "prepare for reset top half with rc=abort\n");
2150 mrioc->prepare_for_reset = 0;
2151 mrioc->prepare_for_reset_timeout_counter = 0;
2152 }
2153 if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
2154 == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
2155 mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
2156 le32_to_cpu(event_reply->event_context));
2157 }
2158
2159 /**
2160 * mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
2161 * @mrioc: Adapter instance reference
2162 * @event_reply: event data
2163 *
2164 * Identifies the new shutdown timeout value and update.
2165 *
2166 * Return: Nothing
2167 */
2168 static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
2169 struct mpi3_event_notification_reply *event_reply)
2170 {
2171 struct mpi3_event_data_energy_pack_change *evtdata =
2172 (struct mpi3_event_data_energy_pack_change *)event_reply->event_data;
2173 u16 shutdown_timeout = le16_to_cpu(evtdata->shutdown_timeout);
2174
2175 if (shutdown_timeout <= 0) {
2176 ioc_warn(mrioc,
2177 "%s :Invalid Shutdown Timeout received = %d\n",
2178 __func__, shutdown_timeout);
2179 return;
2180 }
2181
2182 ioc_info(mrioc,
2183 "%s :Previous Shutdown Timeout Value = %d New Shutdown Timeout Value = %d\n",
2184 __func__, mrioc->facts.shutdown_timeout, shutdown_timeout);
2185 mrioc->facts.shutdown_timeout = shutdown_timeout;
2186 }
2187
2188 /**
2189 * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
2190 * @mrioc: Adapter instance reference
2191 * @event_reply: event data
2192 *
2193 * Displays Cable manegemt event details.
2194 *
2195 * Return: Nothing
2196 */
2197 static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
2198 struct mpi3_event_notification_reply *event_reply)
2199 {
2200 struct mpi3_event_data_cable_management *evtdata =
2201 (struct mpi3_event_data_cable_management *)event_reply->event_data;
2202
2203 switch (evtdata->status) {
2204 case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
2205 {
2206 ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
2207 "Devices connected to this cable are not detected.\n"
2208 "This cable requires %d mW of power.\n",
2209 evtdata->receptacle_id,
2210 le32_to_cpu(evtdata->active_cable_power_requirement));
2211 break;
2212 }
2213 case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
2214 {
2215 ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
2216 evtdata->receptacle_id);
2217 break;
2218 }
2219 default:
2220 break;
2221 }
2222 }
2223
/**
 * mpi3mr_os_handle_events - Firmware event handler
 * @mrioc: Adapter instance reference
 * @event_reply: event data
 *
 * Identify whether the event has to be handled and acknowledged,
 * and either process the event in the tophalf and/or schedule a
 * bottom half through mpi3mr_fwevt_worker.
 *
 * Return: Nothing
 */
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
	struct mpi3_event_notification_reply *event_reply)
{
	u16 evt_type, sz;
	struct mpi3mr_fwevt *fwevt = NULL;
	bool ack_req = 0, process_evt_bh = 0;

	if (mrioc->stop_drv_processing)
		return;

	/* Firmware tells us via msg_flags whether this event needs an ack */
	if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
	    == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
		ack_req = 1;

	evt_type = event_reply->event;

	switch (evt_type) {
	case MPI3_EVENT_DEVICE_ADDED:
	{
		struct mpi3_device_page0 *dev_pg0 =
		    (struct mpi3_device_page0 *)event_reply->event_data;
		/* Schedule the bottom half only if the tgtdev was created */
		if (mpi3mr_create_tgtdev(mrioc, dev_pg0))
			ioc_err(mrioc,
			    "%s :Failed to add device in the device add event\n",
			    __func__);
		else
			process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_DEVICE_STATUS_CHANGE:
	{
		process_evt_bh = 1;
		mpi3mr_devstatuschg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_sastopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	{
		process_evt_bh = 1;
		mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_PREPARE_FOR_RESET:
	{
		/* The top half sends its own ack when required */
		mpi3mr_preparereset_evt_th(mrioc, event_reply);
		ack_req = 0;
		break;
	}
	case MPI3_EVENT_DEVICE_INFO_CHANGED:
	{
		process_evt_bh = 1;
		break;
	}
	case MPI3_EVENT_ENERGY_PACK_CHANGE:
	{
		mpi3mr_energypackchg_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_CABLE_MGMT:
	{
		mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
		break;
	}
	case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
	case MPI3_EVENT_SAS_DISCOVERY:
	case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
	case MPI3_EVENT_PCIE_ENUMERATION:
		break;
	default:
		ioc_info(mrioc, "%s :event 0x%02x is not handled\n",
		    __func__, evt_type);
		break;
	}
	/*
	 * Queue a firmware event to the worker thread when the event needs
	 * bottom-half processing and/or an acknowledgment to the firmware.
	 */
	if (process_evt_bh || ack_req) {
		/* event_data_length is in 4-byte units */
		sz = event_reply->event_data_length * 4;
		fwevt = mpi3mr_alloc_fwevt(sz);
		if (!fwevt) {
			ioc_info(mrioc, "%s :failure at %s:%d/%s()!\n",
			    __func__, __FILE__, __LINE__, __func__);
			return;
		}

		memcpy(fwevt->event_data, event_reply->event_data, sz);
		fwevt->mrioc = mrioc;
		fwevt->event_id = evt_type;
		fwevt->send_ack = ack_req;
		fwevt->process_evt = process_evt_bh;
		fwevt->evt_ctx = le32_to_cpu(event_reply->event_context);
		mpi3mr_fwevt_add_to_list(mrioc, fwevt);
	}
}
2332
/**
 * mpi3mr_setup_eedp - Setup EEDP information in MPI3 SCSI IO
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * Identifies the protection information flags from the SCSI
 * command and set appropriate flags in the MPI3 SCSI IO
 * request.
 *
 * Return: Nothing
 */
static void mpi3mr_setup_eedp(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	u16 eedp_flags = 0;
	unsigned char prot_op = scsi_get_prot_op(scmd);

	/*
	 * Map the SCSI protection operation to the MPI3 EEDP operation.
	 * Operations that carry protection data to/from the host also set
	 * METASGL_VALID so a meta SGE is built later.
	 */
	switch (prot_op) {
	case SCSI_PROT_NORMAL:
		return;
	case SCSI_PROT_READ_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		break;
	case SCSI_PROT_WRITE_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		break;
	case SCSI_PROT_READ_INSERT:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_INSERT;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_STRIP:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_READ_PASS:
		eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;
		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	case SCSI_PROT_WRITE_PASS:
		if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM) {
			/* IP checksum guard: regenerate and translate app tag */
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN;
			scsiio_req->sgl[0].eedp.application_tag_translation_mask =
			    0xffff;
		} else
			eedp_flags = MPI3_EEDPFLAGS_EEDP_OP_CHECK;

		scsiio_req->msg_flags |= MPI3_SCSIIO_MSGFLAGS_METASGL_VALID;
		break;
	default:
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI3_EEDPFLAGS_CHK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
		eedp_flags |= MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
		eedp_flags |= MPI3_EEDPFLAGS_CHK_REF_TAG |
		    MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;
		/* Reference tag is big-endian in the EEDP CDB layout */
		scsiio_req->cdb.eedp32.primary_reference_tag =
		    cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT)
		eedp_flags |= MPI3_EEDPFLAGS_INCR_PRI_REF_TAG;

	eedp_flags |= MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE;

	/* Translate the protection interval to the MPI3 user-data-size code */
	switch (scsi_prot_interval(scmd)) {
	case 512:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_512;
		break;
	case 520:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_520;
		break;
	case 4080:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4080;
		break;
	case 4088:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4088;
		break;
	case 4096:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4096;
		break;
	case 4104:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4104;
		break;
	case 4160:
		scsiio_req->sgl[0].eedp.user_data_size = MPI3_EEDP_UDS_4160;
		break;
	default:
		break;
	}

	/* sgl[0] is consumed as an extended (EEDP) SGE by the firmware */
	scsiio_req->sgl[0].eedp.eedp_flags = cpu_to_le16(eedp_flags);
	scsiio_req->sgl[0].eedp.flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED;
}
2433
2434 /**
2435 * mpi3mr_build_sense_buffer - Map sense information
2436 * @desc: Sense type
2437 * @buf: Sense buffer to populate
2438 * @key: Sense key
2439 * @asc: Additional sense code
2440 * @ascq: Additional sense code qualifier
2441 *
2442 * Maps the given sense information into either descriptor or
2443 * fixed format sense data.
2444 *
2445 * Return: Nothing
2446 */
2447 static inline void mpi3mr_build_sense_buffer(int desc, u8 *buf, u8 key,
2448 u8 asc, u8 ascq)
2449 {
2450 if (desc) {
2451 buf[0] = 0x72; /* descriptor, current */
2452 buf[1] = key;
2453 buf[2] = asc;
2454 buf[3] = ascq;
2455 buf[7] = 0;
2456 } else {
2457 buf[0] = 0x70; /* fixed, current */
2458 buf[2] = key;
2459 buf[7] = 0xa;
2460 buf[12] = asc;
2461 buf[13] = ascq;
2462 }
2463 }
2464
2465 /**
2466 * mpi3mr_map_eedp_error - Map EEDP errors from IOC status
2467 * @scmd: SCSI command reference
2468 * @ioc_status: status of MPI3 request
2469 *
2470 * Maps the EEDP error status of the SCSI IO request to sense
2471 * data.
2472 *
2473 * Return: Nothing
2474 */
2475 static void mpi3mr_map_eedp_error(struct scsi_cmnd *scmd,
2476 u16 ioc_status)
2477 {
2478 u8 ascq = 0;
2479
2480 switch (ioc_status) {
2481 case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
2482 ascq = 0x01;
2483 break;
2484 case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
2485 ascq = 0x02;
2486 break;
2487 case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
2488 ascq = 0x03;
2489 break;
2490 default:
2491 ascq = 0x00;
2492 break;
2493 }
2494
2495 mpi3mr_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
2496 0x10, ascq);
2497 scmd->result = (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
2498 }
2499
/**
 * mpi3mr_process_op_reply_desc - reply descriptor handler
 * @mrioc: Adapter instance reference
 * @reply_desc: Operational reply descriptor
 * @reply_dma: place holder for reply DMA address
 * @qidx: Operational queue index
 *
 * Process the operational reply descriptor and identifies the
 * descriptor type. Based on the descriptor map the MPI3 request
 * status to a SCSI command status and calls scsi_done call
 * back.
 *
 * Return: Nothing
 */
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
	struct mpi3_default_reply_descriptor *reply_desc, u64 *reply_dma, u16 qidx)
{
	u16 reply_desc_type, host_tag = 0;
	u16 ioc_status = MPI3_IOCSTATUS_SUCCESS;
	u32 ioc_loginfo = 0;
	struct mpi3_status_reply_descriptor *status_desc = NULL;
	struct mpi3_address_reply_descriptor *addr_desc = NULL;
	struct mpi3_success_reply_descriptor *success_desc = NULL;
	struct mpi3_scsi_io_reply *scsi_reply = NULL;
	struct scsi_cmnd *scmd = NULL;
	struct scmd_priv *priv = NULL;
	u8 *sense_buf = NULL;
	u8 scsi_state = 0, scsi_status = 0, sense_state = 0;
	u32 xfer_count = 0, sense_count = 0, resp_data = 0;
	u16 dev_handle = 0xFFFF;
	struct scsi_sense_hdr sshdr;

	/* Decode the descriptor type and extract status fields from it */
	*reply_dma = 0;
	reply_desc_type = le16_to_cpu(reply_desc->reply_flags) &
	    MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK;
	switch (reply_desc_type) {
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_STATUS:
		status_desc = (struct mpi3_status_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(status_desc->host_tag);
		ioc_status = le16_to_cpu(status_desc->ioc_status);
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(status_desc->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
		/* Full reply frame lives at a DMA address; look it up */
		addr_desc = (struct mpi3_address_reply_descriptor *)reply_desc;
		*reply_dma = le64_to_cpu(addr_desc->reply_frame_address);
		scsi_reply = mpi3mr_get_reply_virt_addr(mrioc,
		    *reply_dma);
		if (!scsi_reply) {
			panic("%s: scsi_reply is NULL, this shouldn't happen\n",
			    mrioc->name);
			goto out;
		}
		host_tag = le16_to_cpu(scsi_reply->host_tag);
		ioc_status = le16_to_cpu(scsi_reply->ioc_status);
		scsi_status = scsi_reply->scsi_status;
		scsi_state = scsi_reply->scsi_state;
		dev_handle = le16_to_cpu(scsi_reply->dev_handle);
		sense_state = (scsi_state & MPI3_SCSI_STATE_SENSE_MASK);
		xfer_count = le32_to_cpu(scsi_reply->transfer_count);
		sense_count = le32_to_cpu(scsi_reply->sense_count);
		resp_data = le32_to_cpu(scsi_reply->response_data);
		sense_buf = mpi3mr_get_sensebuf_virt_addr(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
		if (ioc_status &
		    MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
			ioc_loginfo = le32_to_cpu(scsi_reply->ioc_log_info);
		ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
		if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
			panic("%s: Ran out of sense buffers\n", mrioc->name);
		break;
	case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS:
		success_desc = (struct mpi3_success_reply_descriptor *)reply_desc;
		host_tag = le16_to_cpu(success_desc->host_tag);
		break;
	default:
		break;
	}
	scmd = mpi3mr_scmd_from_host_tag(mrioc, host_tag, qidx);
	if (!scmd) {
		panic("%s: Cannot Identify scmd for host_tag 0x%x\n",
		    mrioc->name, host_tag);
		goto out;
	}
	priv = scsi_cmd_priv(scmd);
	/* Success descriptors carry no error detail: complete as DID_OK */
	if (success_desc) {
		scmd->result = DID_OK << 16;
		goto out_success;
	}

	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_count);
	/*
	 * Zero-byte underruns with a retryable SCSI status are treated as
	 * success so the mid-layer sees the raw SCSI status below.
	 */
	if (ioc_status == MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN &&
	    xfer_count == 0 && (scsi_status == MPI3_SCSI_STATUS_BUSY ||
	    scsi_status == MPI3_SCSI_STATUS_RESERVATION_CONFLICT ||
	    scsi_status == MPI3_SCSI_STATUS_TASK_SET_FULL))
		ioc_status = MPI3_IOCSTATUS_SUCCESS;

	/* Copy firmware-provided sense data into the mid-layer buffer */
	if ((sense_state == MPI3_SCSI_STATE_SENSE_VALID) && sense_count &&
	    sense_buf) {
		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, sense_count);

		memcpy(scmd->sense_buffer, sense_buf, sz);
	}

	/* Map the MPI3 IOC status to a SCSI mid-layer result */
	switch (ioc_status) {
	case MPI3_IOCSTATUS_BUSY:
	case MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES:
		scmd->result = SAM_STAT_BUSY;
		break;
	case MPI3_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		scmd->result = DID_NO_CONNECT << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI3_IOCSTATUS_SCSI_EXT_TERMINATED:
		scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		if ((xfer_count == 0) || (scmd->underflow > xfer_count))
			scmd->result = DID_SOFT_ERROR << 16;
		else
			scmd->result = (DID_OK << 16) | scsi_status;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_UNDERRUN:
		scmd->result = (DID_OK << 16) | scsi_status;
		/* Valid sense data already explains the short transfer */
		if (sense_state == MPI3_SCSI_STATE_SENSE_VALID)
			break;
		if (xfer_count < scmd->underflow) {
			if (scsi_status == SAM_STAT_BUSY)
				scmd->result = SAM_STAT_BUSY;
			else
				scmd->result = DID_SOFT_ERROR << 16;
		} else if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state != MPI3_SCSI_STATE_SENSE_NOT_AVAILABLE))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		fallthrough;
	case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI3_IOCSTATUS_SUCCESS:
		scmd->result = (DID_OK << 16) | scsi_status;
		if ((scsi_state & (MPI3_SCSI_STATE_NO_SCSI_STATUS)) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_FAILED) ||
		    (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY))
			scmd->result = DID_SOFT_ERROR << 16;
		else if (scsi_state & MPI3_SCSI_STATE_TERMINATED)
			scmd->result = DID_RESET << 16;
		break;
	case MPI3_IOCSTATUS_EEDP_GUARD_ERROR:
	case MPI3_IOCSTATUS_EEDP_REF_TAG_ERROR:
	case MPI3_IOCSTATUS_EEDP_APP_TAG_ERROR:
		mpi3mr_map_eedp_error(scmd, ioc_status);
		break;
	case MPI3_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FUNCTION:
	case MPI3_IOCSTATUS_INVALID_SGL:
	case MPI3_IOCSTATUS_INTERNAL_ERROR:
	case MPI3_IOCSTATUS_INVALID_FIELD:
	case MPI3_IOCSTATUS_INVALID_STATE:
	case MPI3_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI3_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI3_IOCSTATUS_INSUFFICIENT_POWER:
	default:
		scmd->result = DID_SOFT_ERROR << 16;
		break;
	}

	/* Log failed commands, except ATA passthrough noise */
	if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) &&
	    (scmd->cmnd[0] != ATA_16)) {
		ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__,
		    scmd->result);
		scsi_print_command(scmd);
		ioc_info(mrioc,
		    "%s :Command issued to handle 0x%02x returned with error 0x%04x loginfo 0x%08x, qid %d\n",
		    __func__, dev_handle, ioc_status, ioc_loginfo,
		    priv->req_q_idx + 1);
		ioc_info(mrioc,
		    " host_tag %d scsi_state 0x%02x scsi_status 0x%02x, xfer_cnt %d resp_data 0x%x\n",
		    host_tag, scsi_state, scsi_status, xfer_count, resp_data);
		if (sense_buf) {
			scsi_normalize_sense(sense_buf, sense_count, &sshdr);
			ioc_info(mrioc,
			    "%s :sense_count 0x%x, sense_key 0x%x ASC 0x%x, ASCQ 0x%x\n",
			    __func__, sense_count, sshdr.sense_key,
			    sshdr.asc, sshdr.ascq);
		}
	}
out_success:
	/* Unmap meta (protection) SG list if one was mapped for this cmd */
	if (priv->meta_sg_valid) {
		dma_unmap_sg(&mrioc->pdev->dev, scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd), scmd->sc_data_direction);
	}
	mpi3mr_clear_scmd_priv(mrioc, scmd);
	scsi_dma_unmap(scmd);
	scmd->scsi_done(scmd);
out:
	/* Return the sense buffer to the firmware's free queue */
	if (sense_buf)
		mpi3mr_repost_sense_buf(mrioc,
		    le64_to_cpu(scsi_reply->sense_data_buffer_address));
}
2707
2708 /**
2709 * mpi3mr_get_chain_idx - get free chain buffer index
2710 * @mrioc: Adapter instance reference
2711 *
2712 * Try to get a free chain buffer index from the free pool.
2713 *
2714 * Return: -1 on failure or the free chain buffer index
2715 */
2716 static int mpi3mr_get_chain_idx(struct mpi3mr_ioc *mrioc)
2717 {
2718 u8 retry_count = 5;
2719 int cmd_idx = -1;
2720
2721 do {
2722 spin_lock(&mrioc->chain_buf_lock);
2723 cmd_idx = find_first_zero_bit(mrioc->chain_bitmap,
2724 mrioc->chain_buf_count);
2725 if (cmd_idx < mrioc->chain_buf_count) {
2726 set_bit(cmd_idx, mrioc->chain_bitmap);
2727 spin_unlock(&mrioc->chain_buf_lock);
2728 break;
2729 }
2730 spin_unlock(&mrioc->chain_buf_lock);
2731 cmd_idx = -1;
2732 } while (retry_count--);
2733 return cmd_idx;
2734 }
2735
/**
 * mpi3mr_prepare_sg_scmd - build scatter gather list
 * @mrioc: Adapter instance reference
 * @scmd: SCSI command reference
 * @scsiio_req: MPI3 SCSI IO request
 *
 * This function maps SCSI command's data and protection SGEs to
 * MPI request SGEs. If required additional 4K chain buffer is
 * used to send the SGEs.
 *
 * Return: 0 on success, -ENOMEM on dma_map_sg failure
 */
static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
{
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_length;
	int sges_left, chain_idx;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 last_chain_sgl_flags;
	struct chain_element *chain_req;
	struct scmd_priv *priv = NULL;
	/* Non-zero when this call is building the meta (protection) SGL */
	u32 meta_sg = le32_to_cpu(scsiio_req->flags) &
	    MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI;

	priv = scsi_cmd_priv(scmd);

	simple_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI3_SGE_FLAGS_END_OF_LIST;
	last_chain_sgl_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_LAST_CHAIN |
	    MPI3_SGE_FLAGS_DLAS_SYSTEM;

	/* Meta SGL occupies the dedicated slot at METASGL_INDEX */
	if (meta_sg)
		sg_local = &scsiio_req->sgl[MPI3_SCSIIO_METASGL_INDEX];
	else
		sg_local = &scsiio_req->sgl;

	if (!scsiio_req->data_length && !meta_sg) {
		mpi3mr_build_zero_len_sge(sg_local);
		return 0;
	}

	if (meta_sg) {
		sg_scmd = scsi_prot_sglist(scmd);
		sges_left = dma_map_sg(&mrioc->pdev->dev,
		    scsi_prot_sglist(scmd),
		    scsi_prot_sg_count(scmd),
		    scmd->sc_data_direction);
		priv->meta_sg_valid = 1; /* To unmap meta sg DMA */
	} else {
		sg_scmd = scsi_sglist(scmd);
		sges_left = scsi_dma_map(scmd);
	}

	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map failed: request for %d bytes!\n",
		    scsi_bufflen(scmd));
		return -ENOMEM;
	}
	if (sges_left > MPI3MR_SG_DEPTH) {
		sdev_printk(KERN_ERR, scmd->device,
		    "scsi_dma_map returned unsupported sge count %d!\n",
		    sges_left);
		return -ENOMEM;
	}

	/* SGEs that fit inline in the request frame after the header */
	sges_in_segment = (mrioc->facts.op_req_sz -
	    offsetof(struct mpi3_scsi_io_request, sgl)) / sizeof(struct mpi3_sge_common);

	if (scsiio_req->sgl[0].eedp.flags ==
	    MPI3_SGE_FLAGS_ELEMENT_TYPE_EXTENDED && !meta_sg) {
		sg_local += sizeof(struct mpi3_sge_common);
		sges_in_segment--;
		/* Reserve 1st segment (scsiio_req->sgl[0]) for eedp */
	}

	if (scsiio_req->msg_flags ==
	    MPI3_SCSIIO_MSGFLAGS_METASGL_VALID && !meta_sg) {
		sges_in_segment--;
		/* Reserve last segment (scsiio_req->sgl[3]) for meta sg */
	}

	/* The meta SGL slot holds exactly one SGE */
	if (meta_sg)
		sges_in_segment = 1;

	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
		sges_in_segment--;
	}

	/* Remaining SGEs go into a chain buffer referenced by a chain SGE */
	chain_idx = mpi3mr_get_chain_idx(mrioc);
	if (chain_idx < 0)
		return -1;
	chain_req = &mrioc->chain_sgl_list[chain_idx];
	/* Remember the index so completion can return it to the pool */
	if (meta_sg)
		priv->meta_chain_idx = chain_idx;
	else
		priv->chain_idx = chain_idx;

	chain = chain_req->addr;
	chain_dma = chain_req->dma_addr;
	sges_in_segment = sges_left;
	chain_length = sges_in_segment * sizeof(struct mpi3_sge_common);

	mpi3mr_add_sg_single(sg_local, last_chain_sgl_flags,
	    chain_length, chain_dma);

	sg_local = chain;

fill_in_last_segment:
	/* Emit the remaining SGEs; the final one is marked END_OF_LIST */
	while (sges_left > 0) {
		if (sges_left == 1)
			mpi3mr_add_sg_single(sg_local,
			    simple_sgl_flags_last, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			mpi3mr_add_sg_single(sg_local, simple_sgl_flags,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += sizeof(struct mpi3_sge_common);
		sges_left--;
	}

	return 0;
}
2876
2877 /**
2878 * mpi3mr_build_sg_scmd - build scatter gather list for SCSI IO
2879 * @mrioc: Adapter instance reference
2880 * @scmd: SCSI command reference
2881 * @scsiio_req: MPI3 SCSI IO request
2882 *
2883 * This function calls mpi3mr_prepare_sg_scmd for constructing
2884 * both data SGEs and protection information SGEs in the MPI
2885 * format from the SCSI Command as appropriate .
2886 *
2887 * Return: return value of mpi3mr_prepare_sg_scmd.
2888 */
2889 static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
2890 struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req)
2891 {
2892 int ret;
2893
2894 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2895 if (ret)
2896 return ret;
2897
2898 if (scsiio_req->msg_flags == MPI3_SCSIIO_MSGFLAGS_METASGL_VALID) {
2899 /* There is a valid meta sg */
2900 scsiio_req->flags |=
2901 cpu_to_le32(MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI);
2902 ret = mpi3mr_prepare_sg_scmd(mrioc, scmd, scsiio_req);
2903 }
2904
2905 return ret;
2906 }
2907
2908 /**
2909 * mpi3mr_tm_response_name - get TM response as a string
2910 * @resp_code: TM response code
2911 *
2912 * Convert known task management response code as a readable
2913 * string.
2914 *
2915 * Return: response code string.
2916 */
2917 static const char *mpi3mr_tm_response_name(u8 resp_code)
2918 {
2919 char *desc;
2920
2921 switch (resp_code) {
2922 case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
2923 desc = "task management request completed";
2924 break;
2925 case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
2926 desc = "invalid frame";
2927 break;
2928 case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
2929 desc = "task management request not supported";
2930 break;
2931 case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
2932 desc = "task management request failed";
2933 break;
2934 case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
2935 desc = "task management request succeeded";
2936 break;
2937 case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
2938 desc = "invalid LUN";
2939 break;
2940 case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
2941 desc = "overlapped tag attempted";
2942 break;
2943 case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
2944 desc = "task queued, however not sent to target";
2945 break;
2946 case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
2947 desc = "task management request denied by NVMe device";
2948 break;
2949 default:
2950 desc = "unknown";
2951 break;
2952 }
2953
2954 return desc;
2955 }
2956
/**
 * mpi3mr_poll_pend_io_completions - poll all operational reply queues
 * @mrioc: Adapter instance reference
 *
 * Processes every operational reply queue of the adapter (queues
 * op_reply_q_offset through op_reply_q_offset + num_op_reply_q - 1)
 * to drain pending I/O completions.
 *
 * Return: Nothing
 */
inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
{
	int i;
	int num_of_reply_queues =
	    mrioc->num_op_reply_q + mrioc->op_reply_q_offset;

	for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
		mpi3mr_process_op_reply_q(mrioc,
		    mrioc->intr_info[i].op_reply_q);
}
2967
2968 /**
2969 * mpi3mr_issue_tm - Issue Task Management request
2970 * @mrioc: Adapter instance reference
2971 * @tm_type: Task Management type
2972 * @handle: Device handle
2973 * @lun: lun ID
2974 * @htag: Host tag of the TM request
2975 * @timeout: TM timeout value
2976 * @drv_cmd: Internal command tracker
2977 * @resp_code: Response code place holder
2978 * @scmd: SCSI command
2979 *
2980 * Issues a Task Management Request to the controller for a
2981 * specified target, lun and command and wait for its completion
2982 * and check TM response. Recover the TM if it timed out by
2983 * issuing controller reset.
2984 *
2985 * Return: 0 on success, non-zero on errors
2986 */
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
	u16 handle, uint lun, u16 htag, ulong timeout,
	struct mpi3mr_drv_cmd *drv_cmd,
	u8 *resp_code, struct scsi_cmnd *scmd)
{
	struct mpi3_scsi_task_mgmt_request tm_req;
	struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
	int retval = 0;
	struct mpi3mr_tgt_dev *tgtdev = NULL;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
	struct scmd_priv *cmd_priv = NULL;
	struct scsi_device *sdev = NULL;
	struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;

	ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
	    __func__, tm_type, handle);
	/* No TM can be issued once the controller is declared unrecoverable */
	if (mrioc->unrecoverable) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Unrecoverable controller\n",
		    __func__);
		goto out;
	}

	memset(&tm_req, 0, sizeof(tm_req));
	/* Serialize use of the shared driver command tracker */
	mutex_lock(&drv_cmd->mutex);
	if (drv_cmd->state & MPI3MR_CMD_PENDING) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Command is in use\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}
	if (mrioc->reset_in_progress) {
		retval = -1;
		ioc_err(mrioc, "%s :Issue TM: Reset in progress\n", __func__);
		mutex_unlock(&drv_cmd->mutex);
		goto out;
	}

	drv_cmd->state = MPI3MR_CMD_PENDING;
	drv_cmd->is_waiting = 1;
	drv_cmd->callback = NULL;
	tm_req.dev_handle = cpu_to_le16(handle);
	tm_req.task_type = tm_type;
	tm_req.host_tag = cpu_to_le16(htag);

	int_to_scsilun(lun, (struct scsi_lun *)tm_req.lun);
	tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;

	tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);

	/* Resolve the target private data either via the scmd or the handle */
	if (scmd) {
		sdev = scmd->device;
		sdev_priv_data = sdev->hostdata;
		scsi_tgt_priv_data = ((sdev_priv_data) ?
		    sdev_priv_data->tgt_priv_data : NULL);
	} else {
		if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
			scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
			    tgtdev->starget->hostdata;
	}

	/* Block new I/O to the target while the TM is outstanding */
	if (scsi_tgt_priv_data)
		atomic_inc(&scsi_tgt_priv_data->block_io);

	/*
	 * For PCIe devices prefer the device-reported abort/reset timeouts
	 * over the caller-supplied one.
	 * NOTE(review): cmd_priv is never assigned in this function, so the
	 * abort_to branch is currently unreachable and reset_to is always
	 * chosen — presumably groundwork for abort-task support; confirm.
	 */
	if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
		if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
			timeout = tgtdev->dev_spec.pcie_inf.abort_to;
		else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
			timeout = tgtdev->dev_spec.pcie_inf.reset_to;
	}

	init_completion(&drv_cmd->done);
	retval = mpi3mr_admin_request_post(mrioc, &tm_req, sizeof(tm_req), 1);
	if (retval) {
		ioc_err(mrioc, "%s :Issue TM: Admin Post failed\n", __func__);
		goto out_unlock;
	}
	wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));

	/* Timed out: recover the controller with a soft reset */
	if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
		drv_cmd->is_waiting = 0;
		retval = -1;
		if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
			dprint_tm(mrioc,
			    "task management request timed out after %ld seconds\n",
			    timeout);
			if (mrioc->logging_level & MPI3_DEBUG_TM)
				dprint_dump_req(&tm_req, sizeof(tm_req)/4);
			mpi3mr_soft_reset_handler(mrioc,
			    MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
		}
		goto out_unlock;
	}

	if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
		dprint_tm(mrioc, "invalid task management reply message\n");
		retval = -1;
		goto out_unlock;
	}

	tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;

	switch (drv_cmd->ioc_status) {
	case MPI3_IOCSTATUS_SUCCESS:
		*resp_code = le32_to_cpu(tm_reply->response_data) &
		    MPI3MR_RI_MASK_RESPCODE;
		break;
	case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
		/* IOC terminated the I/O itself; treat as TM complete */
		*resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
		break;
	default:
		dprint_tm(mrioc,
		    "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
		    handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
		retval = -1;
		goto out_unlock;
	}

	switch (*resp_code) {
	case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
	case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
		break;
	case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
		/* Queued-on-IOC is acceptable only for query task */
		if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
			retval = -1;
		break;
	default:
		retval = -1;
		break;
	}

	dprint_tm(mrioc,
	    "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
	    tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
	    le32_to_cpu(tm_reply->termination_count),
	    mpi3mr_tm_response_name(*resp_code), *resp_code);

	/*
	 * On success, drain pending I/O completions (with interrupts masked
	 * for the first pass) so the pend_count refresh below is accurate.
	 */
	if (!retval) {
		mpi3mr_ioc_disable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
		mpi3mr_ioc_enable_intr(mrioc);
		mpi3mr_poll_pend_io_completions(mrioc);
	}
	/* Recount commands still outstanding against the reset target/LUN */
	switch (tm_type) {
	case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
		if (!scsi_tgt_priv_data)
			break;
		scsi_tgt_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_tgt_pending,
		    (void *)scsi_tgt_priv_data->starget);
		break;
	case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		if (!sdev_priv_data)
			break;
		sdev_priv_data->pend_count = 0;
		blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
		    mpi3mr_count_dev_pending, (void *)sdev);
		break;
	default:
		break;
	}

out_unlock:
	drv_cmd->state = MPI3MR_CMD_NOTUSED;
	mutex_unlock(&drv_cmd->mutex);
	/* Unblock I/O that was held off for the duration of the TM */
	if (scsi_tgt_priv_data)
		atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
	if (tgtdev)
		mpi3mr_tgtdev_put(tgtdev);
out:
	return retval;
}
3160
3161 /**
3162 * mpi3mr_bios_param - BIOS param callback
3163 * @sdev: SCSI device reference
3164 * @bdev: Block device reference
3165 * @capacity: Capacity in logical sectors
3166 * @params: Parameter array
3167 *
3168 * Just the parameters with heads/secots/cylinders.
3169 *
3170 * Return: 0 always
3171 */
3172 static int mpi3mr_bios_param(struct scsi_device *sdev,
3173 struct block_device *bdev, sector_t capacity, int params[])
3174 {
3175 int heads;
3176 int sectors;
3177 sector_t cylinders;
3178 ulong dummy;
3179
3180 heads = 64;
3181 sectors = 32;
3182
3183 dummy = heads * sectors;
3184 cylinders = capacity;
3185 sector_div(cylinders, dummy);
3186
3187 if ((ulong)capacity >= 0x200000) {
3188 heads = 255;
3189 sectors = 63;
3190 dummy = heads * sectors;
3191 cylinders = capacity;
3192 sector_div(cylinders, dummy);
3193 }
3194
3195 params[0] = heads;
3196 params[1] = sectors;
3197 params[2] = cylinders;
3198 return 0;
3199 }
3200
3201 /**
3202 * mpi3mr_map_queues - Map queues callback handler
3203 * @shost: SCSI host reference
3204 *
3205 * Maps default and poll queues.
3206 *
3207 * Return: return zero.
3208 */
3209 static int mpi3mr_map_queues(struct Scsi_Host *shost)
3210 {
3211 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3212 int i, qoff, offset;
3213 struct blk_mq_queue_map *map = NULL;
3214
3215 offset = mrioc->op_reply_q_offset;
3216
3217 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
3218 map = &shost->tag_set.map[i];
3219
3220 map->nr_queues = 0;
3221
3222 if (i == HCTX_TYPE_DEFAULT)
3223 map->nr_queues = mrioc->default_qcount;
3224 else if (i == HCTX_TYPE_POLL)
3225 map->nr_queues = mrioc->active_poll_qcount;
3226
3227 if (!map->nr_queues) {
3228 BUG_ON(i == HCTX_TYPE_DEFAULT);
3229 continue;
3230 }
3231
3232 /*
3233 * The poll queue(s) doesn't have an IRQ (and hence IRQ
3234 * affinity), so use the regular blk-mq cpu mapping
3235 */
3236 map->queue_offset = qoff;
3237 if (i != HCTX_TYPE_POLL)
3238 blk_mq_pci_map_queues(map, mrioc->pdev, offset);
3239 else
3240 blk_mq_map_queues(map);
3241
3242 qoff += map->nr_queues;
3243 offset += map->nr_queues;
3244 }
3245
3246 return 0;
3247
3248 }
3249
3250 /**
3251 * mpi3mr_get_fw_pending_ios - Calculate pending I/O count
3252 * @mrioc: Adapter instance reference
3253 *
3254 * Calculate the pending I/Os for the controller and return.
3255 *
3256 * Return: Number of pending I/Os
3257 */
3258 static inline int mpi3mr_get_fw_pending_ios(struct mpi3mr_ioc *mrioc)
3259 {
3260 u16 i;
3261 uint pend_ios = 0;
3262
3263 for (i = 0; i < mrioc->num_op_reply_q; i++)
3264 pend_ios += atomic_read(&mrioc->op_reply_qinfo[i].pend_ios);
3265 return pend_ios;
3266 }
3267
3268 /**
3269 * mpi3mr_print_pending_host_io - print pending I/Os
3270 * @mrioc: Adapter instance reference
3271 *
3272 * Print number of pending I/Os and each I/O details prior to
3273 * reset for debug purpose.
3274 *
3275 * Return: Nothing
3276 */
3277 static void mpi3mr_print_pending_host_io(struct mpi3mr_ioc *mrioc)
3278 {
3279 struct Scsi_Host *shost = mrioc->shost;
3280
3281 ioc_info(mrioc, "%s :Pending commands prior to reset: %d\n",
3282 __func__, mpi3mr_get_fw_pending_ios(mrioc));
3283 blk_mq_tagset_busy_iter(&shost->tag_set,
3284 mpi3mr_print_scmd, (void *)mrioc);
3285 }
3286
3287 /**
3288 * mpi3mr_wait_for_host_io - block for I/Os to complete
3289 * @mrioc: Adapter instance reference
3290 * @timeout: time out in seconds
3291 * Waits for pending I/Os for the given adapter to complete or
3292 * to hit the timeout.
3293 *
3294 * Return: Nothing
3295 */
3296 void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
3297 {
3298 enum mpi3mr_iocstate iocstate;
3299 int i = 0;
3300
3301 iocstate = mpi3mr_get_iocstate(mrioc);
3302 if (iocstate != MRIOC_STATE_READY)
3303 return;
3304
3305 if (!mpi3mr_get_fw_pending_ios(mrioc))
3306 return;
3307 ioc_info(mrioc,
3308 "%s :Waiting for %d seconds prior to reset for %d I/O\n",
3309 __func__, timeout, mpi3mr_get_fw_pending_ios(mrioc));
3310
3311 for (i = 0; i < timeout; i++) {
3312 if (!mpi3mr_get_fw_pending_ios(mrioc))
3313 break;
3314 iocstate = mpi3mr_get_iocstate(mrioc);
3315 if (iocstate != MRIOC_STATE_READY)
3316 break;
3317 msleep(1000);
3318 }
3319
3320 ioc_info(mrioc, "%s :Pending I/Os after wait is: %d\n", __func__,
3321 mpi3mr_get_fw_pending_ios(mrioc));
3322 }
3323
/**
 * mpi3mr_eh_host_reset - Host reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue controller reset if the scmd is for a Physical Device,
 * if the scmd is for RAID volume, then wait for
 * MPI3MR_RAID_ERRREC_RESET_TIMEOUT and check whether any
 * pending I/Os are left prior to issuing reset to the
 * controller.
 *
 * Return: SUCCESS on successful reset else FAILED
 */
static int mpi3mr_eh_host_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u8 dev_type = MPI3_DEVICE_DEVFORM_VD; /* default when no priv data */
	int retval = FAILED, ret;

	sdev_priv_data = scmd->device->hostdata;
	if (sdev_priv_data && sdev_priv_data->tgt_priv_data) {
		stgt_priv_data = sdev_priv_data->tgt_priv_data;
		dev_type = stgt_priv_data->dev_type;
	}

	/*
	 * For RAID volumes, give outstanding I/O a grace period to drain;
	 * if everything completes, skip the disruptive controller reset.
	 */
	if (dev_type == MPI3_DEVICE_DEVFORM_VD) {
		mpi3mr_wait_for_host_io(mrioc,
		    MPI3MR_RAID_ERRREC_RESET_TIMEOUT);
		if (!mpi3mr_get_fw_pending_ios(mrioc)) {
			retval = SUCCESS;
			goto out;
		}
	}

	mpi3mr_print_pending_host_io(mrioc);
	ret = mpi3mr_soft_reset_handler(mrioc,
	    MPI3MR_RESET_FROM_EH_HOS, 1);
	if (ret)
		goto out;

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "Host reset is %s for scmd(%p)\n",
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}
3372
/**
 * mpi3mr_eh_target_reset - Target reset error handling callback
 * @scmd: SCSI command reference
 *
 * Issue Target reset Task Management and verify the scmd is
 * terminated successfully and return status accordingly.
 *
 * Return: SUCCESS on successful termination of the scmd else
 * FAILED
 */
static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	u16 dev_handle;
	u8 resp_code = 0;
	int retval = FAILED, ret = 0;

	sdev_printk(KERN_INFO, scmd->device,
	    "Attempting Target Reset! scmd(%p)\n", scmd);
	scsi_print_command(scmd);

	/* Device already gone: nothing to reset, report success */
	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		sdev_printk(KERN_INFO, scmd->device,
		    "SCSI device is not available\n");
		retval = SUCCESS;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;
	dev_handle = stgt_priv_data->dev_handle;
	if (stgt_priv_data->dev_removed) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
		    mrioc->name, dev_handle);
		retval = FAILED;
		goto out;
	}
	sdev_printk(KERN_INFO, scmd->device,
	    "Target Reset is issued to handle(0x%04x)\n",
	    dev_handle);

	ret = mpi3mr_issue_tm(mrioc,
	    MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
	    sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
	    MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);

	if (ret)
		goto out;

	/* pend_count is refreshed by mpi3mr_issue_tm() after the TM */
	if (stgt_priv_data->pend_count) {
		sdev_printk(KERN_INFO, scmd->device,
		    "%s: target has %d pending commands, target reset is failed\n",
		    mrioc->name, stgt_priv_data->pend_count);
		goto out;
	}

	retval = SUCCESS;
out:
	sdev_printk(KERN_INFO, scmd->device,
	    "%s: target reset is %s for scmd(%p)\n", mrioc->name,
	    ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	return retval;
}
3440
3441 /**
3442 * mpi3mr_eh_dev_reset- Device reset error handling callback
3443 * @scmd: SCSI command reference
3444 *
3445 * Issue lun reset Task Management and verify the scmd is
3446 * terminated successfully and return status accordingly.
3447 *
3448 * Return: SUCCESS of successful termination of the scmd else
3449 * FAILED
3450 */
3451 static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
3452 {
3453 struct mpi3mr_ioc *mrioc = shost_priv(scmd->device->host);
3454 struct mpi3mr_stgt_priv_data *stgt_priv_data;
3455 struct mpi3mr_sdev_priv_data *sdev_priv_data;
3456 u16 dev_handle;
3457 u8 resp_code = 0;
3458 int retval = FAILED, ret = 0;
3459
3460 sdev_printk(KERN_INFO, scmd->device,
3461 "Attempting Device(lun) Reset! scmd(%p)\n", scmd);
3462 scsi_print_command(scmd);
3463
3464 sdev_priv_data = scmd->device->hostdata;
3465 if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
3466 sdev_printk(KERN_INFO, scmd->device,
3467 "SCSI device is not available\n");
3468 retval = SUCCESS;
3469 goto out;
3470 }
3471
3472 stgt_priv_data = sdev_priv_data->tgt_priv_data;
3473 dev_handle = stgt_priv_data->dev_handle;
3474 if (stgt_priv_data->dev_removed) {
3475 sdev_printk(KERN_INFO, scmd->device,
3476 "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
3477 mrioc->name, dev_handle);
3478 retval = FAILED;
3479 goto out;
3480 }
3481 sdev_printk(KERN_INFO, scmd->device,
3482 "Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
3483
3484 ret = mpi3mr_issue_tm(mrioc,
3485 MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
3486 sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
3487 MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
3488
3489 if (ret)
3490 goto out;
3491
3492 if (sdev_priv_data->pend_count) {
3493 sdev_printk(KERN_INFO, scmd->device,
3494 "%s: device has %d pending commands, device(LUN) reset is failed\n",
3495 mrioc->name, sdev_priv_data->pend_count);
3496 goto out;
3497 }
3498 retval = SUCCESS;
3499 out:
3500 sdev_printk(KERN_INFO, scmd->device,
3501 "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
3502 ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3503
3504 return retval;
3505 }
3506
3507 /**
3508 * mpi3mr_scan_start - Scan start callback handler
3509 * @shost: SCSI host reference
3510 *
3511 * Issue port enable request asynchronously.
3512 *
3513 * Return: Nothing
3514 */
3515 static void mpi3mr_scan_start(struct Scsi_Host *shost)
3516 {
3517 struct mpi3mr_ioc *mrioc = shost_priv(shost);
3518
3519 mrioc->scan_started = 1;
3520 ioc_info(mrioc, "%s :Issuing Port Enable\n", __func__);
3521 if (mpi3mr_issue_port_enable(mrioc, 1)) {
3522 ioc_err(mrioc, "%s :Issuing port enable failed\n", __func__);
3523 mrioc->scan_started = 0;
3524 mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
3525 }
3526 }
3527
/**
 * mpi3mr_scan_finished - Scan finished callback handler
 * @shost: SCSI host reference
 * @time: Jiffies from the scan start
 *
 * Checks whether the port enable is completed or timedout or
 * failed and set the scan status accordingly after taking any
 * recovery if required.
 *
 * Return: 1 on scan finished or timed out, 0 for in progress
 */
static int mpi3mr_scan_finished(struct Scsi_Host *shost,
	unsigned long time)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
	u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);

	/* Controller faulted or was reset while port enable was running */
	if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
	    (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
		ioc_err(mrioc, "port enable failed due to fault or reset\n");
		mpi3mr_print_fault_info(mrioc);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		/* Release the init command tracker used by port enable */
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	/* Port enable did not complete within the allotted time */
	if (time >= (pe_timeout * HZ)) {
		ioc_err(mrioc, "port enable failed due to time out\n");
		mpi3mr_check_rh_fault_ioc(mrioc,
		    MPI3MR_RESET_FROM_PE_TIMEOUT);
		mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
		mrioc->scan_started = 0;
		mrioc->init_cmds.is_waiting = 0;
		mrioc->init_cmds.callback = NULL;
		mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
	}

	/* Still waiting for the asynchronous port enable to complete */
	if (mrioc->scan_started)
		return 0;

	if (mrioc->scan_failed) {
		ioc_err(mrioc,
		    "port enable failed with status=0x%04x\n",
		    mrioc->scan_failed);
	} else
		ioc_info(mrioc, "port enable is successfully completed\n");

	mpi3mr_start_watchdog(mrioc);
	mrioc->is_driver_loading = 0;
	return 1;
}
3582
/**
 * mpi3mr_slave_destroy - Slave destroy callback handler
 * @sdev: SCSI device reference
 *
 * Cleanup and free per device(lun) private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_slave_destroy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	struct scsi_target *starget;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	scsi_tgt_priv_data->num_luns--;

	/* Drop the target linkage once the last LUN of the target goes away */
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	if (tgt_dev && (!scsi_tgt_priv_data->num_luns))
		tgt_dev->starget = NULL;
	if (tgt_dev)
		mpi3mr_tgtdev_put(tgt_dev); /* release lookup reference */
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
3621
/**
 * mpi3mr_target_destroy - Target destroy callback handler
 * @starget: SCSI target reference
 *
 * Cleanup and free per target private data.
 *
 * Return: Nothing.
 */
static void mpi3mr_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;

	if (!starget->hostdata)
		return;

	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_from_tgtpriv(mrioc, scsi_tgt_priv_data);
	/* Only clear the back-pointer if it still refers to this starget */
	if (tgt_dev && (tgt_dev->starget == starget) &&
	    (tgt_dev->perst_id == starget->id))
		tgt_dev->starget = NULL;
	if (tgt_dev) {
		scsi_tgt_priv_data->tgt_dev = NULL;
		scsi_tgt_priv_data->perst_id = 0;
		/*
		 * NOTE(review): two puts — presumably one drops the lookup
		 * reference taken just above and the other the reference
		 * that was held via scsi_tgt_priv_data->tgt_dev; confirm
		 * against the reference accounting in mpi3mr_target_alloc().
		 */
		mpi3mr_tgtdev_put(tgt_dev);
		mpi3mr_tgtdev_put(tgt_dev);
	}
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	kfree(starget->hostdata);
	starget->hostdata = NULL;
}
3661
/**
 * mpi3mr_slave_configure - Slave configure callback handler
 * @sdev: SCSI device reference
 *
 * Configure queue depth, max hardware sectors and virt boundary
 * as required
 *
 * Return: 0 on success, -ENXIO if the backing target device is
 * not found.
 */
static int mpi3mr_slave_configure(struct scsi_device *sdev)
{
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
	if (!tgt_dev)
		return -ENXIO;

	mpi3mr_change_queue_depth(sdev, tgt_dev->q_depth);
	switch (tgt_dev->dev_type) {
	case MPI3_DEVICE_DEVFORM_PCIE:
		/*The block layer hw sector size = 512*/
		if ((tgt_dev->dev_spec.pcie_inf.dev_info &
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
		    MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
			/* Cap transfer size to the NVMe device's MDTS */
			blk_queue_max_hw_sectors(sdev->request_queue,
			    tgt_dev->dev_spec.pcie_inf.mdts / 512);
			/* Zero pgsz: fall back to the default page size exp */
			if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
			else
				blk_queue_virt_boundary(sdev->request_queue,
				    ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
		}
		break;
	default:
		break;
	}

	mpi3mr_tgtdev_put(tgt_dev); /* release lookup reference */

	return retval;
}
3715
/**
 * mpi3mr_slave_alloc -Slave alloc callback handler
 * @sdev: SCSI device reference
 *
 * Allocate per device(lun) private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure,
 * -ENXIO if no matching target device exists.
 */
static int mpi3mr_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct mpi3mr_ioc *mrioc;
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	struct mpi3mr_sdev_priv_data *scsi_dev_priv_data;
	unsigned long flags;
	struct scsi_target *starget;
	int retval = 0;

	starget = scsi_target(sdev);
	shost = dev_to_shost(&starget->dev);
	mrioc = shost_priv(shost);
	scsi_tgt_priv_data = starget->hostdata;

	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);

	if (tgt_dev) {
		/* First LUN of the target: record the starget linkage */
		if (tgt_dev->starget == NULL)
			tgt_dev->starget = starget;
		mpi3mr_tgtdev_put(tgt_dev); /* release lookup reference */
		retval = 0;
	} else {
		/* Unlock here: this path returns without reaching below */
		spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
		return -ENXIO;
	}

	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	scsi_dev_priv_data = kzalloc(sizeof(*scsi_dev_priv_data), GFP_KERNEL);
	if (!scsi_dev_priv_data)
		return -ENOMEM;

	scsi_dev_priv_data->lun_id = sdev->lun;
	scsi_dev_priv_data->tgt_priv_data = scsi_tgt_priv_data;
	sdev->hostdata = scsi_dev_priv_data;

	scsi_tgt_priv_data->num_luns++;

	return retval;
}
3767
/**
 * mpi3mr_target_alloc - Target alloc callback handler
 * @starget: SCSI target reference
 *
 * Allocate per target private data and initialize it.
 *
 * Return: 0 on success, -ENOMEM on memory allocation failure,
 * -ENXIO if no matching (non-hidden) target device exists.
 */
static int mpi3mr_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data;
	struct mpi3mr_tgt_dev *tgt_dev;
	unsigned long flags;
	int retval = 0;

	scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
	if (!scsi_tgt_priv_data)
		return -ENOMEM;

	starget->hostdata = scsi_tgt_priv_data;

	/* Cross-link the target private data and the driver's tgt_dev */
	spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
	tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
	if (tgt_dev && !tgt_dev->is_hidden) {
		scsi_tgt_priv_data->starget = starget;
		scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
		scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
		scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
		scsi_tgt_priv_data->tgt_dev = tgt_dev;
		tgt_dev->starget = starget;
		atomic_set(&scsi_tgt_priv_data->block_io, 0);
		retval = 0;
	} else
		retval = -ENXIO;
	spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);

	return retval;
}
3808
/**
 * mpi3mr_check_return_unmap - Whether an unmap is allowed
 * @mrioc: Adapter instance reference
 * @scmd: SCSI Command reference
 *
 * The controller hardware cannot handle certain unmap commands
 * for NVMe drives, this routine checks those and return true
 * and completes the SCSI command with proper status and sense
 * data.
 *
 * Return: TRUE for not allowed unmap, FALSE otherwise.
 */
static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
	struct scsi_cmnd *scmd)
{
	unsigned char *buf;
	u16 param_len, desc_len, trunc_param_len;

	/* UNMAP CDB bytes 7..8 carry the parameter list length */
	trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);

	/*
	 * Non-zero revision controllers (presumably newer silicon — confirm)
	 * only need the parameter list length trimmed to a whole number of
	 * 16-byte descriptors; the command is still sent to the firmware.
	 */
	if (mrioc->pdev->revision) {
		if ((param_len > 24) && ((param_len - 8) & 0xF)) {
			trunc_param_len -= (param_len - 8) & 0xF;
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
			dprint_scsi_err(mrioc,
			    "truncating param_len from (%d) to (%d)\n",
			    param_len, trunc_param_len);
			put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
			dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
		}
		return false;
	}

	/* Zero-length UNMAP is a no-op: complete it with good status */
	if (!param_len) {
		ioc_warn(mrioc,
		    "%s: cdb received with zero parameter length\n",
		    __func__);
		scsi_print_command(scmd);
		scmd->result = DID_OK << 16;
		scmd->scsi_done(scmd);
		return true;
	}

	/* Shorter than header plus one descriptor: invalid parameter length */
	if (param_len < 24) {
		ioc_warn(mrioc,
		    "%s: cdb received with invalid param_len: %d\n",
		    __func__, param_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scmd->scsi_done(scmd);
		return true;
	}
	/* CDB-declared length must match the actual data buffer length */
	if (param_len != scsi_bufflen(scmd)) {
		ioc_warn(mrioc,
		    "%s: cdb received with param_len: %d bufflen: %d\n",
		    __func__, param_len, scsi_bufflen(scmd));
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x1A, 0);
		scmd->scsi_done(scmd);
		return true;
	}
	/* GFP_ATOMIC: queuecommand context must not sleep */
	buf = kzalloc(scsi_bufflen(scmd), GFP_ATOMIC);
	if (!buf) {
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x55, 0x03);
		scmd->scsi_done(scmd);
		return true;
	}
	scsi_sg_copy_to_buffer(scmd, buf, scsi_bufflen(scmd));
	/* Block descriptor data length from the parameter list header */
	desc_len = get_unaligned_be16(&buf[2]);

	if (desc_len < 16) {
		ioc_warn(mrioc,
		    "%s: Invalid descriptor length in param list: %d\n",
		    __func__, desc_len);
		scsi_print_command(scmd);
		scmd->result = SAM_STAT_CHECK_CONDITION;
		scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
		    0x26, 0);
		scmd->scsi_done(scmd);
		kfree(buf);
		return true;
	}

	/* Clamp param_len so it never exceeds header + descriptor data */
	if (param_len > (desc_len + 8)) {
		trunc_param_len = desc_len + 8;
		scsi_print_command(scmd);
		dprint_scsi_err(mrioc,
		    "truncating param_len(%d) to desc_len+8(%d)\n",
		    param_len, trunc_param_len);
		put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
		scsi_print_command(scmd);
	}

	kfree(buf);
	return false;
}
3912
3913 /**
3914 * mpi3mr_allow_scmd_to_fw - Command is allowed during shutdown
3915 * @scmd: SCSI Command reference
3916 *
3917 * Checks whether a cdb is allowed during shutdown or not.
3918 *
3919 * Return: TRUE for allowed commands, FALSE otherwise.
3920 */
3921
3922 inline bool mpi3mr_allow_scmd_to_fw(struct scsi_cmnd *scmd)
3923 {
3924 switch (scmd->cmnd[0]) {
3925 case SYNCHRONIZE_CACHE:
3926 case START_STOP:
3927 return true;
3928 default:
3929 return false;
3930 }
3931 }
3932
/**
 * mpi3mr_qcmd - I/O request dispatcher
 * @shost: SCSI Host reference
 * @scmd: SCSI Command reference
 *
 * Issues the SCSI Command as an MPI3 request.
 *
 * Return: 0 on successful queueing of the request or if the
 *         request is completed with failure.
 *         SCSI_MLQUEUE_DEVICE_BUSY when the device is busy.
 *         SCSI_MLQUEUE_HOST_BUSY when the host queue is full.
 */
static int mpi3mr_qcmd(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	struct mpi3mr_stgt_priv_data *stgt_priv_data;
	struct mpi3mr_sdev_priv_data *sdev_priv_data;
	struct scmd_priv *scmd_priv_data = NULL;
	struct mpi3_scsi_io_request *scsiio_req = NULL;
	struct op_req_qinfo *op_req_q = NULL;
	int retval = 0;
	u16 dev_handle;
	u16 host_tag;
	u32 scsiio_flags = 0;
	struct request *rq = scsi_cmd_to_rq(scmd);
	int iprio_class;
	u8 is_pcie_dev = 0;

	/* No per-device private data: device is gone */
	sdev_priv_data = scmd->device->hostdata;
	if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* During shutdown only a small allow-list of commands goes through */
	if (mrioc->stop_drv_processing &&
	    !(mpi3mr_allow_scmd_to_fw(scmd))) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* Ask the midlayer to retry while a controller reset is running */
	if (mrioc->reset_in_progress) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	stgt_priv_data = sdev_priv_data->tgt_priv_data;

	dev_handle = stgt_priv_data->dev_handle;
	if (dev_handle == MPI3MR_INVALID_DEV_HANDLE) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}
	if (stgt_priv_data->dev_removed) {
		scmd->result = DID_NO_CONNECT << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* Target I/O is blocked (e.g. a TM is in flight — see issue_tm) */
	if (atomic_read(&stgt_priv_data->block_io)) {
		if (mrioc->stop_drv_processing) {
			scmd->result = DID_NO_CONNECT << 16;
			scmd->scsi_done(scmd);
			goto out;
		}
		retval = SCSI_MLQUEUE_DEVICE_BUSY;
		goto out;
	}

	/*
	 * SAS4116 revision-specific UNMAP screening for NVMe devices;
	 * mpi3mr_check_return_unmap() completes the scmd itself when the
	 * command must not reach the firmware.
	 */
	if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
		is_pcie_dev = 1;
	if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
	    (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
	    mpi3mr_check_return_unmap(mrioc, scmd))
		goto out;

	host_tag = mpi3mr_host_tag_for_scmd(mrioc, scmd);
	if (host_tag == MPI3MR_HOSTTAG_INVALID) {
		scmd->result = DID_ERROR << 16;
		scmd->scsi_done(scmd);
		goto out;
	}

	/* Map the DMA direction into the MPI3 SCSI IO flags */
	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_READ;
	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
	else
		scsiio_flags = MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER;

	scsiio_flags |= MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ;

	/* Propagate real-time I/O priority when NCQ priority is enabled */
	if (sdev_priv_data->ncq_prio_enable) {
		iprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
		if (iprio_class == IOPRIO_CLASS_RT)
			scsiio_flags |= 1 << MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT;
	}

	if (scmd->cmd_len > 16)
		scsiio_flags |= MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16;

	/* Build the MPI3 SCSI IO request in the per-command private frame */
	scmd_priv_data = scsi_cmd_priv(scmd);
	memset(scmd_priv_data->mpi3mr_scsiio_req, 0, MPI3MR_ADMIN_REQ_FRAME_SZ);
	scsiio_req = (struct mpi3_scsi_io_request *)scmd_priv_data->mpi3mr_scsiio_req;
	scsiio_req->function = MPI3_FUNCTION_SCSI_IO;
	scsiio_req->host_tag = cpu_to_le16(host_tag);

	mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);

	memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
	scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
	scsiio_req->dev_handle = cpu_to_le16(dev_handle);
	scsiio_req->flags = cpu_to_le32(scsiio_flags);
	int_to_scsilun(sdev_priv_data->lun_id,
	    (struct scsi_lun *)scsiio_req->lun);

	/* SGL build failure: undo private state and ask midlayer to retry */
	if (mpi3mr_build_sg_scmd(mrioc, scmd, scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	op_req_q = &mrioc->req_qinfo[scmd_priv_data->req_q_idx];

	if (mpi3mr_op_request_post(mrioc, op_req_q,
	    scmd_priv_data->mpi3mr_scsiio_req)) {
		mpi3mr_clear_scmd_priv(mrioc, scmd);
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

out:
	return retval;
}
4069
/*
 * SCSI midlayer host template for mpi3mr adapters. Limits that depend
 * on negotiated controller capabilities (can_queue, max_id,
 * nr_hw_queues) are placeholders here and are overwritten in
 * mpi3mr_probe() after IOC initialization.
 */
static struct scsi_host_template mpi3mr_driver_template = {
	.module = THIS_MODULE,
	.name = "MPI3 Storage Controller",
	.proc_name = MPI3MR_DRIVER_NAME,
	.queuecommand = mpi3mr_qcmd,
	.target_alloc = mpi3mr_target_alloc,
	.slave_alloc = mpi3mr_slave_alloc,
	.slave_configure = mpi3mr_slave_configure,
	.target_destroy = mpi3mr_target_destroy,
	.slave_destroy = mpi3mr_slave_destroy,
	.scan_finished = mpi3mr_scan_finished,
	.scan_start = mpi3mr_scan_start,
	.change_queue_depth = mpi3mr_change_queue_depth,
	.eh_device_reset_handler = mpi3mr_eh_dev_reset,
	.eh_target_reset_handler = mpi3mr_eh_target_reset,
	.eh_host_reset_handler = mpi3mr_eh_host_reset,
	.bios_param = mpi3mr_bios_param,
	.map_queues = mpi3mr_map_queues,
	.mq_poll = mpi3mr_blk_mq_poll,
	.no_write_same = 1,
	/* Placeholder; set to mrioc->max_host_ios in mpi3mr_probe() */
	.can_queue = 1,
	.this_id = -1,
	.sg_tablesize = MPI3MR_SG_DEPTH,
	/* max xfer supported is 1M (2K in 512 byte sized sectors)
	 */
	.max_sectors = 2048,
	.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
	.max_segment_size = 0xffffffff,
	.track_queue_depth = 1,
	/* Per-command private data (struct scmd_priv) allocated by midlayer */
	.cmd_size = sizeof(struct scmd_priv),
};
4101
4102 /**
4103 * mpi3mr_init_drv_cmd - Initialize internal command tracker
4104 * @cmdptr: Internal command tracker
4105 * @host_tag: Host tag used for the specific command
4106 *
4107 * Initialize the internal command tracker structure with
4108 * specified host tag.
4109 *
4110 * Return: Nothing.
4111 */
4112 static inline void mpi3mr_init_drv_cmd(struct mpi3mr_drv_cmd *cmdptr,
4113 u16 host_tag)
4114 {
4115 mutex_init(&cmdptr->mutex);
4116 cmdptr->reply = NULL;
4117 cmdptr->state = MPI3MR_CMD_NOTUSED;
4118 cmdptr->dev_handle = MPI3MR_INVALID_DEV_HANDLE;
4119 cmdptr->host_tag = host_tag;
4120 }
4121
4122 /**
4123 * osintfc_mrioc_security_status -Check controller secure status
4124 * @pdev: PCI device instance
4125 *
4126 * Read the Device Serial Number capability from PCI config
4127 * space and decide whether the controller is secure or not.
4128 *
4129 * Return: 0 on success, non-zero on failure.
4130 */
4131 static int
4132 osintfc_mrioc_security_status(struct pci_dev *pdev)
4133 {
4134 u32 cap_data;
4135 int base;
4136 u32 ctlr_status;
4137 u32 debug_status;
4138 int retval = 0;
4139
4140 base = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
4141 if (!base) {
4142 dev_err(&pdev->dev,
4143 "%s: PCI_EXT_CAP_ID_DSN is not supported\n", __func__);
4144 return -1;
4145 }
4146
4147 pci_read_config_dword(pdev, base + 4, &cap_data);
4148
4149 debug_status = cap_data & MPI3MR_CTLR_SECURE_DBG_STATUS_MASK;
4150 ctlr_status = cap_data & MPI3MR_CTLR_SECURITY_STATUS_MASK;
4151
4152 switch (ctlr_status) {
4153 case MPI3MR_INVALID_DEVICE:
4154 dev_err(&pdev->dev,
4155 "%s: Non secure ctlr (Invalid) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4156 __func__, pdev->device, pdev->subsystem_vendor,
4157 pdev->subsystem_device);
4158 retval = -1;
4159 break;
4160 case MPI3MR_CONFIG_SECURE_DEVICE:
4161 if (!debug_status)
4162 dev_info(&pdev->dev,
4163 "%s: Config secure ctlr is detected\n",
4164 __func__);
4165 break;
4166 case MPI3MR_HARD_SECURE_DEVICE:
4167 break;
4168 case MPI3MR_TAMPERED_DEVICE:
4169 dev_err(&pdev->dev,
4170 "%s: Non secure ctlr (Tampered) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4171 __func__, pdev->device, pdev->subsystem_vendor,
4172 pdev->subsystem_device);
4173 retval = -1;
4174 break;
4175 default:
4176 retval = -1;
4177 break;
4178 }
4179
4180 if (!retval && debug_status) {
4181 dev_err(&pdev->dev,
4182 "%s: Non secure ctlr (Secure Dbg) is detected: DID: 0x%x: SVID: 0x%x: SDID: 0x%x\n",
4183 __func__, pdev->device, pdev->subsystem_vendor,
4184 pdev->subsystem_device);
4185 retval = -1;
4186 }
4187
4188 return retval;
4189 }
4190
/**
 * mpi3mr_probe - PCI probe callback
 * @pdev: PCI device instance
 * @id: PCI device ID details
 *
 * controller initialization routine. Checks the security status
 * of the controller and if it is invalid or tampered return the
 * probe without initializing the controller. Otherwise,
 * allocate per adapter instance through shost_priv and
 * initialize controller specific data structures, initialize
 * the controller hardware, add shost to the SCSI subsystem.
 *
 * Return: 0 on success, non-zero on failure.
 */

static int
mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mpi3mr_ioc *mrioc = NULL;
	struct Scsi_Host *shost = NULL;
	int retval = 0, i;

	/*
	 * Refuse to drive invalid/tampered controllers; remember this so
	 * module unload can warn (see mpi3mr_exit()).
	 */
	if (osintfc_mrioc_security_status(pdev)) {
		warn_non_secure_ctlr = 1;
		return 1; /* For Invalid and Tampered device */
	}

	shost = scsi_host_alloc(&mpi3mr_driver_template,
	    sizeof(struct mpi3mr_ioc));
	if (!shost) {
		retval = -ENODEV;
		goto shost_failed;
	}

	mrioc = shost_priv(shost);
	/* NOTE(review): mrioc_ids++ is not serialized; presumably probe
	 * invocations are not concurrent here - confirm. */
	mrioc->id = mrioc_ids++;
	sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
	sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
	INIT_LIST_HEAD(&mrioc->list);
	/* Register this adapter on the global driver-scope list */
	spin_lock(&mrioc_list_lock);
	list_add_tail(&mrioc->list, &mrioc_list);
	spin_unlock(&mrioc_list_lock);

	spin_lock_init(&mrioc->admin_req_lock);
	spin_lock_init(&mrioc->reply_free_queue_lock);
	spin_lock_init(&mrioc->sbq_lock);
	spin_lock_init(&mrioc->fwevt_lock);
	spin_lock_init(&mrioc->tgtdev_lock);
	spin_lock_init(&mrioc->watchdog_lock);
	spin_lock_init(&mrioc->chain_buf_lock);

	INIT_LIST_HEAD(&mrioc->fwevt_list);
	INIT_LIST_HEAD(&mrioc->tgtdev_list);
	INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
	INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);

	mutex_init(&mrioc->reset_mutex);
	/* Internal command trackers with their fixed host tags */
	mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
	mpi3mr_init_drv_cmd(&mrioc->host_tm_cmds, MPI3MR_HOSTTAG_BLK_TMS);

	for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
		mpi3mr_init_drv_cmd(&mrioc->dev_rmhs_cmds[i],
		    MPI3MR_HOSTTAG_DEVRMCMD_MIN + i);

	/* Non-zero revision enables segmented operational queues */
	if (pdev->revision)
		mrioc->enable_segqueue = true;

	init_waitqueue_head(&mrioc->reset_waitq);
	mrioc->logging_level = logging_level;
	mrioc->shost = shost;
	mrioc->pdev = pdev;

	/* init shost parameters */
	shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
	shost->max_lun = -1;
	shost->unique_id = mrioc->id;

	shost->max_channel = 0;
	/* Placeholder; refined from IOC facts after mpi3mr_init_ioc() */
	shost->max_id = 0xFFFFFFFF;

	/* Honor module parameter; otherwise default to DIF types 1/2/3 */
	if (prot_mask >= 0)
		scsi_host_set_prot(shost, prot_mask);
	else {
		prot_mask = SHOST_DIF_TYPE1_PROTECTION
		    | SHOST_DIF_TYPE2_PROTECTION
		    | SHOST_DIF_TYPE3_PROTECTION;
		scsi_host_set_prot(shost, prot_mask);
	}

	ioc_info(mrioc,
	    "%s :host protection capabilities enabled %s%s%s%s%s%s%s\n",
	    __func__,
	    (prot_mask & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
	    (prot_mask & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
	    (prot_mask & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
	    (prot_mask & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
	    (prot_mask & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
	    (prot_mask & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
	    (prot_mask & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (prot_guard_mask)
		scsi_host_set_guard(shost, (prot_guard_mask & 3));
	else
		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);

	/* Ordered workqueue: firmware events must be processed serially */
	snprintf(mrioc->fwevt_worker_name, sizeof(mrioc->fwevt_worker_name),
	    "%s%d_fwevt_wrkr", mrioc->driver_name, mrioc->id);
	mrioc->fwevt_worker_thread = alloc_ordered_workqueue(
	    mrioc->fwevt_worker_name, WQ_MEM_RECLAIM);
	if (!mrioc->fwevt_worker_thread) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		retval = -ENODEV;
		goto fwevtthread_failed;
	}

	/* Blocks remove/shutdown/suspend until loading completes */
	mrioc->is_driver_loading = 1;
	mrioc->cpu_count = num_online_cpus();
	if (mpi3mr_setup_resources(mrioc)) {
		ioc_err(mrioc, "setup resources failed\n");
		retval = -ENODEV;
		goto resource_alloc_failed;
	}
	if (mpi3mr_init_ioc(mrioc)) {
		ioc_err(mrioc, "initializing IOC failed\n");
		retval = -ENODEV;
		goto init_ioc_failed;
	}

	/* Now that IOC facts are known, fix up the placeholder limits */
	shost->nr_hw_queues = mrioc->num_op_reply_q;
	if (mrioc->active_poll_qcount)
		shost->nr_maps = 3;

	shost->can_queue = mrioc->max_host_ios;
	shost->sg_tablesize = MPI3MR_SG_DEPTH;
	shost->max_id = mrioc->facts.max_perids + 1;

	retval = scsi_add_host(shost, &pdev->dev);
	if (retval) {
		ioc_err(mrioc, "failure at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		goto addhost_failed;
	}

	scsi_scan_host(shost);
	return retval;

	/* Unwind in reverse order of acquisition */
addhost_failed:
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
init_ioc_failed:
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);
resource_alloc_failed:
	destroy_workqueue(mrioc->fwevt_worker_thread);
fwevtthread_failed:
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);
	scsi_host_put(shost);
shost_failed:
	return retval;
}
4354
/**
 * mpi3mr_remove - PCI remove callback
 * @pdev: PCI device instance
 *
 * Cleanup the IOC by issuing MUR and shutdown notification.
 * Free up all memory and resources associated with the
 * controller and target devices, unregister the shost.
 *
 * Return: Nothing.
 */
static void mpi3mr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/*
	 * Detach the fwevt workqueue under the lock so no new work is
	 * queued, then destroy it outside the lock.
	 */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	scsi_remove_host(shost);

	/* Drop every tracked target device (list + refcount) */
	list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
	    list) {
		mpi3mr_remove_tgtdev_from_host(mrioc, tgtdev);
		mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
		mpi3mr_tgtdev_put(tgtdev);
	}
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_free_mem(mrioc);
	mpi3mr_cleanup_resources(mrioc);

	/* Unregister from the global adapter list added in probe */
	spin_lock(&mrioc_list_lock);
	list_del(&mrioc->list);
	spin_unlock(&mrioc_list_lock);

	scsi_host_put(shost);
}
4407
/**
 * mpi3mr_shutdown - PCI shutdown callback
 * @pdev: PCI device instance
 *
 * Free up all memory and resources associated with the
 * controller. Unlike mpi3mr_remove(), the shost and target
 * devices are not unregistered since the system is going down.
 *
 * Return: Nothing.
 */
static void mpi3mr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	struct workqueue_struct *wq;
	unsigned long flags;

	if (!shost)
		return;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);

	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Detach the fwevt workqueue under the lock, destroy it outside */
	spin_lock_irqsave(&mrioc->fwevt_lock, flags);
	wq = mrioc->fwevt_worker_thread;
	mrioc->fwevt_worker_thread = NULL;
	spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
	if (wq)
		destroy_workqueue(wq);

	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);
	mpi3mr_cleanup_resources(mrioc);
}
4444
4445 #ifdef CONFIG_PM
/**
 * mpi3mr_suspend - PCI power management suspend callback
 * @pdev: PCI device instance
 * @state: New power state
 *
 * Change the power state to the given value and cleanup the IOC
 * by issuing MUR and shutdown notification
 *
 * Return: 0 always.
 */
static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);
	/* Wait for any in-flight reset or initial load to finish */
	while (mrioc->reset_in_progress || mrioc->is_driver_loading)
		ssleep(1);
	mrioc->stop_drv_processing = 1;
	mpi3mr_cleanup_fwevt_list(mrioc);
	/* Stop new I/O before tearing the IOC down */
	scsi_block_requests(shost);
	mpi3mr_stop_watchdog(mrioc);
	mpi3mr_cleanup_ioc(mrioc);

	device_state = pci_choose_state(pdev, state);
	ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	/* Save config space before resources are released and power drops */
	pci_save_state(pdev);
	mpi3mr_cleanup_resources(mrioc);
	pci_set_power_state(pdev, device_state);

	return 0;
}
4483
/**
 * mpi3mr_resume - PCI power management resume callback
 * @pdev: PCI device instance
 *
 * Restore the power state to D0 and reinitialize the controller
 * and resume I/O operations to the target devices
 *
 * Return: 0 on success, non-zero on failure
 */
static int mpi3mr_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct mpi3mr_ioc *mrioc;
	pci_power_t device_state = pdev->current_state;
	int r;

	if (!shost)
		return 0;

	mrioc = shost_priv(shost);

	ioc_info(mrioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
	    pdev, pci_name(pdev), device_state);
	/* Bring the device back to D0 and restore saved config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	mrioc->pdev = pdev;
	mrioc->cpu_count = num_online_cpus();
	r = mpi3mr_setup_resources(mrioc);
	if (r) {
		ioc_info(mrioc, "%s: Setup resources failed[%d]\n",
		    __func__, r);
		return r;
	}

	mrioc->stop_drv_processing = 0;
	/* Clear stale queue/reply buffers before re-initializing the IOC */
	mpi3mr_memset_buffers(mrioc);
	r = mpi3mr_reinit_ioc(mrioc, 1);
	if (r) {
		ioc_err(mrioc, "resuming controller failed[%d]\n", r);
		return r;
	}
	/* Re-open the gate closed by mpi3mr_suspend() */
	scsi_unblock_requests(shost);
	mpi3mr_start_watchdog(mrioc);

	return 0;
}
4531 #endif
4532
/* PCI IDs claimed by this driver: Broadcom SAS4116 family, any subsystem */
static const struct pci_device_id mpi3mr_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
		    MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, mpi3mr_pci_id_table);
4541
/*
 * PCI driver registration. Power management uses the legacy
 * suspend/resume pci_driver callbacks (not dev_pm_ops).
 */
static struct pci_driver mpi3mr_pci_driver = {
	.name = MPI3MR_DRIVER_NAME,
	.id_table = mpi3mr_pci_id_table,
	.probe = mpi3mr_probe,
	.remove = mpi3mr_remove,
	.shutdown = mpi3mr_shutdown,
#ifdef CONFIG_PM
	.suspend = mpi3mr_suspend,
	.resume = mpi3mr_resume,
#endif
};
4553
4554 static int __init mpi3mr_init(void)
4555 {
4556 int ret_val;
4557
4558 pr_info("Loading %s version %s\n", MPI3MR_DRIVER_NAME,
4559 MPI3MR_DRIVER_VERSION);
4560
4561 ret_val = pci_register_driver(&mpi3mr_pci_driver);
4562
4563 return ret_val;
4564 }
4565
4566 static void __exit mpi3mr_exit(void)
4567 {
4568 if (warn_non_secure_ctlr)
4569 pr_warn(
4570 "Unloading %s version %s while managing a non secure controller\n",
4571 MPI3MR_DRIVER_NAME, MPI3MR_DRIVER_VERSION);
4572 else
4573 pr_info("Unloading %s version %s\n", MPI3MR_DRIVER_NAME,
4574 MPI3MR_DRIVER_VERSION);
4575
4576 pci_unregister_driver(&mpi3mr_pci_driver);
4577 }
4578
4579 module_init(mpi3mr_init);
4580 module_exit(mpi3mr_exit);