1 /*
2 * Management Module Support for MPT (Message Passing Technology) based
3 * controllers
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62
63
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66
67
68 /**
69 * enum block_state - blocking state
70 * @NON_BLOCKING: non blocking
71 * @BLOCKING: blocking
72 *
73 * These states are for ioctls that need to wait for a response
74 * from firmware, so they probably require sleep.
75 */
76 enum block_state {
77 NON_BLOCKING,
78 BLOCKING,
79 };
80
81 /**
82 * _ctl_sas_device_find_by_handle - sas device search
83 * @ioc: per adapter object
84 * @handle: sas device handle (assigned by firmware)
85 * Context: Calling function should acquire ioc->sas_device_lock
86 *
87 * This searches for sas_device based on the device handle, then returns
88 * the sas_device object.
89 */
90 static struct _sas_device *
91 _ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
92 {
93 struct _sas_device *sas_device, *r;
94
95 r = NULL;
96 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
97 if (sas_device->handle != handle)
98 continue;
99 r = sas_device;
100 goto out;
101 }
102
103 out:
104 return r;
105 }
106
107 /**
108 * _ctl_display_some_debug - debug routine
109 * @ioc: per adapter object
110 * @smid: system request message index
111 * @calling_function_name: string passed from the calling function
112 * @mpi_reply: reply message frame
113 * Context: none.
114 *
115 * Function for displaying debug info helpful when debugging issues
116 * in this module.
117 */
118 static void
119 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
120 char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
121 {
122 Mpi2ConfigRequest_t *mpi_request;
123 char *desc = NULL;
124
125 if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
126 return;
127
128 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
129 switch (mpi_request->Function) {
130 case MPI2_FUNCTION_SCSI_IO_REQUEST:
131 {
132 Mpi2SCSIIORequest_t *scsi_request =
133 (Mpi2SCSIIORequest_t *)mpi_request;
134
135 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
136 "scsi_io, cmd(0x%02x), cdb_len(%d)",
137 scsi_request->CDB.CDB32[0],
138 le16_to_cpu(scsi_request->IoFlags) & 0xF);
139 desc = ioc->tmp_string;
140 break;
141 }
142 case MPI2_FUNCTION_SCSI_TASK_MGMT:
143 desc = "task_mgmt";
144 break;
145 case MPI2_FUNCTION_IOC_INIT:
146 desc = "ioc_init";
147 break;
148 case MPI2_FUNCTION_IOC_FACTS:
149 desc = "ioc_facts";
150 break;
151 case MPI2_FUNCTION_CONFIG:
152 {
153 Mpi2ConfigRequest_t *config_request =
154 (Mpi2ConfigRequest_t *)mpi_request;
155
156 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
157 "config, type(0x%02x), ext_type(0x%02x), number(%d)",
158 (config_request->Header.PageType &
159 MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
160 config_request->Header.PageNumber);
161 desc = ioc->tmp_string;
162 break;
163 }
164 case MPI2_FUNCTION_PORT_FACTS:
165 desc = "port_facts";
166 break;
167 case MPI2_FUNCTION_PORT_ENABLE:
168 desc = "port_enable";
169 break;
170 case MPI2_FUNCTION_EVENT_NOTIFICATION:
171 desc = "event_notification";
172 break;
173 case MPI2_FUNCTION_FW_DOWNLOAD:
174 desc = "fw_download";
175 break;
176 case MPI2_FUNCTION_FW_UPLOAD:
177 desc = "fw_upload";
178 break;
179 case MPI2_FUNCTION_RAID_ACTION:
180 desc = "raid_action";
181 break;
182 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
183 {
184 Mpi2SCSIIORequest_t *scsi_request =
185 (Mpi2SCSIIORequest_t *)mpi_request;
186
187 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
188 "raid_pass, cmd(0x%02x), cdb_len(%d)",
189 scsi_request->CDB.CDB32[0],
190 le16_to_cpu(scsi_request->IoFlags) & 0xF);
191 desc = ioc->tmp_string;
192 break;
193 }
194 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
195 desc = "sas_iounit_cntl";
196 break;
197 case MPI2_FUNCTION_SATA_PASSTHROUGH:
198 desc = "sata_pass";
199 break;
200 case MPI2_FUNCTION_DIAG_BUFFER_POST:
201 desc = "diag_buffer_post";
202 break;
203 case MPI2_FUNCTION_DIAG_RELEASE:
204 desc = "diag_release";
205 break;
206 case MPI2_FUNCTION_SMP_PASSTHROUGH:
207 desc = "smp_passthrough";
208 break;
209 }
210
211 if (!desc)
212 return;
213
214 pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
215 ioc->name, calling_function_name, desc, smid);
216
217 if (!mpi_reply)
218 return;
219
220 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
221 pr_info(MPT3SAS_FMT
222 "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
223 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
224 le32_to_cpu(mpi_reply->IOCLogInfo));
225
226 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
227 mpi_request->Function ==
228 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
229 Mpi2SCSIIOReply_t *scsi_reply =
230 (Mpi2SCSIIOReply_t *)mpi_reply;
231 struct _sas_device *sas_device = NULL;
232 unsigned long flags;
233
234 spin_lock_irqsave(&ioc->sas_device_lock, flags);
235 sas_device = _ctl_sas_device_find_by_handle(ioc,
236 le16_to_cpu(scsi_reply->DevHandle));
237 if (sas_device) {
238 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
239 ioc->name, (unsigned long long)
240 sas_device->sas_address, sas_device->phy);
241 pr_warn(MPT3SAS_FMT
242 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
243 ioc->name, (unsigned long long)
244 sas_device->enclosure_logical_id, sas_device->slot);
245 }
246 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
247 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
248 pr_info(MPT3SAS_FMT
249 "\tscsi_state(0x%02x), scsi_status"
250 "(0x%02x)\n", ioc->name,
251 scsi_reply->SCSIState,
252 scsi_reply->SCSIStatus);
253 }
254 }
255
256 /**
257 * mpt3sas_ctl_done - ctl module completion routine
258 * @ioc: per adapter object
259 * @smid: system request message index
260 * @msix_index: MSIX table index supplied by the OS
261 * @reply: reply message frame(lower 32bit addr)
262 * Context: none.
263 *
264 * The callback handler when using ioc->ctl_cb_idx.
265 *
266 * Return 1 meaning mf should be freed from _base_interrupt
267 * 0 means the mf is freed from this function.
268 */
269 u8
270 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
271 u32 reply)
272 {
273 MPI2DefaultReply_t *mpi_reply;
274 Mpi2SCSIIOReply_t *scsiio_reply;
275 const void *sense_data;
276 u32 sz;
277
278 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
279 return 1;
280 if (ioc->ctl_cmds.smid != smid)
281 return 1;
282 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
283 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
284 if (mpi_reply) {
285 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
286 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
287 /* get sense data */
288 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
289 mpi_reply->Function ==
290 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
291 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
292 if (scsiio_reply->SCSIState &
293 MPI2_SCSI_STATE_AUTOSENSE_VALID) {
294 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
295 le32_to_cpu(scsiio_reply->SenseCount));
296 sense_data = mpt3sas_base_get_sense_buffer(ioc,
297 smid);
298 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
299 }
300 }
301 }
302 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
303 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
304 complete(&ioc->ctl_cmds.done);
305 return 1;
306 }
307
308 /**
309 * _ctl_check_event_type - determines when an event needs logging
310 * @ioc: per adapter object
311 * @event: firmware event
312 *
313 * The bitmask in ioc->event_type[] indicates which events should be
314 * saved in the driver event_log.  This bitmask is set by the application.
315 *
316 * Returns 1 when the event should be captured, or 0 when there is no match.
317 */
318 static int
319 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
320 {
321 u16 i;
322 u32 desired_event;
323
324 if (event >= 128 || !event || !ioc->event_log)
325 return 0;
326
327 desired_event = (1 << (event % 32));
328 if (!desired_event)
329 desired_event = 1;
330 i = event / 32;
331 return desired_event & ioc->event_type[i];
332 }
333
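/*
 * Worked example of the mapping above (illustration only): for firmware
 * event code 0x21 (decimal 33) the check reduces to
 *
 *	i             = 33 / 32 = 1;
 *	desired_event = 1 << (33 % 32) = 0x00000002;
 *	match         = ioc->event_type[1] & 0x00000002;
 *
 * so each u32 in ioc->event_type[] covers 32 consecutive event codes,
 * matching the MPI2_EVENT_NOTIFY_EVENTMASK_WORDS mask that the application
 * programs through the MPT3EVENTENABLE ioctl further down in this file.
 */
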
334 /**
335 * mpt3sas_ctl_add_to_event_log - add event
336 * @ioc: per adapter object
337 * @mpi_reply: reply message frame
338 *
339 * Return nothing.
340 */
341 void
342 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
343 Mpi2EventNotificationReply_t *mpi_reply)
344 {
345 struct MPT3_IOCTL_EVENTS *event_log;
346 u16 event;
347 int i;
348 u32 sz, event_data_sz;
349 u8 send_aen = 0;
350
351 if (!ioc->event_log)
352 return;
353
354 event = le16_to_cpu(mpi_reply->Event);
355
356 if (_ctl_check_event_type(ioc, event)) {
357
358 /* insert entry into circular event_log */
359 i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
360 event_log = ioc->event_log;
361 event_log[i].event = event;
362 event_log[i].context = ioc->event_context++;
363
364 event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
365 sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
366 memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
367 memcpy(event_log[i].data, mpi_reply->EventData, sz);
368 send_aen = 1;
369 }
370
371 /* The aen_event_read_flag stays set until the
372 * application has read the event log.
373 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
374 */
375 if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
376 (send_aen && !ioc->aen_event_read_flag)) {
377 ioc->aen_event_read_flag = 1;
378 wake_up_interruptible(&ctl_poll_wait);
379 if (async_queue)
380 kill_fasync(&async_queue, SIGIO, POLL_IN);
381 }
382 }
383
384 /**
385 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
386 * @ioc: per adapter object
387 * @msix_index: MSIX table index supplied by the OS
388 * @reply: reply message frame(lower 32bit addr)
389 * Context: interrupt.
390 *
391 * This function merely copies the event into the driver's ctl event log
392 * via mpt3sas_ctl_add_to_event_log().
393 *
394 * Return 1 meaning mf should be freed from _base_interrupt
395 * 0 means the mf is freed from this function.
396 */
397 u8
398 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
399 u32 reply)
400 {
401 Mpi2EventNotificationReply_t *mpi_reply;
402
403 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
404 if (mpi_reply)
405 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
406 return 1;
407 }
408
409 /**
410 * _ctl_verify_adapter - validates ioc_number passed from application
411 * @ioc: per adapter object
412 * @iocpp: The ioc pointer is returned in this.
413 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
414 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
415 *
416 * Returns -1 on error, else the ioc_number.
417 */
418 static int
419 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
420 int mpi_version)
421 {
422 struct MPT3SAS_ADAPTER *ioc;
423 int version = 0;
424 /* global ioc lock to protect controller on list operations */
425 spin_lock(&gioc_lock);
426 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
427 if (ioc->id != ioc_number)
428 continue;
429 /* Check whether this ioctl command came from the right
430 * ioctl device; if not, continue the search.
431 */
432 version = ioc->hba_mpi_version_belonged;
433 /* MPI25_VERSION and MPI26_VERSION use the same ioctl
434 * device.
435 */
436 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
437 if ((version == MPI25_VERSION) ||
438 (version == MPI26_VERSION))
439 goto out;
440 else
441 continue;
442 } else {
443 if (version != mpi_version)
444 continue;
445 }
446 out:
447 spin_unlock(&gioc_lock);
448 *iocpp = ioc;
449 return ioc_number;
450 }
451 spin_unlock(&gioc_lock);
452 *iocpp = NULL;
453 return -1;
454 }
455
456 /**
457 * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
458 * @ioc: per adapter object
459 * @reset_phase: phase
460 *
461 * The handler for doing any required cleanup or initialization.
462 *
463 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
464 * MPT3_IOC_DONE_RESET
465 */
466 void
467 mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
468 {
469 int i;
470 u8 issue_reset;
471
472 switch (reset_phase) {
473 case MPT3_IOC_PRE_RESET:
474 dtmprintk(ioc, pr_info(MPT3SAS_FMT
475 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
476 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
477 if (!(ioc->diag_buffer_status[i] &
478 MPT3_DIAG_BUFFER_IS_REGISTERED))
479 continue;
480 if ((ioc->diag_buffer_status[i] &
481 MPT3_DIAG_BUFFER_IS_RELEASED))
482 continue;
483 mpt3sas_send_diag_release(ioc, i, &issue_reset);
484 }
485 break;
486 case MPT3_IOC_AFTER_RESET:
487 dtmprintk(ioc, pr_info(MPT3SAS_FMT
488 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
489 if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
490 ioc->ctl_cmds.status |= MPT3_CMD_RESET;
491 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
492 complete(&ioc->ctl_cmds.done);
493 }
494 break;
495 case MPT3_IOC_DONE_RESET:
496 dtmprintk(ioc, pr_info(MPT3SAS_FMT
497 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
498
499 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
500 if (!(ioc->diag_buffer_status[i] &
501 MPT3_DIAG_BUFFER_IS_REGISTERED))
502 continue;
503 if ((ioc->diag_buffer_status[i] &
504 MPT3_DIAG_BUFFER_IS_RELEASED))
505 continue;
506 ioc->diag_buffer_status[i] |=
507 MPT3_DIAG_BUFFER_IS_DIAG_RESET;
508 }
509 break;
510 }
511 }
512
513 /**
514 * _ctl_fasync -
515 * @fd -
516 * @filep -
517 * @mode -
518 *
519 * Called when an application requests the fasync callback handler.
520 */
521 static int
522 _ctl_fasync(int fd, struct file *filep, int mode)
523 {
524 return fasync_helper(fd, filep, mode, &async_queue);
525 }
526
527 /**
528 * _ctl_poll -
529 * @file -
530 * @wait -
531 *
532 */
533 static unsigned int
534 _ctl_poll(struct file *filep, poll_table *wait)
535 {
536 struct MPT3SAS_ADAPTER *ioc;
537
538 poll_wait(filep, &ctl_poll_wait, wait);
539
540 /* global ioc lock to protect controller on list operations */
541 spin_lock(&gioc_lock);
542 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
543 if (ioc->aen_event_read_flag) {
544 spin_unlock(&gioc_lock);
545 return POLLIN | POLLRDNORM;
546 }
547 }
548 spin_unlock(&gioc_lock);
549 return 0;
550 }
551
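/*
 * User-space sketch (illustration only) of consuming the notification that
 * _ctl_fasync() and _ctl_poll() provide.  The "/dev/mpt3ctl" node name is an
 * assumption here, not something this section establishes.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *
 *	int fd = open("/dev/mpt3ctl", O_RDWR);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	int rc = poll(&pfd, 1, -1);
 *
 * When rc > 0 and POLLIN is set, at least one adapter has unread entries in
 * its event log; the application drains them with the MPT3EVENTREPORT ioctl,
 * whose handler clears aen_event_read_flag so the next captured event can
 * raise POLLIN (and SIGIO for fasync users) again.
 */
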
552 /**
553 * _ctl_set_task_mid - assign an active smid to tm request
554 * @ioc: per adapter object
555 * @karg - (struct mpt3_ioctl_command)
556 * @tm_request - pointer to mf from user space
557 *
558 * Returns 0 when an smid is found, else 1;
559 * on failure, the reply frame is filled in.
560 */
561 static int
562 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
563 Mpi2SCSITaskManagementRequest_t *tm_request)
564 {
565 u8 found = 0;
566 u16 i;
567 u16 handle;
568 struct scsi_cmnd *scmd;
569 struct MPT3SAS_DEVICE *priv_data;
570 unsigned long flags;
571 Mpi2SCSITaskManagementReply_t *tm_reply;
572 u32 sz;
573 u32 lun;
574 char *desc = NULL;
575
576 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
577 desc = "abort_task";
578 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
579 desc = "query_task";
580 else
581 return 0;
582
583 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
584
585 handle = le16_to_cpu(tm_request->DevHandle);
586 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
587 for (i = ioc->scsiio_depth; i && !found; i--) {
588 scmd = ioc->scsi_lookup[i - 1].scmd;
589 if (scmd == NULL || scmd->device == NULL ||
590 scmd->device->hostdata == NULL)
591 continue;
592 if (lun != scmd->device->lun)
593 continue;
594 priv_data = scmd->device->hostdata;
595 if (priv_data->sas_target == NULL)
596 continue;
597 if (priv_data->sas_target->handle != handle)
598 continue;
599 tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
600 found = 1;
601 }
602 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
603
604 if (!found) {
605 dctlprintk(ioc, pr_info(MPT3SAS_FMT
606 "%s: handle(0x%04x), lun(%d), no active mid!!\n",
607 ioc->name,
608 desc, le16_to_cpu(tm_request->DevHandle), lun));
609 tm_reply = ioc->ctl_cmds.reply;
610 tm_reply->DevHandle = tm_request->DevHandle;
611 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
612 tm_reply->TaskType = tm_request->TaskType;
613 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
614 tm_reply->VP_ID = tm_request->VP_ID;
615 tm_reply->VF_ID = tm_request->VF_ID;
616 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
617 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
618 sz))
619 pr_err("failure at %s:%d/%s()!\n", __FILE__,
620 __LINE__, __func__);
621 return 1;
622 }
623
624 dctlprintk(ioc, pr_info(MPT3SAS_FMT
625 "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
626 desc, le16_to_cpu(tm_request->DevHandle), lun,
627 le16_to_cpu(tm_request->TaskMID)));
628 return 0;
629 }
630
631 /**
632 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
633 * @ioc: per adapter object
634 * @karg - (struct mpt3_ioctl_command)
635 * @mf - pointer to mf in user space
636 */
637 static long
638 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
639 void __user *mf)
640 {
641 MPI2RequestHeader_t *mpi_request = NULL, *request;
642 MPI2DefaultReply_t *mpi_reply;
643 u32 ioc_state;
644 u16 smid;
645 unsigned long timeout;
646 u8 issue_reset;
647 u32 sz;
648 void *psge;
649 void *data_out = NULL;
650 dma_addr_t data_out_dma = 0;
651 size_t data_out_sz = 0;
652 void *data_in = NULL;
653 dma_addr_t data_in_dma = 0;
654 size_t data_in_sz = 0;
655 long ret;
656 u16 wait_state_count;
657 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
658
659 issue_reset = 0;
660
661 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
662 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
663 ioc->name, __func__);
664 ret = -EAGAIN;
665 goto out;
666 }
667
668 wait_state_count = 0;
669 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
670 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
671 if (wait_state_count++ == 10) {
672 pr_err(MPT3SAS_FMT
673 "%s: failed due to ioc not operational\n",
674 ioc->name, __func__);
675 ret = -EFAULT;
676 goto out;
677 }
678 ssleep(1);
679 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
680 pr_info(MPT3SAS_FMT
681 "%s: waiting for operational state(count=%d)\n",
682 ioc->name,
683 __func__, wait_state_count);
684 }
685 if (wait_state_count)
686 pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
687 ioc->name, __func__);
688
689 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
690 if (!mpi_request) {
691 pr_err(MPT3SAS_FMT
692 "%s: failed obtaining a memory for mpi_request\n",
693 ioc->name, __func__);
694 ret = -ENOMEM;
695 goto out;
696 }
697
698 /* Check for overflow and wraparound */
699 if (karg.data_sge_offset * 4 > ioc->request_sz ||
700 karg.data_sge_offset > (UINT_MAX / 4)) {
701 ret = -EINVAL;
702 goto out;
703 }
704
705 /* copy in request message frame from user */
706 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
707 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
708 __func__);
709 ret = -EFAULT;
710 goto out;
711 }
712
713 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
714 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
715 if (!smid) {
716 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
717 ioc->name, __func__);
718 ret = -EAGAIN;
719 goto out;
720 }
721 } else {
722
723 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
724 if (!smid) {
725 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
726 ioc->name, __func__);
727 ret = -EAGAIN;
728 goto out;
729 }
730 }
731
732 ret = 0;
733 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
734 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
735 request = mpt3sas_base_get_msg_frame(ioc, smid);
736 memcpy(request, mpi_request, karg.data_sge_offset*4);
737 ioc->ctl_cmds.smid = smid;
738 data_out_sz = karg.data_out_size;
739 data_in_sz = karg.data_in_size;
740
741 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
742 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
743 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
744 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH) {
745
746 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
747 if (!device_handle || (device_handle >
748 ioc->facts.MaxDevHandle)) {
749 ret = -EINVAL;
750 mpt3sas_base_free_smid(ioc, smid);
751 goto out;
752 }
753 }
754
755 /* obtain dma-able memory for data transfer */
756 if (data_out_sz) /* WRITE */ {
757 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
758 &data_out_dma);
759 if (!data_out) {
760 pr_err("failure at %s:%d/%s()!\n", __FILE__,
761 __LINE__, __func__);
762 ret = -ENOMEM;
763 mpt3sas_base_free_smid(ioc, smid);
764 goto out;
765 }
766 if (copy_from_user(data_out, karg.data_out_buf_ptr,
767 data_out_sz)) {
768 pr_err("failure at %s:%d/%s()!\n", __FILE__,
769 __LINE__, __func__);
770 ret = -EFAULT;
771 mpt3sas_base_free_smid(ioc, smid);
772 goto out;
773 }
774 }
775
776 if (data_in_sz) /* READ */ {
777 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
778 &data_in_dma);
779 if (!data_in) {
780 pr_err("failure at %s:%d/%s()!\n", __FILE__,
781 __LINE__, __func__);
782 ret = -ENOMEM;
783 mpt3sas_base_free_smid(ioc, smid);
784 goto out;
785 }
786 }
787
788 psge = (void *)request + (karg.data_sge_offset*4);
789
790 /* send command to firmware */
791 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
792
793 init_completion(&ioc->ctl_cmds.done);
794 switch (mpi_request->Function) {
795 case MPI2_FUNCTION_SCSI_IO_REQUEST:
796 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
797 {
798 Mpi2SCSIIORequest_t *scsiio_request =
799 (Mpi2SCSIIORequest_t *)request;
800 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
801 scsiio_request->SenseBufferLowAddress =
802 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
803 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
804 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
805 dtmprintk(ioc, pr_info(MPT3SAS_FMT
806 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
807 ioc->name, device_handle));
808 mpt3sas_base_free_smid(ioc, smid);
809 ret = -EINVAL;
810 goto out;
811 }
812 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
813 data_in_dma, data_in_sz);
814 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
815 ioc->put_smid_scsi_io(ioc, smid, device_handle);
816 else
817 ioc->put_smid_default(ioc, smid);
818 break;
819 }
820 case MPI2_FUNCTION_SCSI_TASK_MGMT:
821 {
822 Mpi2SCSITaskManagementRequest_t *tm_request =
823 (Mpi2SCSITaskManagementRequest_t *)request;
824
825 dtmprintk(ioc, pr_info(MPT3SAS_FMT
826 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
827 ioc->name,
828 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
829
830 if (tm_request->TaskType ==
831 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
832 tm_request->TaskType ==
833 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
834 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
835 mpt3sas_base_free_smid(ioc, smid);
836 goto out;
837 }
838 }
839
840 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
841 dtmprintk(ioc, pr_info(MPT3SAS_FMT
842 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
843 ioc->name, device_handle));
844 mpt3sas_base_free_smid(ioc, smid);
845 ret = -EINVAL;
846 goto out;
847 }
848 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
849 tm_request->DevHandle));
850 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
851 data_in_dma, data_in_sz);
852 ioc->put_smid_hi_priority(ioc, smid, 0);
853 break;
854 }
855 case MPI2_FUNCTION_SMP_PASSTHROUGH:
856 {
857 Mpi2SmpPassthroughRequest_t *smp_request =
858 (Mpi2SmpPassthroughRequest_t *)mpi_request;
859 u8 *data;
860
861 /* ioc determines which port to use */
862 smp_request->PhysicalPort = 0xFF;
863 if (smp_request->PassthroughFlags &
864 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
865 data = (u8 *)&smp_request->SGL;
866 else {
867 if (unlikely(data_out == NULL)) {
868 pr_err("failure at %s:%d/%s()!\n",
869 __FILE__, __LINE__, __func__);
870 mpt3sas_base_free_smid(ioc, smid);
871 ret = -EINVAL;
872 goto out;
873 }
874 data = data_out;
875 }
876
877 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
878 ioc->ioc_link_reset_in_progress = 1;
879 ioc->ignore_loginfos = 1;
880 }
881 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
882 data_in_sz);
883 ioc->put_smid_default(ioc, smid);
884 break;
885 }
886 case MPI2_FUNCTION_SATA_PASSTHROUGH:
887 {
888 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
889 dtmprintk(ioc, pr_info(MPT3SAS_FMT
890 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
891 ioc->name, device_handle));
892 mpt3sas_base_free_smid(ioc, smid);
893 ret = -EINVAL;
894 goto out;
895 }
896 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
897 data_in_sz);
898 ioc->put_smid_default(ioc, smid);
899 break;
900 }
901 case MPI2_FUNCTION_FW_DOWNLOAD:
902 case MPI2_FUNCTION_FW_UPLOAD:
903 {
904 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
905 data_in_sz);
906 ioc->put_smid_default(ioc, smid);
907 break;
908 }
909 case MPI2_FUNCTION_TOOLBOX:
910 {
911 Mpi2ToolboxCleanRequest_t *toolbox_request =
912 (Mpi2ToolboxCleanRequest_t *)mpi_request;
913
914 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
915 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
916 data_in_dma, data_in_sz);
917 } else {
918 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
919 data_in_dma, data_in_sz);
920 }
921 ioc->put_smid_default(ioc, smid);
922 break;
923 }
924 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
925 {
926 Mpi2SasIoUnitControlRequest_t *sasiounit_request =
927 (Mpi2SasIoUnitControlRequest_t *)mpi_request;
928
929 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
930 || sasiounit_request->Operation ==
931 MPI2_SAS_OP_PHY_LINK_RESET) {
932 ioc->ioc_link_reset_in_progress = 1;
933 ioc->ignore_loginfos = 1;
934 }
935 /* drop to default case for posting the request */
936 }
937 default:
938 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
939 data_in_dma, data_in_sz);
940 ioc->put_smid_default(ioc, smid);
941 break;
942 }
943
944 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
945 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
946 else
947 timeout = karg.timeout;
948 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
949 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
950 Mpi2SCSITaskManagementRequest_t *tm_request =
951 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
952 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
953 tm_request->DevHandle));
954 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
955 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
956 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
957 ioc->ioc_link_reset_in_progress) {
958 ioc->ioc_link_reset_in_progress = 0;
959 ioc->ignore_loginfos = 0;
960 }
961 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
962 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
963 __func__);
964 _debug_dump_mf(mpi_request, karg.data_sge_offset);
965 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
966 issue_reset = 1;
967 goto issue_host_reset;
968 }
969
970 mpi_reply = ioc->ctl_cmds.reply;
971
972 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
973 (ioc->logging_level & MPT_DEBUG_TM)) {
974 Mpi2SCSITaskManagementReply_t *tm_reply =
975 (Mpi2SCSITaskManagementReply_t *)mpi_reply;
976
977 pr_info(MPT3SAS_FMT "TASK_MGMT: " \
978 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
979 "TerminationCount(0x%08x)\n", ioc->name,
980 le16_to_cpu(tm_reply->IOCStatus),
981 le32_to_cpu(tm_reply->IOCLogInfo),
982 le32_to_cpu(tm_reply->TerminationCount));
983 }
984
985 /* copy out xdata to user */
986 if (data_in_sz) {
987 if (copy_to_user(karg.data_in_buf_ptr, data_in,
988 data_in_sz)) {
989 pr_err("failure at %s:%d/%s()!\n", __FILE__,
990 __LINE__, __func__);
991 ret = -ENODATA;
992 goto out;
993 }
994 }
995
996 /* copy out reply message frame to user */
997 if (karg.max_reply_bytes) {
998 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
999 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1000 sz)) {
1001 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1002 __LINE__, __func__);
1003 ret = -ENODATA;
1004 goto out;
1005 }
1006 }
1007
1008 /* copy out sense to user */
1009 if (karg.max_sense_bytes && (mpi_request->Function ==
1010 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1011 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
1012 sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
1013 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1014 sz)) {
1015 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1016 __LINE__, __func__);
1017 ret = -ENODATA;
1018 goto out;
1019 }
1020 }
1021
1022 issue_host_reset:
1023 if (issue_reset) {
1024 ret = -ENODATA;
1025 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1026 mpi_request->Function ==
1027 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1028 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1029 pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
1030 ioc->name,
1031 le16_to_cpu(mpi_request->FunctionDependent1));
1032 mpt3sas_halt_firmware(ioc);
1033 mpt3sas_scsih_issue_locked_tm(ioc,
1034 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
1035 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
1036 } else
1037 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1038 }
1039
1040 out:
1041
1042 /* free memory associated with sg buffers */
1043 if (data_in)
1044 pci_free_consistent(ioc->pdev, data_in_sz, data_in,
1045 data_in_dma);
1046
1047 if (data_out)
1048 pci_free_consistent(ioc->pdev, data_out_sz, data_out,
1049 data_out_dma);
1050
1051 kfree(mpi_request);
1052 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1053 return ret;
1054 }
1055
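/*
 * Sketch (illustration only) of the struct mpt3_ioctl_command fields this
 * handler actually consumes; the surrounding MPT3COMMAND ioctl plumbing is
 * outside this section, so treat the setup below as an assumption.
 *
 *	struct mpt3_ioctl_command karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.timeout             = 30;
 *	karg.data_sge_offset     = sge_offset_in_dwords;
 *	karg.data_out_size       = write_len;
 *	karg.data_out_buf_ptr    = write_buf;
 *	karg.data_in_size        = read_len;
 *	karg.data_in_buf_ptr     = read_buf;
 *	karg.max_reply_bytes     = sizeof(reply_frame);
 *	karg.reply_frame_buf_ptr = reply_frame;
 *	karg.max_sense_bytes     = sizeof(sense_buf);
 *	karg.sense_data_ptr      = sense_buf;
 *
 * data_sge_offset is expressed in 4-byte words and must land inside the MPI
 * request frame (the overflow check above rejects anything past request_sz).
 * data_out_* describes a host-to-firmware (WRITE) buffer and data_in_* a
 * firmware-to-host (READ) buffer; the sense fields only apply to SCSI_IO and
 * RAID passthrough.  timeout is in seconds and is raised to
 * MPT3_IOCTL_DEFAULT_TIMEOUT when smaller.
 */
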
1056 /**
1057 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1058 * @ioc: per adapter object
1059 * @arg - user space buffer containing ioctl content
1060 */
1061 static long
1062 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1063 {
1064 struct mpt3_ioctl_iocinfo karg;
1065
1066 if (copy_from_user(&karg, arg, sizeof(karg))) {
1067 pr_err("failure at %s:%d/%s()!\n",
1068 __FILE__, __LINE__, __func__);
1069 return -EFAULT;
1070 }
1071
1072 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1073 __func__));
1074
1075 memset(&karg, 0 , sizeof(karg));
1076 if (ioc->pfacts)
1077 karg.port_number = ioc->pfacts[0].PortNumber;
1078 karg.hw_rev = ioc->pdev->revision;
1079 karg.pci_id = ioc->pdev->device;
1080 karg.subsystem_device = ioc->pdev->subsystem_device;
1081 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1082 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1083 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1084 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1085 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1086 karg.firmware_version = ioc->facts.FWVersion.Word;
1087 strcpy(karg.driver_version, ioc->driver_name);
1088 strcat(karg.driver_version, "-");
1089 switch (ioc->hba_mpi_version_belonged) {
1090 case MPI2_VERSION:
1091 if (ioc->is_warpdrive)
1092 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1093 else
1094 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1095 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1096 break;
1097 case MPI25_VERSION:
1098 case MPI26_VERSION:
1099 if (ioc->is_gen35_ioc)
1100 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1101 else
1102 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1103 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1104 break;
1105 }
1106 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1107
1108 if (copy_to_user(arg, &karg, sizeof(karg))) {
1109 pr_err("failure at %s:%d/%s()!\n",
1110 __FILE__, __LINE__, __func__);
1111 return -EFAULT;
1112 }
1113 return 0;
1114 }
1115
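/*
 * User-space sketch (illustration only): reading back the fields filled in
 * above.  The "/dev/mpt3ctl" node name and the hdr.ioc_number selector are
 * assumptions based on the ctl interface conventions, not this section.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include "mpt3sas_ctl.h"
 *
 *	struct mpt3_ioctl_iocinfo info;
 *	int fd = open("/dev/mpt3ctl", O_RDWR);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.hdr.ioc_number = 0;
 *	if (fd >= 0 && !ioctl(fd, MPT3IOCINFO, &info))
 *		printf("fw 0x%08x driver %s adapter_type %d\n",
 *		    info.firmware_version, info.driver_version,
 *		    info.adapter_type);
 */
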
1116 /**
1117 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1118 * @ioc: per adapter object
1119 * @arg - user space buffer containing ioctl content
1120 */
1121 static long
1122 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1123 {
1124 struct mpt3_ioctl_eventquery karg;
1125
1126 if (copy_from_user(&karg, arg, sizeof(karg))) {
1127 pr_err("failure at %s:%d/%s()!\n",
1128 __FILE__, __LINE__, __func__);
1129 return -EFAULT;
1130 }
1131
1132 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1133 __func__));
1134
1135 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1136 memcpy(karg.event_types, ioc->event_type,
1137 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1138
1139 if (copy_to_user(arg, &karg, sizeof(karg))) {
1140 pr_err("failure at %s:%d/%s()!\n",
1141 __FILE__, __LINE__, __func__);
1142 return -EFAULT;
1143 }
1144 return 0;
1145 }
1146
1147 /**
1148 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1149 * @ioc: per adapter object
1150 * @arg - user space buffer containing ioctl content
1151 */
1152 static long
1153 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1154 {
1155 struct mpt3_ioctl_eventenable karg;
1156
1157 if (copy_from_user(&karg, arg, sizeof(karg))) {
1158 pr_err("failure at %s:%d/%s()!\n",
1159 __FILE__, __LINE__, __func__);
1160 return -EFAULT;
1161 }
1162
1163 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1164 __func__));
1165
1166 memcpy(ioc->event_type, karg.event_types,
1167 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1168 mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1169
1170 if (ioc->event_log)
1171 return 0;
1172 /* initialize event_log */
1173 ioc->event_context = 0;
1174 ioc->aen_event_read_flag = 0;
1175 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1176 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1177 if (!ioc->event_log) {
1178 pr_err("failure at %s:%d/%s()!\n",
1179 __FILE__, __LINE__, __func__);
1180 return -ENOMEM;
1181 }
1182 return 0;
1183 }
1184
1185 /**
1186 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1187 * @ioc: per adapter object
1188 * @arg - user space buffer containing ioctl content
1189 */
1190 static long
1191 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1192 {
1193 struct mpt3_ioctl_eventreport karg;
1194 u32 number_bytes, max_events, max;
1195 struct mpt3_ioctl_eventreport __user *uarg = arg;
1196
1197 if (copy_from_user(&karg, arg, sizeof(karg))) {
1198 pr_err("failure at %s:%d/%s()!\n",
1199 __FILE__, __LINE__, __func__);
1200 return -EFAULT;
1201 }
1202
1203 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1204 __func__));
1205
1206 number_bytes = karg.hdr.max_data_size -
1207 sizeof(struct mpt3_ioctl_header);
1208 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1209 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1210
1211 /* If fewer than 1 event is requested, there must have
1212 * been some type of error.
1213 */
1214 if (!max || !ioc->event_log)
1215 return -ENODATA;
1216
1217 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1218 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1219 pr_err("failure at %s:%d/%s()!\n",
1220 __FILE__, __LINE__, __func__);
1221 return -EFAULT;
1222 }
1223
1224 /* reset flag so SIGIO can restart */
1225 ioc->aen_event_read_flag = 0;
1226 return 0;
1227 }
1228
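/*
 * Sketch (illustration only) tying the three event ioctls together; the
 * hdr.ioc_number setup and any fields not referenced above are assumptions.
 *
 *	struct mpt3_ioctl_eventquery query;
 *	struct mpt3_ioctl_eventenable enable;
 *
 *	memset(&query, 0, sizeof(query));
 *	ioctl(fd, MPT3EVENTQUERY, &query);
 *	memset(&enable, 0, sizeof(enable));
 *	memset(enable.event_types, 0xFF, sizeof(enable.event_types));
 *	ioctl(fd, MPT3EVENTENABLE, &enable);
 *
 * MPT3EVENTQUERY reports the current mask and the log depth
 * (MPT3SAS_CTL_EVENT_LOG_SIZE); MPT3EVENTENABLE installs the new mask and
 * allocates the circular log.  Once poll()/SIGIO reports activity,
 * MPT3EVENTREPORT copies out up to hdr.max_data_size bytes worth of
 * struct MPT3_IOCTL_EVENTS entries and re-arms the notification by clearing
 * aen_event_read_flag.
 */
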
1229 /**
1230 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1231 * @ioc: per adapter object
1232 * @arg - user space buffer containing ioctl content
1233 */
1234 static long
1235 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1236 {
1237 struct mpt3_ioctl_diag_reset karg;
1238 int retval;
1239
1240 if (copy_from_user(&karg, arg, sizeof(karg))) {
1241 pr_err("failure at %s:%d/%s()!\n",
1242 __FILE__, __LINE__, __func__);
1243 return -EFAULT;
1244 }
1245
1246 if (ioc->shost_recovery || ioc->pci_error_recovery ||
1247 ioc->is_driver_loading)
1248 return -EAGAIN;
1249
1250 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1251 __func__));
1252
1253 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1254 pr_info(MPT3SAS_FMT "host reset: %s\n",
1255 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1256 return 0;
1257 }
1258
1259 /**
1260 * _ctl_btdh_search_sas_device - searching for sas device
1261 * @ioc: per adapter object
1262 * @btdh: btdh ioctl payload
1263 */
1264 static int
1265 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1266 struct mpt3_ioctl_btdh_mapping *btdh)
1267 {
1268 struct _sas_device *sas_device;
1269 unsigned long flags;
1270 int rc = 0;
1271
1272 if (list_empty(&ioc->sas_device_list))
1273 return rc;
1274
1275 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1276 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1277 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1278 btdh->handle == sas_device->handle) {
1279 btdh->bus = sas_device->channel;
1280 btdh->id = sas_device->id;
1281 rc = 1;
1282 goto out;
1283 } else if (btdh->bus == sas_device->channel && btdh->id ==
1284 sas_device->id && btdh->handle == 0xFFFF) {
1285 btdh->handle = sas_device->handle;
1286 rc = 1;
1287 goto out;
1288 }
1289 }
1290 out:
1291 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1292 return rc;
1293 }
1294
1295 /**
1296 * _ctl_btdh_search_raid_device - searching for raid device
1297 * @ioc: per adapter object
1298 * @btdh: btdh ioctl payload
1299 */
1300 static int
1301 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1302 struct mpt3_ioctl_btdh_mapping *btdh)
1303 {
1304 struct _raid_device *raid_device;
1305 unsigned long flags;
1306 int rc = 0;
1307
1308 if (list_empty(&ioc->raid_device_list))
1309 return rc;
1310
1311 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1312 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1313 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1314 btdh->handle == raid_device->handle) {
1315 btdh->bus = raid_device->channel;
1316 btdh->id = raid_device->id;
1317 rc = 1;
1318 goto out;
1319 } else if (btdh->bus == raid_device->channel && btdh->id ==
1320 raid_device->id && btdh->handle == 0xFFFF) {
1321 btdh->handle = raid_device->handle;
1322 rc = 1;
1323 goto out;
1324 }
1325 }
1326 out:
1327 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1328 return rc;
1329 }
1330
1331 /**
1332 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1333 * @ioc: per adapter object
1334 * @arg - user space buffer containing ioctl content
1335 */
1336 static long
1337 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1338 {
1339 struct mpt3_ioctl_btdh_mapping karg;
1340 int rc;
1341
1342 if (copy_from_user(&karg, arg, sizeof(karg))) {
1343 pr_err("failure at %s:%d/%s()!\n",
1344 __FILE__, __LINE__, __func__);
1345 return -EFAULT;
1346 }
1347
1348 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1349 __func__));
1350
1351 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1352 if (!rc)
1353 _ctl_btdh_search_raid_device(ioc, &karg);
1354
1355 if (copy_to_user(arg, &karg, sizeof(karg))) {
1356 pr_err("failure at %s:%d/%s()!\n",
1357 __FILE__, __LINE__, __func__);
1358 return -EFAULT;
1359 }
1360 return 0;
1361 }
1362
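/*
 * User-space sketch (illustration only) of the wildcard convention the two
 * search helpers above implement; hdr.ioc_number is an assumption.  Passing
 * bus/id as 0xFFFFFFFF with a valid handle resolves handle -> bus/target,
 * while a valid bus/id with handle 0xFFFF resolves the other direction.
 *
 *	struct mpt3_ioctl_btdh_mapping map;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.hdr.ioc_number = 0;
 *	map.bus    = 0xFFFFFFFF;
 *	map.id     = 0xFFFFFFFF;
 *	map.handle = dev_handle;
 *	ioctl(fd, MPT3BTDHMAPPING, &map);
 */
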
1363 /**
1364 * _ctl_diag_capability - return diag buffer capability
1365 * @ioc: per adapter object
1366 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1367 *
1368 * returns 1 when diag buffer support is enabled in firmware
1369 */
1370 static u8
1371 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1372 {
1373 u8 rc = 0;
1374
1375 switch (buffer_type) {
1376 case MPI2_DIAG_BUF_TYPE_TRACE:
1377 if (ioc->facts.IOCCapabilities &
1378 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1379 rc = 1;
1380 break;
1381 case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1382 if (ioc->facts.IOCCapabilities &
1383 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1384 rc = 1;
1385 break;
1386 case MPI2_DIAG_BUF_TYPE_EXTENDED:
1387 if (ioc->facts.IOCCapabilities &
1388 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1389 rc = 1;
1390 }
1391
1392 return rc;
1393 }
1394
1395
1396 /**
1397 * _ctl_diag_register_2 - wrapper for registering diag buffer support
1398 * @ioc: per adapter object
1399 * @diag_register: the diag_register struct passed in from user space
1400 *
1401 */
1402 static long
1403 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1404 struct mpt3_diag_register *diag_register)
1405 {
1406 int rc, i;
1407 void *request_data = NULL;
1408 dma_addr_t request_data_dma;
1409 u32 request_data_sz = 0;
1410 Mpi2DiagBufferPostRequest_t *mpi_request;
1411 Mpi2DiagBufferPostReply_t *mpi_reply;
1412 u8 buffer_type;
1413 u16 smid;
1414 u16 ioc_status;
1415 u32 ioc_state;
1416 u8 issue_reset = 0;
1417
1418 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1419 __func__));
1420
1421 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1422 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1423 pr_err(MPT3SAS_FMT
1424 "%s: failed due to ioc not operational\n",
1425 ioc->name, __func__);
1426 rc = -EAGAIN;
1427 goto out;
1428 }
1429
1430 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1431 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1432 ioc->name, __func__);
1433 rc = -EAGAIN;
1434 goto out;
1435 }
1436
1437 buffer_type = diag_register->buffer_type;
1438 if (!_ctl_diag_capability(ioc, buffer_type)) {
1439 pr_err(MPT3SAS_FMT
1440 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1441 ioc->name, __func__, buffer_type);
1442 return -EPERM;
1443 }
1444
1445 if (ioc->diag_buffer_status[buffer_type] &
1446 MPT3_DIAG_BUFFER_IS_REGISTERED) {
1447 pr_err(MPT3SAS_FMT
1448 "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1449 ioc->name, __func__,
1450 buffer_type);
1451 return -EINVAL;
1452 }
1453
1454 if (diag_register->requested_buffer_size % 4) {
1455 pr_err(MPT3SAS_FMT
1456 "%s: the requested_buffer_size is not 4 byte aligned\n",
1457 ioc->name, __func__);
1458 return -EINVAL;
1459 }
1460
1461 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1462 if (!smid) {
1463 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1464 ioc->name, __func__);
1465 rc = -EAGAIN;
1466 goto out;
1467 }
1468
1469 rc = 0;
1470 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1471 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1472 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1473 ioc->ctl_cmds.smid = smid;
1474
1475 request_data = ioc->diag_buffer[buffer_type];
1476 request_data_sz = diag_register->requested_buffer_size;
1477 ioc->unique_id[buffer_type] = diag_register->unique_id;
1478 ioc->diag_buffer_status[buffer_type] = 0;
1479 memcpy(ioc->product_specific[buffer_type],
1480 diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1481 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1482
1483 if (request_data) {
1484 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1485 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1486 pci_free_consistent(ioc->pdev,
1487 ioc->diag_buffer_sz[buffer_type],
1488 request_data, request_data_dma);
1489 request_data = NULL;
1490 }
1491 }
1492
1493 if (request_data == NULL) {
1494 ioc->diag_buffer_sz[buffer_type] = 0;
1495 ioc->diag_buffer_dma[buffer_type] = 0;
1496 request_data = pci_alloc_consistent(
1497 ioc->pdev, request_data_sz, &request_data_dma);
1498 if (request_data == NULL) {
1499 pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
1500 " for diag buffers, requested size(%d)\n",
1501 ioc->name, __func__, request_data_sz);
1502 mpt3sas_base_free_smid(ioc, smid);
1503 return -ENOMEM;
1504 }
1505 ioc->diag_buffer[buffer_type] = request_data;
1506 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1507 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1508 }
1509
1510 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1511 mpi_request->BufferType = diag_register->buffer_type;
1512 mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1513 mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1514 mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1515 mpi_request->VF_ID = 0; /* TODO */
1516 mpi_request->VP_ID = 0;
1517
1518 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1519 "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1520 ioc->name, __func__, request_data,
1521 (unsigned long long)request_data_dma,
1522 le32_to_cpu(mpi_request->BufferLength)));
1523
1524 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1525 mpi_request->ProductSpecific[i] =
1526 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1527
1528 init_completion(&ioc->ctl_cmds.done);
1529 ioc->put_smid_default(ioc, smid);
1530 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1531 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1532
1533 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1534 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
1535 __func__);
1536 _debug_dump_mf(mpi_request,
1537 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1538 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
1539 issue_reset = 1;
1540 goto issue_host_reset;
1541 }
1542
1543 /* process the completed Reply Message Frame */
1544 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1545 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1546 ioc->name, __func__);
1547 rc = -EFAULT;
1548 goto out;
1549 }
1550
1551 mpi_reply = ioc->ctl_cmds.reply;
1552 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1553
1554 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1555 ioc->diag_buffer_status[buffer_type] |=
1556 MPT3_DIAG_BUFFER_IS_REGISTERED;
1557 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1558 ioc->name, __func__));
1559 } else {
1560 pr_info(MPT3SAS_FMT
1561 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1562 ioc->name, __func__,
1563 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1564 rc = -EFAULT;
1565 }
1566
1567 issue_host_reset:
1568 if (issue_reset)
1569 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1570
1571 out:
1572
1573 if (rc && request_data)
1574 pci_free_consistent(ioc->pdev, request_data_sz,
1575 request_data, request_data_dma);
1576
1577 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1578 return rc;
1579 }
1580
1581 /**
1582 * mpt3sas_enable_diag_buffer - enable diag buffer support at driver load time
1583 * @ioc: per adapter object
1584 * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1
1585 *
1586 * This is called when command line option diag_buffer_enable is enabled
1587 * at driver load time.
1588 */
1589 void
1590 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1591 {
1592 struct mpt3_diag_register diag_register;
1593
1594 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1595
1596 if (bits_to_register & 1) {
1597 pr_info(MPT3SAS_FMT "registering trace buffer support\n",
1598 ioc->name);
1599 ioc->diag_trigger_master.MasterData =
1600 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1601 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1602 /* register for 2MB buffers */
1603 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1604 diag_register.unique_id = 0x7075900;
1605 _ctl_diag_register_2(ioc, &diag_register);
1606 }
1607
1608 if (bits_to_register & 2) {
1609 pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
1610 ioc->name);
1611 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1612 /* register for 2MB buffers */
1613 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1614 diag_register.unique_id = 0x7075901;
1615 _ctl_diag_register_2(ioc, &diag_register);
1616 }
1617
1618 if (bits_to_register & 4) {
1619 pr_info(MPT3SAS_FMT "registering extended buffer support\n",
1620 ioc->name);
1621 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1622 /* register for 2MB buffers */
1623 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1624 diag_register.unique_id = 0x7075901;
1625 _ctl_diag_register_2(ioc, &diag_register);
1626 }
1627 }
1628
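/*
 * A user-space application can register a buffer at run time through
 * _ctl_diag_register() below.  Sketch (illustration only; the
 * MPT3DIAGREGISTER request name and hdr.ioc_number come from mpt3sas_ctl.h
 * and are assumptions as far as this section goes):
 *
 *	struct mpt3_diag_register reg;
 *
 *	memset(&reg, 0, sizeof(reg));
 *	reg.hdr.ioc_number        = 0;
 *	reg.buffer_type           = MPI2_DIAG_BUF_TYPE_TRACE;
 *	reg.requested_buffer_size = 2 * 1024 * 1024;
 *	reg.unique_id             = 0x07075900;
 *	ioctl(fd, MPT3DIAGREGISTER, &reg);
 *
 * requested_buffer_size must be a multiple of 4.  The unregister and release
 * paths below derive the buffer type from the low byte of unique_id, so
 * choosing an id whose low byte matches buffer_type (as the trace/snapshot
 * cases above do) keeps the whole flow consistent.
 */
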
1629 /**
1630 * _ctl_diag_register - application registers a diag buffer with the driver
1631 * @ioc: per adapter object
1632 * @arg - user space buffer containing ioctl content
1633 *
1634 * This will allow the driver to set up any required buffers that firmware
1635 * needs to communicate with the driver.
1636 */
1637 static long
1638 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1639 {
1640 struct mpt3_diag_register karg;
1641 long rc;
1642
1643 if (copy_from_user(&karg, arg, sizeof(karg))) {
1644 pr_err("failure at %s:%d/%s()!\n",
1645 __FILE__, __LINE__, __func__);
1646 return -EFAULT;
1647 }
1648
1649 rc = _ctl_diag_register_2(ioc, &karg);
1650 return rc;
1651 }
1652
1653 /**
1654 * _ctl_diag_unregister - application unregisters a diag buffer with the driver
1655 * @ioc: per adapter object
1656 * @arg - user space buffer containing ioctl content
1657 *
1658 * This will allow the driver to clean up any memory allocated for diag
1659 * messages and to free up any resources.
1660 */
1661 static long
1662 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1663 {
1664 struct mpt3_diag_unregister karg;
1665 void *request_data;
1666 dma_addr_t request_data_dma;
1667 u32 request_data_sz;
1668 u8 buffer_type;
1669
1670 if (copy_from_user(&karg, arg, sizeof(karg))) {
1671 pr_err("failure at %s:%d/%s()!\n",
1672 __FILE__, __LINE__, __func__);
1673 return -EFAULT;
1674 }
1675
1676 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1677 __func__));
1678
1679 buffer_type = karg.unique_id & 0x000000ff;
1680 if (!_ctl_diag_capability(ioc, buffer_type)) {
1681 pr_err(MPT3SAS_FMT
1682 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1683 ioc->name, __func__, buffer_type);
1684 return -EPERM;
1685 }
1686
1687 if ((ioc->diag_buffer_status[buffer_type] &
1688 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1689 pr_err(MPT3SAS_FMT
1690 "%s: buffer_type(0x%02x) is not registered\n",
1691 ioc->name, __func__, buffer_type);
1692 return -EINVAL;
1693 }
1694 if ((ioc->diag_buffer_status[buffer_type] &
1695 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1696 pr_err(MPT3SAS_FMT
1697 "%s: buffer_type(0x%02x) has not been released\n",
1698 ioc->name, __func__, buffer_type);
1699 return -EINVAL;
1700 }
1701
1702 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1703 pr_err(MPT3SAS_FMT
1704 "%s: unique_id(0x%08x) is not registered\n",
1705 ioc->name, __func__, karg.unique_id);
1706 return -EINVAL;
1707 }
1708
1709 request_data = ioc->diag_buffer[buffer_type];
1710 if (!request_data) {
1711 pr_err(MPT3SAS_FMT
1712 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1713 ioc->name, __func__, buffer_type);
1714 return -ENOMEM;
1715 }
1716
1717 request_data_sz = ioc->diag_buffer_sz[buffer_type];
1718 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1719 pci_free_consistent(ioc->pdev, request_data_sz,
1720 request_data, request_data_dma);
1721 ioc->diag_buffer[buffer_type] = NULL;
1722 ioc->diag_buffer_status[buffer_type] = 0;
1723 return 0;
1724 }
1725
1726 /**
1727 * _ctl_diag_query - query relevant info associated with diag buffers
1728 * @ioc: per adapter object
1729 * @arg - user space buffer containing ioctl content
1730 *
1731 * The application sends only buffer_type and unique_id.  The driver
1732 * inspects unique_id first; if valid, it fills in all the info.  If
1733 * unique_id is 0x00, the driver returns the info for that buffer_type.
1734 */
1735 static long
1736 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1737 {
1738 struct mpt3_diag_query karg;
1739 void *request_data;
1740 int i;
1741 u8 buffer_type;
1742
1743 if (copy_from_user(&karg, arg, sizeof(karg))) {
1744 pr_err("failure at %s:%d/%s()!\n",
1745 __FILE__, __LINE__, __func__);
1746 return -EFAULT;
1747 }
1748
1749 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1750 __func__));
1751
1752 karg.application_flags = 0;
1753 buffer_type = karg.buffer_type;
1754
1755 if (!_ctl_diag_capability(ioc, buffer_type)) {
1756 pr_err(MPT3SAS_FMT
1757 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1758 ioc->name, __func__, buffer_type);
1759 return -EPERM;
1760 }
1761
1762 if ((ioc->diag_buffer_status[buffer_type] &
1763 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1764 pr_err(MPT3SAS_FMT
1765 "%s: buffer_type(0x%02x) is not registered\n",
1766 ioc->name, __func__, buffer_type);
1767 return -EINVAL;
1768 }
1769
1770 if (karg.unique_id & 0xffffff00) {
1771 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1772 pr_err(MPT3SAS_FMT
1773 "%s: unique_id(0x%08x) is not registered\n",
1774 ioc->name, __func__, karg.unique_id);
1775 return -EINVAL;
1776 }
1777 }
1778
1779 request_data = ioc->diag_buffer[buffer_type];
1780 if (!request_data) {
1781 pr_err(MPT3SAS_FMT
1782 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
1783 ioc->name, __func__, buffer_type);
1784 return -ENOMEM;
1785 }
1786
1787 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
1788 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1789 MPT3_APP_FLAGS_BUFFER_VALID);
1790 else
1791 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1792 MPT3_APP_FLAGS_BUFFER_VALID |
1793 MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
1794
1795 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1796 karg.product_specific[i] =
1797 ioc->product_specific[buffer_type][i];
1798
1799 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1800 karg.driver_added_buffer_size = 0;
1801 karg.unique_id = ioc->unique_id[buffer_type];
1802 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1803
1804 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
1805 pr_err(MPT3SAS_FMT
1806 "%s: unable to write mpt3_diag_query data @ %p\n",
1807 ioc->name, __func__, arg);
1808 return -EFAULT;
1809 }
1810 return 0;
1811 }
1812
1813 /**
1814 * mpt3sas_send_diag_release - Diag Release Message
1815 * @ioc: per adapter object
1816 * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
1817 * @issue_reset - specifies whether host reset is required.
1818 *
1819 */
1820 int
1821 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1822 u8 *issue_reset)
1823 {
1824 Mpi2DiagReleaseRequest_t *mpi_request;
1825 Mpi2DiagReleaseReply_t *mpi_reply;
1826 u16 smid;
1827 u16 ioc_status;
1828 u32 ioc_state;
1829 int rc;
1830
1831 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1832 __func__));
1833
1834 rc = 0;
1835 *issue_reset = 0;
1836
1837 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1838 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1839 if (ioc->diag_buffer_status[buffer_type] &
1840 MPT3_DIAG_BUFFER_IS_REGISTERED)
1841 ioc->diag_buffer_status[buffer_type] |=
1842 MPT3_DIAG_BUFFER_IS_RELEASED;
1843 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1844 "%s: skipping due to FAULT state\n", ioc->name,
1845 __func__));
1846 rc = -EAGAIN;
1847 goto out;
1848 }
1849
1850 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1851 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1852 ioc->name, __func__);
1853 rc = -EAGAIN;
1854 goto out;
1855 }
1856
1857 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1858 if (!smid) {
1859 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1860 ioc->name, __func__);
1861 rc = -EAGAIN;
1862 goto out;
1863 }
1864
1865 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1866 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1867 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1868 ioc->ctl_cmds.smid = smid;
1869
1870 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1871 mpi_request->BufferType = buffer_type;
1872 mpi_request->VF_ID = 0; /* TODO */
1873 mpi_request->VP_ID = 0;
1874
1875 init_completion(&ioc->ctl_cmds.done);
1876 ioc->put_smid_default(ioc, smid);
1877 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1878 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1879
1880 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1881 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
1882 __func__);
1883 _debug_dump_mf(mpi_request,
1884 sizeof(Mpi2DiagReleaseRequest_t)/4);
1885 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
1886 *issue_reset = 1;
1887 rc = -EFAULT;
1888 goto out;
1889 }
1890
1891 /* process the completed Reply Message Frame */
1892 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1893 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1894 ioc->name, __func__);
1895 rc = -EFAULT;
1896 goto out;
1897 }
1898
1899 mpi_reply = ioc->ctl_cmds.reply;
1900 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1901
1902 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1903 ioc->diag_buffer_status[buffer_type] |=
1904 MPT3_DIAG_BUFFER_IS_RELEASED;
1905 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1906 ioc->name, __func__));
1907 } else {
1908 pr_info(MPT3SAS_FMT
1909 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1910 ioc->name, __func__,
1911 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1912 rc = -EFAULT;
1913 }
1914
1915 out:
1916 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1917 return rc;
1918 }
1919
1920 /**
1921 * _ctl_diag_release - request to send Diag Release Message to firmware
1922 * @arg - user space buffer containing ioctl content
1923 *
1924 * This allows ownership of the specified buffer to be returned to the
1925 * driver, allowing an application to read the buffer without fear that
1926 * firmware is overwriting information in the buffer.
1927 */
1928 static long
1929 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1930 {
1931 struct mpt3_diag_release karg;
1932 void *request_data;
1933 int rc;
1934 u8 buffer_type;
1935 u8 issue_reset = 0;
1936
1937 if (copy_from_user(&karg, arg, sizeof(karg))) {
1938 pr_err("failure at %s:%d/%s()!\n",
1939 __FILE__, __LINE__, __func__);
1940 return -EFAULT;
1941 }
1942
1943 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1944 __func__));
1945
1946 buffer_type = karg.unique_id & 0x000000ff;
1947 if (!_ctl_diag_capability(ioc, buffer_type)) {
1948 pr_err(MPT3SAS_FMT
1949 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1950 ioc->name, __func__, buffer_type);
1951 return -EPERM;
1952 }
1953
1954 if ((ioc->diag_buffer_status[buffer_type] &
1955 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1956 pr_err(MPT3SAS_FMT
1957 "%s: buffer_type(0x%02x) is not registered\n",
1958 ioc->name, __func__, buffer_type);
1959 return -EINVAL;
1960 }
1961
1962 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1963 pr_err(MPT3SAS_FMT
1964 "%s: unique_id(0x%08x) is not registered\n",
1965 ioc->name, __func__, karg.unique_id);
1966 return -EINVAL;
1967 }
1968
1969 if (ioc->diag_buffer_status[buffer_type] &
1970 MPT3_DIAG_BUFFER_IS_RELEASED) {
1971 pr_err(MPT3SAS_FMT
1972 "%s: buffer_type(0x%02x) is already released\n",
1973 ioc->name, __func__,
1974 buffer_type);
1975 return 0;
1976 }
1977
1978 request_data = ioc->diag_buffer[buffer_type];
1979
1980 if (!request_data) {
1981 pr_err(MPT3SAS_FMT
1982 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1983 ioc->name, __func__, buffer_type);
1984 return -ENOMEM;
1985 }
1986
1987 /* buffer was released due to host reset */
1988 if ((ioc->diag_buffer_status[buffer_type] &
1989 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
1990 ioc->diag_buffer_status[buffer_type] |=
1991 MPT3_DIAG_BUFFER_IS_RELEASED;
1992 ioc->diag_buffer_status[buffer_type] &=
1993 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
1994 pr_err(MPT3SAS_FMT
1995 "%s: buffer_type(0x%02x) was released due to host reset\n",
1996 ioc->name, __func__, buffer_type);
1997 return 0;
1998 }
1999
2000 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2001
2002 if (issue_reset)
2003 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2004
2005 return rc;
2006 }
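
/*
 * Illustrative sketch (not part of the driver): releasing a registered
 * trace buffer from user space so it can be read back safely.  The names
 * "fd" and "unique_id_from_query" are assumed to come from an earlier
 * open()/MPT3DIAGQUERY step:
 *
 *	struct mpt3_diag_release karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = 0;
 *	karg.unique_id = unique_id_from_query;
 *	if (ioctl(fd, MPT3DIAGRELEASE, &karg) != 0)
 *		perror("MPT3DIAGRELEASE");
 */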
2007
2008 /**
2009 * _ctl_diag_read_buffer - request for copy of the diag buffer
2010 * @ioc: per adapter object
2011 * @arg - user space buffer containing ioctl content
2012 */
2013 static long
2014 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2015 {
2016 struct mpt3_diag_read_buffer karg;
2017 struct mpt3_diag_read_buffer __user *uarg = arg;
2018 void *request_data, *diag_data;
2019 Mpi2DiagBufferPostRequest_t *mpi_request;
2020 Mpi2DiagBufferPostReply_t *mpi_reply;
2021 int rc, i;
2022 u8 buffer_type;
2023 unsigned long request_size, copy_size;
2024 u16 smid;
2025 u16 ioc_status;
2026 u8 issue_reset = 0;
2027
2028 if (copy_from_user(&karg, arg, sizeof(karg))) {
2029 pr_err("failure at %s:%d/%s()!\n",
2030 __FILE__, __LINE__, __func__);
2031 return -EFAULT;
2032 }
2033
2034 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2035 __func__));
2036
2037 buffer_type = karg.unique_id & 0x000000ff;
2038 if (!_ctl_diag_capability(ioc, buffer_type)) {
2039 pr_err(MPT3SAS_FMT
2040 "%s: doesn't have capability for buffer_type(0x%02x)\n",
2041 ioc->name, __func__, buffer_type);
2042 return -EPERM;
2043 }
2044
2045 if (karg.unique_id != ioc->unique_id[buffer_type]) {
2046 pr_err(MPT3SAS_FMT
2047 "%s: unique_id(0x%08x) is not registered\n",
2048 ioc->name, __func__, karg.unique_id);
2049 return -EINVAL;
2050 }
2051
2052 request_data = ioc->diag_buffer[buffer_type];
2053 if (!request_data) {
2054 pr_err(MPT3SAS_FMT
2055 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2056 ioc->name, __func__, buffer_type);
2057 return -ENOMEM;
2058 }
2059
2060 request_size = ioc->diag_buffer_sz[buffer_type];
2061
2062 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2063 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
2064 "or bytes_to_read are not 4 byte aligned\n", ioc->name,
2065 __func__);
2066 return -EINVAL;
2067 }
2068
2069 if (karg.starting_offset > request_size)
2070 return -EINVAL;
2071
2072 diag_data = (void *)(request_data + karg.starting_offset);
2073 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2074 "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2075 ioc->name, __func__,
2076 diag_data, karg.starting_offset, karg.bytes_to_read));
2077
2078 /* Truncate data on requests that are too large */
2079 if ((diag_data + karg.bytes_to_read < diag_data) ||
2080 (diag_data + karg.bytes_to_read > request_data + request_size))
2081 copy_size = request_size - karg.starting_offset;
2082 else
2083 copy_size = karg.bytes_to_read;
2084
2085 if (copy_to_user((void __user *)uarg->diagnostic_data,
2086 diag_data, copy_size)) {
2087 pr_err(MPT3SAS_FMT
2088 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2089 ioc->name, __func__, diag_data);
2090 return -EFAULT;
2091 }
2092
2093 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2094 return 0;
2095
2096 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2097 "%s: Reregister buffer_type(0x%02x)\n",
2098 ioc->name, __func__, buffer_type));
2099 if ((ioc->diag_buffer_status[buffer_type] &
2100 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2101 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2102 "%s: buffer_type(0x%02x) is still registered\n",
2103 ioc->name, __func__, buffer_type));
2104 return 0;
2105 }
2106 /* Get a free request frame and save the message context.
2107 */
2108
2109 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2110 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
2111 ioc->name, __func__);
2112 rc = -EAGAIN;
2113 goto out;
2114 }
2115
2116 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2117 if (!smid) {
2118 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2119 ioc->name, __func__);
2120 rc = -EAGAIN;
2121 goto out;
2122 }
2123
2124 rc = 0;
2125 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2126 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2127 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2128 ioc->ctl_cmds.smid = smid;
2129
2130 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2131 mpi_request->BufferType = buffer_type;
2132 mpi_request->BufferLength =
2133 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2134 mpi_request->BufferAddress =
2135 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2136 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2137 mpi_request->ProductSpecific[i] =
2138 cpu_to_le32(ioc->product_specific[buffer_type][i]);
2139 mpi_request->VF_ID = 0; /* TODO */
2140 mpi_request->VP_ID = 0;
2141
2142 init_completion(&ioc->ctl_cmds.done);
2143 ioc->put_smid_default(ioc, smid);
2144 wait_for_completion_timeout(&ioc->ctl_cmds.done,
2145 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2146
2147 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2148 pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
2149 __func__);
2150 _debug_dump_mf(mpi_request,
2151 sizeof(Mpi2DiagBufferPostRequest_t)/4);
2152 if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
2153 issue_reset = 1;
2154 goto issue_host_reset;
2155 }
2156
2157 /* process the completed Reply Message Frame */
2158 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2159 pr_err(MPT3SAS_FMT "%s: no reply message\n",
2160 ioc->name, __func__);
2161 rc = -EFAULT;
2162 goto out;
2163 }
2164
2165 mpi_reply = ioc->ctl_cmds.reply;
2166 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2167
2168 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2169 ioc->diag_buffer_status[buffer_type] |=
2170 MPT3_DIAG_BUFFER_IS_REGISTERED;
2171 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
2172 ioc->name, __func__));
2173 } else {
2174 pr_info(MPT3SAS_FMT
2175 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2176 ioc->name, __func__,
2177 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2178 rc = -EFAULT;
2179 }
2180
2181 issue_host_reset:
2182 if (issue_reset)
2183 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2184
2185 out:
2186
2187 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2188 return rc;
2189 }
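
/*
 * Illustrative sketch (not part of the driver): reading a released trace
 * buffer in 4-byte-aligned chunks; the data lands in the diagnostic_data
 * area at the end of the user structure.  "fd", "unique_id_from_query"
 * and "total_size" are assumed to come from earlier MPT3DIAGQUERY usage:
 *
 *	struct mpt3_diag_read_buffer *karg;
 *	uint32_t chunk = 4096, offset;
 *
 *	karg = calloc(1, sizeof(*karg) + chunk);
 *	karg->hdr.ioc_number = 0;
 *	karg->unique_id = unique_id_from_query;
 *	karg->bytes_to_read = chunk;
 *	for (offset = 0; offset < total_size; offset += chunk) {
 *		karg->starting_offset = offset;
 *		if (ioctl(fd, MPT3DIAGREADBUFFER, karg) != 0)
 *			break;
 *	}
 */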
2190
2191
2192
2193 #ifdef CONFIG_COMPAT
2194 /**
2195 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2196 * @ioc: per adapter object
2197 * @cmd - ioctl opcode
2198 * @arg - (struct mpt3_ioctl_command32)
2199 *
2200 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2201 */
2202 static long
2203 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2204 void __user *arg)
2205 {
2206 struct mpt3_ioctl_command32 karg32;
2207 struct mpt3_ioctl_command32 __user *uarg;
2208 struct mpt3_ioctl_command karg;
2209
2210 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2211 return -EINVAL;
2212
2213 uarg = (struct mpt3_ioctl_command32 __user *) arg;
2214
2215 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2216 pr_err("failure at %s:%d/%s()!\n",
2217 __FILE__, __LINE__, __func__);
2218 return -EFAULT;
2219 }
2220
2221 memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2222 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2223 karg.hdr.port_number = karg32.hdr.port_number;
2224 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2225 karg.timeout = karg32.timeout;
2226 karg.max_reply_bytes = karg32.max_reply_bytes;
2227 karg.data_in_size = karg32.data_in_size;
2228 karg.data_out_size = karg32.data_out_size;
2229 karg.max_sense_bytes = karg32.max_sense_bytes;
2230 karg.data_sge_offset = karg32.data_sge_offset;
2231 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2232 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2233 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2234 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2235 return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2236 }
2237 #endif
2238
2239 /**
2240 * _ctl_ioctl_main - main ioctl entry point
2241 * @file - (struct file)
2242 * @cmd - ioctl opcode
2243 * @arg - user space data buffer
2244 * @compat - handles 32 bit applications in 64bit os
2245 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2246 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2247 */
2248 static long
2249 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2250 u8 compat, u16 mpi_version)
2251 {
2252 struct MPT3SAS_ADAPTER *ioc;
2253 struct mpt3_ioctl_header ioctl_header;
2254 enum block_state state;
2255 long ret = -EINVAL;
2256
2257 /* get IOCTL header */
2258 if (copy_from_user(&ioctl_header, (char __user *)arg,
2259 sizeof(struct mpt3_ioctl_header))) {
2260 pr_err("failure at %s:%d/%s()!\n",
2261 __FILE__, __LINE__, __func__);
2262 return -EFAULT;
2263 }
2264
2265 if (_ctl_verify_adapter(ioctl_header.ioc_number,
2266 &ioc, mpi_version) == -1 || !ioc)
2267 return -ENODEV;
2268
2269 /* pci_access_mutex lock acquired by ioctl path */
2270 mutex_lock(&ioc->pci_access_mutex);
2271
2272 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2273 ioc->is_driver_loading || ioc->remove_host) {
2274 ret = -EAGAIN;
2275 goto out_unlock_pciaccess;
2276 }
2277
2278 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2279 if (state == NON_BLOCKING) {
2280 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2281 ret = -EAGAIN;
2282 goto out_unlock_pciaccess;
2283 }
2284 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2285 ret = -ERESTARTSYS;
2286 goto out_unlock_pciaccess;
2287 }
2288
2289
2290 switch (cmd) {
2291 case MPT3IOCINFO:
2292 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2293 ret = _ctl_getiocinfo(ioc, arg);
2294 break;
2295 #ifdef CONFIG_COMPAT
2296 case MPT3COMMAND32:
2297 #endif
2298 case MPT3COMMAND:
2299 {
2300 struct mpt3_ioctl_command __user *uarg;
2301 struct mpt3_ioctl_command karg;
2302
2303 #ifdef CONFIG_COMPAT
2304 if (compat) {
2305 ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2306 break;
2307 }
2308 #endif
2309 if (copy_from_user(&karg, arg, sizeof(karg))) {
2310 pr_err("failure at %s:%d/%s()!\n",
2311 __FILE__, __LINE__, __func__);
2312 ret = -EFAULT;
2313 break;
2314 }
2315
2316 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2317 uarg = arg;
2318 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2319 }
2320 break;
2321 }
2322 case MPT3EVENTQUERY:
2323 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2324 ret = _ctl_eventquery(ioc, arg);
2325 break;
2326 case MPT3EVENTENABLE:
2327 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2328 ret = _ctl_eventenable(ioc, arg);
2329 break;
2330 case MPT3EVENTREPORT:
2331 ret = _ctl_eventreport(ioc, arg);
2332 break;
2333 case MPT3HARDRESET:
2334 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2335 ret = _ctl_do_reset(ioc, arg);
2336 break;
2337 case MPT3BTDHMAPPING:
2338 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2339 ret = _ctl_btdh_mapping(ioc, arg);
2340 break;
2341 case MPT3DIAGREGISTER:
2342 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2343 ret = _ctl_diag_register(ioc, arg);
2344 break;
2345 case MPT3DIAGUNREGISTER:
2346 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2347 ret = _ctl_diag_unregister(ioc, arg);
2348 break;
2349 case MPT3DIAGQUERY:
2350 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2351 ret = _ctl_diag_query(ioc, arg);
2352 break;
2353 case MPT3DIAGRELEASE:
2354 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2355 ret = _ctl_diag_release(ioc, arg);
2356 break;
2357 case MPT3DIAGREADBUFFER:
2358 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2359 ret = _ctl_diag_read_buffer(ioc, arg);
2360 break;
2361 default:
2362 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2363 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2364 break;
2365 }
2366
2367 mutex_unlock(&ioc->ctl_cmds.mutex);
2368 out_unlock_pciaccess:
2369 mutex_unlock(&ioc->pci_access_mutex);
2370 return ret;
2371 }
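
/*
 * Illustrative sketch (not part of the driver): the dispatch above is
 * reached from user space through the misc devices registered in
 * mpt3sas_ctl_init(), e.g. (assuming the first adapter, ioc_number 0):
 *
 *	struct mpt3_ioctl_iocinfo iocinfo;
 *	int fd = open("/dev/mpt3ctl", O_RDWR);
 *
 *	memset(&iocinfo, 0, sizeof(iocinfo));
 *	iocinfo.hdr.ioc_number = 0;
 *	if (fd >= 0 && ioctl(fd, MPT3IOCINFO, &iocinfo) == 0)
 *		printf("firmware version: 0x%08x\n",
 *		    iocinfo.firmware_version);
 */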
2372
2373 /**
2374 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2375 * @file - (struct file)
2376 * @cmd - ioctl opcode
2377 * @arg -
2378 */
2379 static long
2380 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2381 {
2382 long ret;
2383
2384 /* pass MPI25_VERSION | MPI26_VERSION value,
2385 * to indicate that this ioctl cmd
2386 * came from mpt3ctl ioctl device.
2387 */
2388 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2389 MPI25_VERSION | MPI26_VERSION);
2390 return ret;
2391 }
2392
2393 /**
2394 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2395 * @file - (struct file)
2396 * @cmd - ioctl opcode
2397 * @arg -
2398 */
2399 static long
2400 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2401 {
2402 long ret;
2403
2404 /* pass MPI2_VERSION value, to indicate that this ioctl cmd
2405 * came from mpt2ctl ioctl device.
2406 */
2407 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2408 return ret;
2409 }
2410 #ifdef CONFIG_COMPAT
2411 /**
2412 * _ctl_ioctl_compat - main ioctl entry point (compat)
2413 * @file -
2414 * @cmd -
2415 * @arg -
2416 *
2417 * This routine handles 32-bit applications running on a 64-bit OS.
2418 */
2419 static long
2420 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2421 {
2422 long ret;
2423
2424 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2425 MPI25_VERSION | MPI26_VERSION);
2426 return ret;
2427 }
2428
2429 /**
2430 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2431 * @file -
2432 * @cmd -
2433 * @arg -
2434 *
2435 * This routine handles 32-bit applications running on a 64-bit OS.
2436 */
2437 static long
2438 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2439 {
2440 long ret;
2441
2442 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2443 return ret;
2444 }
2445 #endif
2446
2447 /* scsi host attributes */
2448 /**
2449 * _ctl_version_fw_show - firmware version
2450 * @cdev - pointer to embedded class device
2451 * @buf - the buffer returned
2452 *
2453 * A sysfs 'read-only' shost attribute.
2454 */
2455 static ssize_t
2456 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2457 char *buf)
2458 {
2459 struct Scsi_Host *shost = class_to_shost(cdev);
2460 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2461
2462 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2463 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2464 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2465 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2466 ioc->facts.FWVersion.Word & 0x000000FF);
2467 }
2468 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
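
/*
 * Example (illustrative): an FWVersion.Word of 0x0E000200 is reported
 * through this attribute as "14.00.02.00", one byte per field with the
 * most significant byte first.
 */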
2469
2470 /**
2471 * _ctl_version_bios_show - bios version
2472 * @cdev - pointer to embedded class device
2473 * @buf - the buffer returned
2474 *
2475 * A sysfs 'read-only' shost attribute.
2476 */
2477 static ssize_t
2478 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2479 char *buf)
2480 {
2481 struct Scsi_Host *shost = class_to_shost(cdev);
2482 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2483
2484 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2485
2486 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2487 (version & 0xFF000000) >> 24,
2488 (version & 0x00FF0000) >> 16,
2489 (version & 0x0000FF00) >> 8,
2490 version & 0x000000FF);
2491 }
2492 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2493
2494 /**
2495 * _ctl_version_mpi_show - MPI (message passing interface) version
2496 * @cdev - pointer to embedded class device
2497 * @buf - the buffer returned
2498 *
2499 * A sysfs 'read-only' shost attribute.
2500 */
2501 static ssize_t
2502 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2503 char *buf)
2504 {
2505 struct Scsi_Host *shost = class_to_shost(cdev);
2506 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2507
2508 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2509 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2510 }
2511 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2512
2513 /**
2514 * _ctl_version_product_show - product name
2515 * @cdev - pointer to embedded class device
2516 * @buf - the buffer returned
2517 *
2518 * A sysfs 'read-only' shost attribute.
2519 */
2520 static ssize_t
2521 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2522 char *buf)
2523 {
2524 struct Scsi_Host *shost = class_to_shost(cdev);
2525 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2526
2527 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2528 }
2529 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
2530
2531 /**
2532 * _ctl_version_nvdata_persistent_show - nvdata persistent version
2533 * @cdev - pointer to embedded class device
2534 * @buf - the buffer returned
2535 *
2536 * A sysfs 'read-only' shost attribute.
2537 */
2538 static ssize_t
2539 _ctl_version_nvdata_persistent_show(struct device *cdev,
2540 struct device_attribute *attr, char *buf)
2541 {
2542 struct Scsi_Host *shost = class_to_shost(cdev);
2543 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2544
2545 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2546 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2547 }
2548 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2549 _ctl_version_nvdata_persistent_show, NULL);
2550
2551 /**
2552 * _ctl_version_nvdata_default_show - nvdata default version
2553 * @cdev - pointer to embedded class device
2554 * @buf - the buffer returned
2555 *
2556 * A sysfs 'read-only' shost attribute.
2557 */
2558 static ssize_t
2559 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
2560 *attr, char *buf)
2561 {
2562 struct Scsi_Host *shost = class_to_shost(cdev);
2563 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2564
2565 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2566 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2567 }
2568 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2569 _ctl_version_nvdata_default_show, NULL);
2570
2571 /**
2572 * _ctl_board_name_show - board name
2573 * @cdev - pointer to embedded class device
2574 * @buf - the buffer returned
2575 *
2576 * A sysfs 'read-only' shost attribute.
2577 */
2578 static ssize_t
2579 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2580 char *buf)
2581 {
2582 struct Scsi_Host *shost = class_to_shost(cdev);
2583 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2584
2585 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2586 }
2587 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2588
2589 /**
2590 * _ctl_board_assembly_show - board assembly name
2591 * @cdev - pointer to embedded class device
2592 * @buf - the buffer returned
2593 *
2594 * A sysfs 'read-only' shost attribute.
2595 */
2596 static ssize_t
2597 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2598 char *buf)
2599 {
2600 struct Scsi_Host *shost = class_to_shost(cdev);
2601 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2602
2603 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2604 }
2605 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
2606
2607 /**
2608 * _ctl_board_tracer_show - board tracer number
2609 * @cdev - pointer to embedded class device
2610 * @buf - the buffer returned
2611 *
2612 * A sysfs 'read-only' shost attribute.
2613 */
2614 static ssize_t
2615 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2616 char *buf)
2617 {
2618 struct Scsi_Host *shost = class_to_shost(cdev);
2619 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2620
2621 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2622 }
2623 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
2624
2625 /**
2626 * _ctl_io_delay_show - io missing delay
2627 * @cdev - pointer to embedded class device
2628 * @buf - the buffer returned
2629 *
2630 * This is for the firmware implementation for debouncing device
2631 * removal events.
2632 *
2633 * A sysfs 'read-only' shost attribute.
2634 */
2635 static ssize_t
2636 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2637 char *buf)
2638 {
2639 struct Scsi_Host *shost = class_to_shost(cdev);
2640 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2641
2642 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2643 }
2644 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
2645
2646 /**
2647 * _ctl_device_delay_show - device missing delay
2648 * @cdev - pointer to embedded class device
2649 * @buf - the buffer returned
2650 *
2651 * This is for the firmware implementation for debouncing device
2652 * removal events.
2653 *
2654 * A sysfs 'read-only' shost attribute.
2655 */
2656 static ssize_t
2657 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2658 char *buf)
2659 {
2660 struct Scsi_Host *shost = class_to_shost(cdev);
2661 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2662
2663 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2664 }
2665 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
2666
2667 /**
2668 * _ctl_fw_queue_depth_show - global credits
2669 * @cdev - pointer to embedded class device
2670 * @buf - the buffer returned
2671 *
2672 * This is the firmware queue depth limit (global credits).
2673 *
2674 * A sysfs 'read-only' shost attribute.
2675 */
2676 static ssize_t
2677 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2678 char *buf)
2679 {
2680 struct Scsi_Host *shost = class_to_shost(cdev);
2681 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2682
2683 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2684 }
2685 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
2686
2687 /**
2688 * _ctl_host_sas_address_show - host sas address
2689 * @cdev - pointer to embedded class device
2690 * @buf - the buffer returned
2691 *
2692 * This is the controller sas address
2693 *
2694 * A sysfs 'read-only' shost attribute.
2695 */
2696 static ssize_t
2697 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2698 char *buf)
2699
2700 {
2701 struct Scsi_Host *shost = class_to_shost(cdev);
2702 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2703
2704 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2705 (unsigned long long)ioc->sas_hba.sas_address);
2706 }
2707 static DEVICE_ATTR(host_sas_address, S_IRUGO,
2708 _ctl_host_sas_address_show, NULL);
2709
2710 /**
2711 * _ctl_logging_level_show - logging level
2712 * @cdev - pointer to embedded class device
2713 * @buf - the buffer returned
2714 *
2715 * A sysfs 'read/write' shost attribute.
2716 */
2717 static ssize_t
2718 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2719 char *buf)
2720 {
2721 struct Scsi_Host *shost = class_to_shost(cdev);
2722 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2723
2724 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2725 }
2726 static ssize_t
2727 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2728 const char *buf, size_t count)
2729 {
2730 struct Scsi_Host *shost = class_to_shost(cdev);
2731 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2732 int val = 0;
2733
2734 if (sscanf(buf, "%x", &val) != 1)
2735 return -EINVAL;
2736
2737 ioc->logging_level = val;
2738 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
2739 ioc->logging_level);
2740 return strlen(buf);
2741 }
2742 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
2743 _ctl_logging_level_store);
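
/*
 * Example (illustrative), for an assumed host number X; the value is
 * parsed and reported as hexadecimal:
 *
 *	echo 3f8 > /sys/class/scsi_host/hostX/logging_level
 *	cat /sys/class/scsi_host/hostX/logging_level
 */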
2744
2745 /**
2746 * _ctl_fwfault_debug_show - show/store fwfault_debug
2747 * @cdev - pointer to embedded class device
2748 * @buf - the buffer returned
2749 *
2750 * mpt3sas_fwfault_debug is a command line option.
2751 * A sysfs 'read/write' shost attribute.
2752 */
2753 static ssize_t
2754 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
2755 char *buf)
2756 {
2757 struct Scsi_Host *shost = class_to_shost(cdev);
2758 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2759
2760 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2761 }
2762 static ssize_t
2763 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
2764 const char *buf, size_t count)
2765 {
2766 struct Scsi_Host *shost = class_to_shost(cdev);
2767 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2768 int val = 0;
2769
2770 if (sscanf(buf, "%d", &val) != 1)
2771 return -EINVAL;
2772
2773 ioc->fwfault_debug = val;
2774 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
2775 ioc->fwfault_debug);
2776 return strlen(buf);
2777 }
2778 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2779 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2780
2781 /**
2782 * _ctl_ioc_reset_count_show - ioc reset count
2783 * @cdev - pointer to embedded class device
2784 * @buf - the buffer returned
2785 *
2786 * This is the number of times the IOC has been reset.
2787 *
2788 * A sysfs 'read-only' shost attribute.
2789 */
2790 static ssize_t
2791 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2792 char *buf)
2793 {
2794 struct Scsi_Host *shost = class_to_shost(cdev);
2795 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2796
2797 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
2798 }
2799 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
2800
2801 /**
2802 * _ctl_ioc_reply_queue_count_show - number of reply queues
2803 * @cdev - pointer to embedded class device
2804 * @buf - the buffer returned
2805 *
2806 * This is the number of reply queues.
2807 *
2808 * A sysfs 'read-only' shost attribute.
2809 */
2810 static ssize_t
2811 _ctl_ioc_reply_queue_count_show(struct device *cdev,
2812 struct device_attribute *attr, char *buf)
2813 {
2814 u8 reply_queue_count;
2815 struct Scsi_Host *shost = class_to_shost(cdev);
2816 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2817
2818 if ((ioc->facts.IOCCapabilities &
2819 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2820 reply_queue_count = ioc->reply_queue_count;
2821 else
2822 reply_queue_count = 1;
2823
2824 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2825 }
2826 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
2827 NULL);
2828
2829 /**
2830 * _ctl_BRM_status_show - Backup Rail Monitor Status
2831 * @cdev - pointer to embedded class device
2832 * @buf - the buffer returned
2833 *
2834 * This is the Backup Rail Monitor (BRM) status, only valid on warpdrive controllers.
2835 *
2836 * A sysfs 'read-only' shost attribute.
2837 */
2838 static ssize_t
2839 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2840 char *buf)
2841 {
2842 struct Scsi_Host *shost = class_to_shost(cdev);
2843 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2844 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2845 Mpi2ConfigReply_t mpi_reply;
2846 u16 backup_rail_monitor_status = 0;
2847 u16 ioc_status;
2848 int sz;
2849 ssize_t rc = 0;
2850
2851 if (!ioc->is_warpdrive) {
2852 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
2853 " warpdrive\n", ioc->name, __func__);
2854 goto out;
2855 }
2856 /* pci_access_mutex lock acquired by sysfs show path */
2857 mutex_lock(&ioc->pci_access_mutex);
2858 if (ioc->pci_error_recovery || ioc->remove_host) {
2859 mutex_unlock(&ioc->pci_access_mutex);
2860 return 0;
2861 }
2862
2863 /* allocate up to GPIOVal 36 entries */
2864 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2865 io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2866 if (!io_unit_pg3) {
2867 pr_err(MPT3SAS_FMT "%s: failed allocating memory "
2868 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
2869 goto out;
2870 }
2871
2872 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2873 0) {
2874 pr_err(MPT3SAS_FMT
2875 "%s: failed reading iounit_pg3\n", ioc->name,
2876 __func__);
2877 goto out;
2878 }
2879
2880 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2881 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2882 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
2883 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
2884 goto out;
2885 }
2886
2887 if (io_unit_pg3->GPIOCount < 25) {
2888 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
2889 "25 entries, detected (%d) entries\n", ioc->name, __func__,
2890 io_unit_pg3->GPIOCount);
2891 goto out;
2892 }
2893
2894 /* BRM status is in bit zero of GPIOVal[24] */
2895 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
2896 rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
2897
2898 out:
2899 kfree(io_unit_pg3);
2900 mutex_unlock(&ioc->pci_access_mutex);
2901 return rc;
2902 }
2903 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
2904
2905 struct DIAG_BUFFER_START {
2906 __le32 Size;
2907 __le32 DiagVersion;
2908 u8 BufferType;
2909 u8 Reserved[3];
2910 __le32 Reserved1;
2911 __le32 Reserved2;
2912 __le32 Reserved3;
2913 };
2914
2915 /**
2916 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
2917 * @cdev - pointer to embedded class device
2918 * @buf - the buffer returned
2919 *
2920 * A sysfs 'read-only' shost attribute.
2921 */
2922 static ssize_t
2923 _ctl_host_trace_buffer_size_show(struct device *cdev,
2924 struct device_attribute *attr, char *buf)
2925 {
2926 struct Scsi_Host *shost = class_to_shost(cdev);
2927 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2928 u32 size = 0;
2929 struct DIAG_BUFFER_START *request_data;
2930
2931 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2932 pr_err(MPT3SAS_FMT
2933 "%s: host_trace_buffer is not registered\n",
2934 ioc->name, __func__);
2935 return 0;
2936 }
2937
2938 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2939 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2940 pr_err(MPT3SAS_FMT
2941 "%s: host_trace_buffer is not registered\n",
2942 ioc->name, __func__);
2943 return 0;
2944 }
2945
2946 request_data = (struct DIAG_BUFFER_START *)
2947 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
2948 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
2949 le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
2950 le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
2951 le32_to_cpu(request_data->Reserved3) == 0x4742444c)
2952 size = le32_to_cpu(request_data->Size);
2953
2954 ioc->ring_buffer_sz = size;
2955 return snprintf(buf, PAGE_SIZE, "%d\n", size);
2956 }
2957 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
2958 _ctl_host_trace_buffer_size_show, NULL);
2959
2960 /**
2961 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
2962 * @cdev - pointer to embedded class device
2963 * @buf - the buffer returned
2964 *
2965 * A sysfs 'read/write' shost attribute.
2966 *
2967 * Only up to 4k bytes of the ring buffer can be read at a time.
2968 * In order to read beyond 4k bytes, write the desired offset to this
2969 * same attribute; that moves the read pointer.
2970 */
2971 static ssize_t
2972 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
2973 char *buf)
2974 {
2975 struct Scsi_Host *shost = class_to_shost(cdev);
2976 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2977 void *request_data;
2978 u32 size;
2979
2980 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
2981 pr_err(MPT3SAS_FMT
2982 "%s: host_trace_buffer is not registered\n",
2983 ioc->name, __func__);
2984 return 0;
2985 }
2986
2987 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
2988 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2989 pr_err(MPT3SAS_FMT
2990 "%s: host_trace_buffer is not registered\n",
2991 ioc->name, __func__);
2992 return 0;
2993 }
2994
2995 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
2996 return 0;
2997
2998 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
2999 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3000 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3001 memcpy(buf, request_data, size);
3002 return size;
3003 }
3004
3005 static ssize_t
3006 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3007 const char *buf, size_t count)
3008 {
3009 struct Scsi_Host *shost = class_to_shost(cdev);
3010 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3011 int val = 0;
3012
3013 if (sscanf(buf, "%d", &val) != 1)
3014 return -EINVAL;
3015
3016 ioc->ring_buffer_offset = val;
3017 return strlen(buf);
3018 }
3019 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
3020 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
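
/*
 * Illustrative sketch (not part of the driver): dumping the whole trace
 * buffer through this attribute means alternating an offset write with a
 * read, at most PAGE_SIZE - 1 bytes at a time, e.g. from a shell for an
 * assumed host number X:
 *
 *	size=$(cat /sys/class/scsi_host/hostX/host_trace_buffer_size)
 *	offset=0
 *	while [ $offset -lt $size ]; do
 *		echo $offset > /sys/class/scsi_host/hostX/host_trace_buffer
 *		cat /sys/class/scsi_host/hostX/host_trace_buffer >> trace.bin
 *		offset=$((offset + 4095))
 *	done
 */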
3021
3022
3023 /*****************************************/
3024
3025 /**
3026 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
3027 * @cdev - pointer to embedded class device
3028 * @buf - the buffer returned
3029 *
3030 * A sysfs 'read/write' shost attribute.
3031 *
3032 * This is a mechanism to post/release the host trace buffer.
3033 */
3034 static ssize_t
3035 _ctl_host_trace_buffer_enable_show(struct device *cdev,
3036 struct device_attribute *attr, char *buf)
3037 {
3038 struct Scsi_Host *shost = class_to_shost(cdev);
3039 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3040
3041 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3042 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3043 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3044 return snprintf(buf, PAGE_SIZE, "off\n");
3045 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3046 MPT3_DIAG_BUFFER_IS_RELEASED))
3047 return snprintf(buf, PAGE_SIZE, "release\n");
3048 else
3049 return snprintf(buf, PAGE_SIZE, "post\n");
3050 }
3051
3052 static ssize_t
3053 _ctl_host_trace_buffer_enable_store(struct device *cdev,
3054 struct device_attribute *attr, const char *buf, size_t count)
3055 {
3056 struct Scsi_Host *shost = class_to_shost(cdev);
3057 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3058 char str[10] = "";
3059 struct mpt3_diag_register diag_register;
3060 u8 issue_reset = 0;
3061
3062 /* don't allow post/release to occur while recovery is active */
3063 if (ioc->shost_recovery || ioc->remove_host ||
3064 ioc->pci_error_recovery || ioc->is_driver_loading)
3065 return -EBUSY;
3066
3067 if (sscanf(buf, "%9s", str) != 1)
3068 return -EINVAL;
3069
3070 if (!strcmp(str, "post")) {
3071 /* exit out if host buffers are already posted */
3072 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3073 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3074 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3075 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3076 MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3077 goto out;
3078 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3079 pr_info(MPT3SAS_FMT "posting host trace buffers\n",
3080 ioc->name);
3081 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3082 diag_register.requested_buffer_size = (1024 * 1024);
3083 diag_register.unique_id = 0x7075900;
3084 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3085 _ctl_diag_register_2(ioc, &diag_register);
3086 } else if (!strcmp(str, "release")) {
3087 /* exit out if host buffers are already released */
3088 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3089 goto out;
3090 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3091 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3092 goto out;
3093 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3094 MPT3_DIAG_BUFFER_IS_RELEASED))
3095 goto out;
3096 pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
3097 ioc->name);
3098 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3099 &issue_reset);
3100 }
3101
3102 out:
3103 return strlen(buf);
3104 }
3105 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
3106 _ctl_host_trace_buffer_enable_show,
3107 _ctl_host_trace_buffer_enable_store);
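
/*
 * Example (illustrative), for an assumed host number X:
 *
 *	echo post > /sys/class/scsi_host/hostX/host_trace_buffer_enable
 *	echo release > /sys/class/scsi_host/hostX/host_trace_buffer_enable
 *
 * "post" registers a 1 MB trace buffer with the firmware; "release"
 * returns ownership to the driver so the buffer can be read back through
 * the host_trace_buffer attribute.
 */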
3108
3109 /*********** diagnostic trigger support **********************************/
3110
3111 /**
3112 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
3113 * @cdev - pointer to embedded class device
3114 * @buf - the buffer returned
3115 *
3116 * A sysfs 'read/write' shost attribute.
3117 */
3118 static ssize_t
3119 _ctl_diag_trigger_master_show(struct device *cdev,
3120 struct device_attribute *attr, char *buf)
3121
3122 {
3123 struct Scsi_Host *shost = class_to_shost(cdev);
3124 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3125 unsigned long flags;
3126 ssize_t rc;
3127
3128 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3129 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3130 memcpy(buf, &ioc->diag_trigger_master, rc);
3131 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3132 return rc;
3133 }
3134
3135 /**
3136 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
3137 * @cdev - pointer to embedded class device
3138 * @buf - the buffer returned
3139 *
3140 * A sysfs 'read/write' shost attribute.
3141 */
3142 static ssize_t
3143 _ctl_diag_trigger_master_store(struct device *cdev,
3144 struct device_attribute *attr, const char *buf, size_t count)
3145
3146 {
3147 struct Scsi_Host *shost = class_to_shost(cdev);
3148 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3149 unsigned long flags;
3150 ssize_t rc;
3151
3152 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3153 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3154 memset(&ioc->diag_trigger_master, 0,
3155 sizeof(struct SL_WH_MASTER_TRIGGER_T));
3156 memcpy(&ioc->diag_trigger_master, buf, rc);
3157 ioc->diag_trigger_master.MasterData |=
3158 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3159 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3160 return rc;
3161 }
3162 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
3163 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
3164
3165
3166 /**
3167 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
3168 * @cdev - pointer to embedded class device
3169 * @buf - the buffer returned
3170 *
3171 * A sysfs 'read/write' shost attribute.
3172 */
3173 static ssize_t
3174 _ctl_diag_trigger_event_show(struct device *cdev,
3175 struct device_attribute *attr, char *buf)
3176 {
3177 struct Scsi_Host *shost = class_to_shost(cdev);
3178 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3179 unsigned long flags;
3180 ssize_t rc;
3181
3182 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3183 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3184 memcpy(buf, &ioc->diag_trigger_event, rc);
3185 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3186 return rc;
3187 }
3188
3189 /**
3190 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
3191 * @cdev - pointer to embedded class device
3192 * @buf - the buffer returned
3193 *
3194 * A sysfs 'read/write' shost attribute.
3195 */
3196 static ssize_t
3197 _ctl_diag_trigger_event_store(struct device *cdev,
3198 struct device_attribute *attr, const char *buf, size_t count)
3199
3200 {
3201 struct Scsi_Host *shost = class_to_shost(cdev);
3202 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3203 unsigned long flags;
3204 ssize_t sz;
3205
3206 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3207 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3208 memset(&ioc->diag_trigger_event, 0,
3209 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3210 memcpy(&ioc->diag_trigger_event, buf, sz);
3211 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3212 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3213 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3214 return sz;
3215 }
3216 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
3217 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
3218
3219
3220 /**
3221 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3222 * @cdev - pointer to embedded class device
3223 * @buf - the buffer returned
3224 *
3225 * A sysfs 'read/write' shost attribute.
3226 */
3227 static ssize_t
3228 _ctl_diag_trigger_scsi_show(struct device *cdev,
3229 struct device_attribute *attr, char *buf)
3230 {
3231 struct Scsi_Host *shost = class_to_shost(cdev);
3232 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3233 unsigned long flags;
3234 ssize_t rc;
3235
3236 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3237 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3238 memcpy(buf, &ioc->diag_trigger_scsi, rc);
3239 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3240 return rc;
3241 }
3242
3243 /**
3244 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3245 * @cdev - pointer to embedded class device
3246 * @buf - the buffer returned
3247 *
3248 * A sysfs 'read/write' shost attribute.
3249 */
3250 static ssize_t
3251 _ctl_diag_trigger_scsi_store(struct device *cdev,
3252 struct device_attribute *attr, const char *buf, size_t count)
3253 {
3254 struct Scsi_Host *shost = class_to_shost(cdev);
3255 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3256 unsigned long flags;
3257 ssize_t sz;
3258
3259 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3260 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
3261 memset(&ioc->diag_trigger_scsi, 0,
3262 sizeof(struct SL_WH_SCSI_TRIGGERS_T));
3263 memcpy(&ioc->diag_trigger_scsi, buf, sz);
3264 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3265 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3266 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3267 return sz;
3268 }
3269 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
3270 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
3271
3272
3273 /**
3274 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3275 * @cdev - pointer to embedded class device
3276 * @buf - the buffer returned
3277 *
3278 * A sysfs 'read/write' shost attribute.
3279 */
3280 static ssize_t
3281 _ctl_diag_trigger_mpi_show(struct device *cdev,
3282 struct device_attribute *attr, char *buf)
3283 {
3284 struct Scsi_Host *shost = class_to_shost(cdev);
3285 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3286 unsigned long flags;
3287 ssize_t rc;
3288
3289 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3290 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3291 memcpy(buf, &ioc->diag_trigger_mpi, rc);
3292 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3293 return rc;
3294 }
3295
3296 /**
3297 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3298 * @cdev - pointer to embedded class device
3299 * @buf - the buffer returned
3300 *
3301 * A sysfs 'read/write' shost attribute.
3302 */
3303 static ssize_t
3304 _ctl_diag_trigger_mpi_store(struct device *cdev,
3305 struct device_attribute *attr, const char *buf, size_t count)
3306 {
3307 struct Scsi_Host *shost = class_to_shost(cdev);
3308 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3309 unsigned long flags;
3310 ssize_t sz;
3311
3312 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3313 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3314 memset(&ioc->diag_trigger_mpi, 0,
3315 sizeof(ioc->diag_trigger_mpi));
3316 memcpy(&ioc->diag_trigger_mpi, buf, sz);
3317 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3318 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3319 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3320 return sz;
3321 }
3322
3323 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3324 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
3325
3326 /*********** diagnostic trigger support *** END *****************************/
3327
3328 /*****************************************/
3329
3330 struct device_attribute *mpt3sas_host_attrs[] = {
3331 &dev_attr_version_fw,
3332 &dev_attr_version_bios,
3333 &dev_attr_version_mpi,
3334 &dev_attr_version_product,
3335 &dev_attr_version_nvdata_persistent,
3336 &dev_attr_version_nvdata_default,
3337 &dev_attr_board_name,
3338 &dev_attr_board_assembly,
3339 &dev_attr_board_tracer,
3340 &dev_attr_io_delay,
3341 &dev_attr_device_delay,
3342 &dev_attr_logging_level,
3343 &dev_attr_fwfault_debug,
3344 &dev_attr_fw_queue_depth,
3345 &dev_attr_host_sas_address,
3346 &dev_attr_ioc_reset_count,
3347 &dev_attr_host_trace_buffer_size,
3348 &dev_attr_host_trace_buffer,
3349 &dev_attr_host_trace_buffer_enable,
3350 &dev_attr_reply_queue_count,
3351 &dev_attr_diag_trigger_master,
3352 &dev_attr_diag_trigger_event,
3353 &dev_attr_diag_trigger_scsi,
3354 &dev_attr_diag_trigger_mpi,
3355 &dev_attr_BRM_status,
3356 NULL,
3357 };
3358
3359 /* device attributes */
3360
3361 /**
3362 * _ctl_device_sas_address_show - sas address
3363 * @dev - pointer to embedded device
3364 * @buf - the buffer returned
3365 *
3366 * This is the sas address for the target
3367 *
3368 * A sysfs 'read-only' sdev attribute.
3369 */
3370 static ssize_t
3371 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
3372 char *buf)
3373 {
3374 struct scsi_device *sdev = to_scsi_device(dev);
3375 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3376
3377 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3378 (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3379 }
3380 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
3381
3382 /**
3383 * _ctl_device_handle_show - device handle
3384 * @dev - pointer to embedded device
3385 * @buf - the buffer returned
3386 *
3387 * This is the firmware assigned device handle
3388 *
3389 * A sysfs 'read-only' sdev attribute.
3390 */
3391 static ssize_t
3392 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3393 char *buf)
3394 {
3395 struct scsi_device *sdev = to_scsi_device(dev);
3396 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3397
3398 return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3399 sas_device_priv_data->sas_target->handle);
3400 }
3401 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3402
3403 /**
3404 * _ctl_device_ncq_prio_enable_show - send prioritized io commands to the device
3405 * @dev - pointer to embedded device
3406 * @buf - the buffer returned
3407 *
3408 * A sysfs 'read/write' sdev attribute, only works with SATA
3409 */
3410 static ssize_t
3411 _ctl_device_ncq_prio_enable_show(struct device *dev,
3412 struct device_attribute *attr, char *buf)
3413 {
3414 struct scsi_device *sdev = to_scsi_device(dev);
3415 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3416
3417 return snprintf(buf, PAGE_SIZE, "%d\n",
3418 sas_device_priv_data->ncq_prio_enable);
3419 }
3420
3421 static ssize_t
3422 _ctl_device_ncq_prio_enable_store(struct device *dev,
3423 struct device_attribute *attr,
3424 const char *buf, size_t count)
3425 {
3426 struct scsi_device *sdev = to_scsi_device(dev);
3427 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3428 bool ncq_prio_enable = false;
3429
3430 if (kstrtobool(buf, &ncq_prio_enable))
3431 return -EINVAL;
3432
3433 if (!scsih_ncq_prio_supp(sdev))
3434 return -EINVAL;
3435
3436 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3437 return strlen(buf);
3438 }
3439 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3440 _ctl_device_ncq_prio_enable_show,
3441 _ctl_device_ncq_prio_enable_store);
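
/*
 * Example (illustrative), for an assumed SATA disk sdX behind this HBA:
 *
 *	echo 1 > /sys/block/sdX/device/sas_ncq_prio_enable
 *
 * The write fails with -EINVAL when the device does not support NCQ
 * priority (see scsih_ncq_prio_supp()).
 */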
3442
3443 struct device_attribute *mpt3sas_dev_attrs[] = {
3444 &dev_attr_sas_address,
3445 &dev_attr_sas_device_handle,
3446 &dev_attr_sas_ncq_prio_enable,
3447 NULL,
3448 };
3449
3450 /* file operations table for mpt3ctl device */
3451 static const struct file_operations ctl_fops = {
3452 .owner = THIS_MODULE,
3453 .unlocked_ioctl = _ctl_ioctl,
3454 .poll = _ctl_poll,
3455 .fasync = _ctl_fasync,
3456 #ifdef CONFIG_COMPAT
3457 .compat_ioctl = _ctl_ioctl_compat,
3458 #endif
3459 };
3460
3461 /* file operations table for mpt2ctl device */
3462 static const struct file_operations ctl_gen2_fops = {
3463 .owner = THIS_MODULE,
3464 .unlocked_ioctl = _ctl_mpt2_ioctl,
3465 .poll = _ctl_poll,
3466 .fasync = _ctl_fasync,
3467 #ifdef CONFIG_COMPAT
3468 .compat_ioctl = _ctl_mpt2_ioctl_compat,
3469 #endif
3470 };
3471
3472 static struct miscdevice ctl_dev = {
3473 .minor = MPT3SAS_MINOR,
3474 .name = MPT3SAS_DEV_NAME,
3475 .fops = &ctl_fops,
3476 };
3477
3478 static struct miscdevice gen2_ctl_dev = {
3479 .minor = MPT2SAS_MINOR,
3480 .name = MPT2SAS_DEV_NAME,
3481 .fops = &ctl_gen2_fops,
3482 };
3483
3484 /**
3485 * mpt3sas_ctl_init - main entry point for ctl.
3486 *
3487 */
3488 void
3489 mpt3sas_ctl_init(ushort hbas_to_enumerate)
3490 {
3491 async_queue = NULL;
3492
3493 /* Don't register the mpt3ctl ioctl device if
3494 * hbas_to_enumerate is one.
3495 */
3496 if (hbas_to_enumerate != 1)
3497 if (misc_register(&ctl_dev) < 0)
3498 pr_err("%s can't register misc device [minor=%d]\n",
3499 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3500
3501 /* Don't register the mpt2ctl ioctl device if
3502 * hbas_to_enumerate is two.
3503 */
3504 if (hbas_to_enumerate != 2)
3505 if (misc_register(&gen2_ctl_dev) < 0)
3506 pr_err("%s can't register misc device [minor=%d]\n",
3507 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3508
3509 init_waitqueue_head(&ctl_poll_wait);
3510 }
3511
3512 /**
3513 * mpt3sas_ctl_exit - exit point for ctl
3514 *
3515 */
3516 void
3517 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3518 {
3519 struct MPT3SAS_ADAPTER *ioc;
3520 int i;
3521
3522 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3523
3524 /* free memory associated to diag buffers */
3525 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3526 if (!ioc->diag_buffer[i])
3527 continue;
3528 if (!(ioc->diag_buffer_status[i] &
3529 MPT3_DIAG_BUFFER_IS_REGISTERED))
3530 continue;
3531 if ((ioc->diag_buffer_status[i] &
3532 MPT3_DIAG_BUFFER_IS_RELEASED))
3533 continue;
3534 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
3535 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
3536 ioc->diag_buffer[i] = NULL;
3537 ioc->diag_buffer_status[i] = 0;
3538 }
3539
3540 kfree(ioc->event_log);
3541 }
3542 if (hbas_to_enumerate != 1)
3543 misc_deregister(&ctl_dev);
3544 if (hbas_to_enumerate != 2)
3545 misc_deregister(&gen2_ctl_dev);
3546 }