1 /*
2 * Management Module Support for MPT (Message Passing Technology) based
3 * controllers
4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6 * Copyright (C) 2012-2014 LSI Corporation
7 * Copyright (C) 2013-2014 Avago Technologies
8 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * NO WARRANTY
21 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25 * solely responsible for determining the appropriateness of using and
26 * distributing the Program and assumes all risks associated with its
27 * exercise of rights under this Agreement, including but not limited to
28 * the risks and costs of program errors, damage to or loss of data,
29 * programs or equipment, and unavailability or interruption of operations.
30
31 * DISCLAIMER OF LIABILITY
32 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39
40 * You should have received a copy of the GNU General Public License
41 * along with this program; if not, write to the Free Software
42 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
43 * USA.
44 */
45
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/compat.h>
55 #include <linux/poll.h>
56
57 #include <linux/io.h>
58 #include <linux/uaccess.h>
59
60 #include "mpt3sas_base.h"
61 #include "mpt3sas_ctl.h"
62
63
64 static struct fasync_struct *async_queue;
65 static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
66
67
68 /**
69 * enum block_state - blocking state
70 * @NON_BLOCKING: non blocking
71 * @BLOCKING: blocking
72 *
73 * These states are for ioctls that need to wait for a response
74 * from firmware, so they probably require sleep.
75 */
76 enum block_state {
77 NON_BLOCKING,
78 BLOCKING,
79 };
80
81 /**
82 * _ctl_display_some_debug - debug routine
83 * @ioc: per adapter object
84 * @smid: system request message index
85 * @calling_function_name: string passed from the calling function
86 * @mpi_reply: reply message frame
87 * Context: none.
88 *
89 * Function for displaying debug info helpful when debugging issues
90 * in this module.
91 */
92 static void
93 _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
94 char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
95 {
96 Mpi2ConfigRequest_t *mpi_request;
97 char *desc = NULL;
98
99 if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
100 return;
101
102 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
103 switch (mpi_request->Function) {
104 case MPI2_FUNCTION_SCSI_IO_REQUEST:
105 {
106 Mpi2SCSIIORequest_t *scsi_request =
107 (Mpi2SCSIIORequest_t *)mpi_request;
108
109 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
110 "scsi_io, cmd(0x%02x), cdb_len(%d)",
111 scsi_request->CDB.CDB32[0],
112 le16_to_cpu(scsi_request->IoFlags) & 0xF);
113 desc = ioc->tmp_string;
114 break;
115 }
116 case MPI2_FUNCTION_SCSI_TASK_MGMT:
117 desc = "task_mgmt";
118 break;
119 case MPI2_FUNCTION_IOC_INIT:
120 desc = "ioc_init";
121 break;
122 case MPI2_FUNCTION_IOC_FACTS:
123 desc = "ioc_facts";
124 break;
125 case MPI2_FUNCTION_CONFIG:
126 {
127 Mpi2ConfigRequest_t *config_request =
128 (Mpi2ConfigRequest_t *)mpi_request;
129
130 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
131 "config, type(0x%02x), ext_type(0x%02x), number(%d)",
132 (config_request->Header.PageType &
133 MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
134 config_request->Header.PageNumber);
135 desc = ioc->tmp_string;
136 break;
137 }
138 case MPI2_FUNCTION_PORT_FACTS:
139 desc = "port_facts";
140 break;
141 case MPI2_FUNCTION_PORT_ENABLE:
142 desc = "port_enable";
143 break;
144 case MPI2_FUNCTION_EVENT_NOTIFICATION:
145 desc = "event_notification";
146 break;
147 case MPI2_FUNCTION_FW_DOWNLOAD:
148 desc = "fw_download";
149 break;
150 case MPI2_FUNCTION_FW_UPLOAD:
151 desc = "fw_upload";
152 break;
153 case MPI2_FUNCTION_RAID_ACTION:
154 desc = "raid_action";
155 break;
156 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
157 {
158 Mpi2SCSIIORequest_t *scsi_request =
159 (Mpi2SCSIIORequest_t *)mpi_request;
160
161 snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
162 "raid_pass, cmd(0x%02x), cdb_len(%d)",
163 scsi_request->CDB.CDB32[0],
164 le16_to_cpu(scsi_request->IoFlags) & 0xF);
165 desc = ioc->tmp_string;
166 break;
167 }
168 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
169 desc = "sas_iounit_cntl";
170 break;
171 case MPI2_FUNCTION_SATA_PASSTHROUGH:
172 desc = "sata_pass";
173 break;
174 case MPI2_FUNCTION_DIAG_BUFFER_POST:
175 desc = "diag_buffer_post";
176 break;
177 case MPI2_FUNCTION_DIAG_RELEASE:
178 desc = "diag_release";
179 break;
180 case MPI2_FUNCTION_SMP_PASSTHROUGH:
181 desc = "smp_passthrough";
182 break;
183 }
184
185 if (!desc)
186 return;
187
188 pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
189 ioc->name, calling_function_name, desc, smid);
190
191 if (!mpi_reply)
192 return;
193
194 if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
195 pr_info(MPT3SAS_FMT
196 "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
197 ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
198 le32_to_cpu(mpi_reply->IOCLogInfo));
199
200 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
201 mpi_request->Function ==
202 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
203 Mpi2SCSIIOReply_t *scsi_reply =
204 (Mpi2SCSIIOReply_t *)mpi_reply;
205 struct _sas_device *sas_device = NULL;
206 struct _pcie_device *pcie_device = NULL;
207
208 sas_device = mpt3sas_get_sdev_by_handle(ioc,
209 le16_to_cpu(scsi_reply->DevHandle));
210 if (sas_device) {
211 pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
212 ioc->name, (unsigned long long)
213 sas_device->sas_address, sas_device->phy);
214 pr_warn(MPT3SAS_FMT
215 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
216 ioc->name, (unsigned long long)
217 sas_device->enclosure_logical_id, sas_device->slot);
218 sas_device_put(sas_device);
219 }
220 if (!sas_device) {
221 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
222 le16_to_cpu(scsi_reply->DevHandle));
223 if (pcie_device) {
224 pr_warn(MPT3SAS_FMT
225 "\tWWID(0x%016llx), port(%d)\n", ioc->name,
226 (unsigned long long)pcie_device->wwid,
227 pcie_device->port_num);
228 if (pcie_device->enclosure_handle != 0)
229 pr_warn(MPT3SAS_FMT
230 "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
231 ioc->name, (unsigned long long)
232 pcie_device->enclosure_logical_id,
233 pcie_device->slot);
234 pcie_device_put(pcie_device);
235 }
236 }
237 if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
238 pr_info(MPT3SAS_FMT
239 "\tscsi_state(0x%02x), scsi_status"
240 "(0x%02x)\n", ioc->name,
241 scsi_reply->SCSIState,
242 scsi_reply->SCSIStatus);
243 }
244 }
245
246 /**
247 * mpt3sas_ctl_done - ctl module completion routine
248 * @ioc: per adapter object
249 * @smid: system request message index
250 * @msix_index: MSIX table index supplied by the OS
251 * @reply: reply message frame(lower 32bit addr)
252 * Context: none.
253 *
254 * The callback handler when using ioc->ctl_cb_idx.
255 *
256 * Return: 1 meaning mf should be freed from _base_interrupt
257 * 0 means the mf is freed from this function.
258 */
259 u8
260 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
261 u32 reply)
262 {
263 MPI2DefaultReply_t *mpi_reply;
264 Mpi2SCSIIOReply_t *scsiio_reply;
265 Mpi26NVMeEncapsulatedErrorReply_t *nvme_error_reply;
266 const void *sense_data;
267 u32 sz;
268
269 if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
270 return 1;
271 if (ioc->ctl_cmds.smid != smid)
272 return 1;
273 ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
274 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
275 if (mpi_reply) {
276 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
277 ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
278 /* get sense data */
279 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
280 mpi_reply->Function ==
281 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
282 scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
283 if (scsiio_reply->SCSIState &
284 MPI2_SCSI_STATE_AUTOSENSE_VALID) {
285 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
286 le32_to_cpu(scsiio_reply->SenseCount));
287 sense_data = mpt3sas_base_get_sense_buffer(ioc,
288 smid);
289 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
290 }
291 }
292 /*
293 * Get Error Response data for NVMe device. The ctl_cmds.sense
294 * buffer is used to store the Error Response data.
295 */
296 if (mpi_reply->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
297 nvme_error_reply =
298 (Mpi26NVMeEncapsulatedErrorReply_t *)mpi_reply;
299 sz = min_t(u32, NVME_ERROR_RESPONSE_SIZE,
300 le16_to_cpu(nvme_error_reply->ErrorResponseCount));
301 sense_data = mpt3sas_base_get_sense_buffer(ioc, smid);
302 memcpy(ioc->ctl_cmds.sense, sense_data, sz);
303 }
304 }
305
306 _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
307 ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
308 complete(&ioc->ctl_cmds.done);
309 return 1;
310 }
311
312 /**
313 * _ctl_check_event_type - determines when an event needs logging
314 * @ioc: per adapter object
315 * @event: firmware event
316 *
317 * The bitmask in ioc->event_type[] indicates which events should be
318 * saved in the driver event_log. This bitmask is set by the application.
319 *
320 * Return: 1 when the event should be captured, 0 when there is no match.
321 */
322 static int
323 _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
324 {
325 u16 i;
326 u32 desired_event;
327
328 if (event >= 128 || !event || !ioc->event_log)
329 return 0;
330
331 desired_event = (1 << (event % 32));
332 if (!desired_event)
333 desired_event = 1;
334 i = event / 32;
335 return desired_event & ioc->event_type[i];
336 }
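
/*
 * Worked example for the word/bit arithmetic above (informational only):
 * event 0x21 (decimal 33) maps to word 33 / 32 = 1 and bit 33 % 32 = 1,
 * so it is captured only when the application has set bit 1 of
 * ioc->event_type[1] via the MPT3EVENTENABLE path handled below.
 */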
337
338 /**
339 * mpt3sas_ctl_add_to_event_log - add event
340 * @ioc: per adapter object
341 * @mpi_reply: reply message frame
342 */
343 void
344 mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
345 Mpi2EventNotificationReply_t *mpi_reply)
346 {
347 struct MPT3_IOCTL_EVENTS *event_log;
348 u16 event;
349 int i;
350 u32 sz, event_data_sz;
351 u8 send_aen = 0;
352
353 if (!ioc->event_log)
354 return;
355
356 event = le16_to_cpu(mpi_reply->Event);
357
358 if (_ctl_check_event_type(ioc, event)) {
359
360 /* insert entry into circular event_log */
361 i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
362 event_log = ioc->event_log;
363 event_log[i].event = event;
364 event_log[i].context = ioc->event_context++;
365
366 event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
367 sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
368 memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
369 memcpy(event_log[i].data, mpi_reply->EventData, sz);
370 send_aen = 1;
371 }
372
373 /* This aen_event_read_flag flag is set until the
374 * application has read the event log.
375 * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
376 */
377 if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
378 (send_aen && !ioc->aen_event_read_flag)) {
379 ioc->aen_event_read_flag = 1;
380 wake_up_interruptible(&ctl_poll_wait);
381 if (async_queue)
382 kill_fasync(&async_queue, SIGIO, POLL_IN);
383 }
384 }
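
/*
 * Note on the circular log above: entries land at index
 * ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE, so once the context
 * counter passes the log size the oldest entries are overwritten; the
 * application is expected to drain the log via MPT3EVENTREPORT before
 * that happens.
 */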
385
386 /**
387 * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
388 * @ioc: per adapter object
389 * @msix_index: MSIX table index supplied by the OS
390 * @reply: reply message frame(lower 32bit addr)
391 * Context: interrupt.
392 *
393 * This function logs the firmware event in the driver's event_log via
394 * mpt3sas_ctl_add_to_event_log().
395 *
396 * Return: 1 meaning mf should be freed from _base_interrupt
397 * 0 means the mf is freed from this function.
398 */
399 u8
400 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
401 u32 reply)
402 {
403 Mpi2EventNotificationReply_t *mpi_reply;
404
405 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
406 if (mpi_reply)
407 mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
408 return 1;
409 }
410
411 /**
412 * _ctl_verify_adapter - validates ioc_number passed from application
413 * @ioc_number: ioc number (adapter index) requested by the application
414 * @iocpp: The ioc pointer is returned in this.
415 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
416 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
417 *
418 * Return: (-1) means error, else ioc_number.
419 */
420 static int
421 _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp,
422 int mpi_version)
423 {
424 struct MPT3SAS_ADAPTER *ioc;
425 int version = 0;
426 /* global ioc lock to protect controller on list operations */
427 spin_lock(&gioc_lock);
428 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
429 if (ioc->id != ioc_number)
430 continue;
431 /* Check whether this ioctl command is from right
432 * ioctl device or not, if not continue the search.
433 */
434 version = ioc->hba_mpi_version_belonged;
435 /* MPI25_VERSION and MPI26_VERSION uses same ioctl
436 * device.
437 */
438 if (mpi_version == (MPI25_VERSION | MPI26_VERSION)) {
439 if ((version == MPI25_VERSION) ||
440 (version == MPI26_VERSION))
441 goto out;
442 else
443 continue;
444 } else {
445 if (version != mpi_version)
446 continue;
447 }
448 out:
449 spin_unlock(&gioc_lock);
450 *iocpp = ioc;
451 return ioc_number;
452 }
453 spin_unlock(&gioc_lock);
454 *iocpp = NULL;
455 return -1;
456 }
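
/*
 * Informational: the ioc_number matched against ioc->id above comes from
 * the common ioctl header the application fills in (presumably
 * struct mpt3_ioctl_header in mpt3sas_ctl.h; the field layout is not
 * visible in this file), which is how one control node can multiplex
 * ioctls across every adapter on mpt3sas_ioc_list.
 */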
457
458 /**
459 * mpt3sas_ctl_pre_reset_handler - reset callback handler (for ctl)
460 * @ioc: per adapter object
461 *
462 * The handler for doing any required cleanup or initialization.
463 */
464 void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
465 {
466 int i;
467 u8 issue_reset;
468
469 dtmprintk(ioc, pr_info(MPT3SAS_FMT
470 "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
471 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
472 if (!(ioc->diag_buffer_status[i] &
473 MPT3_DIAG_BUFFER_IS_REGISTERED))
474 continue;
475 if ((ioc->diag_buffer_status[i] &
476 MPT3_DIAG_BUFFER_IS_RELEASED))
477 continue;
478 mpt3sas_send_diag_release(ioc, i, &issue_reset);
479 }
480 }
481
482 /**
483 * mpt3sas_ctl_after_reset_handler - reset callback handler (for ctl)
484 * @ioc: per adapter object
485 *
486 * The handler for doing any required cleanup or initialization.
487 */
488 void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
489 {
490 dtmprintk(ioc, pr_info(MPT3SAS_FMT
491 "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
492 if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
493 ioc->ctl_cmds.status |= MPT3_CMD_RESET;
494 mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
495 complete(&ioc->ctl_cmds.done);
496 }
497 }
498
499 /**
500 * mpt3sas_ctl_reset_done_handler - reset callback handler (for ctl)
501 * @ioc: per adapter object
502 *
503 * The handler for doing any required cleanup or initialization.
504 */
505 void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
506 {
507 int i;
508
509 dtmprintk(ioc, pr_info(MPT3SAS_FMT
510 "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
511
512 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
513 if (!(ioc->diag_buffer_status[i] &
514 MPT3_DIAG_BUFFER_IS_REGISTERED))
515 continue;
516 if ((ioc->diag_buffer_status[i] &
517 MPT3_DIAG_BUFFER_IS_RELEASED))
518 continue;
519 ioc->diag_buffer_status[i] |=
520 MPT3_DIAG_BUFFER_IS_DIAG_RESET;
521 }
522 }
523
524 /**
525 * _ctl_fasync - fasync callback registration for the ctl device
526 * @fd: ?
527 * @filep: ?
528 * @mode: ?
529 *
530 * Called when an application requests the fasync callback handler.
531 */
532 static int
533 _ctl_fasync(int fd, struct file *filep, int mode)
534 {
535 return fasync_helper(fd, filep, mode, &async_queue);
536 }
537
538 /**
539 * _ctl_poll - poll callback for the ctl device
540 * @filep: ?
541 * @wait: ?
542 *
543 */
544 static __poll_t
545 _ctl_poll(struct file *filep, poll_table *wait)
546 {
547 struct MPT3SAS_ADAPTER *ioc;
548
549 poll_wait(filep, &ctl_poll_wait, wait);
550
551 /* global ioc lock to protect controller on list operations */
552 spin_lock(&gioc_lock);
553 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
554 if (ioc->aen_event_read_flag) {
555 spin_unlock(&gioc_lock);
556 return EPOLLIN | EPOLLRDNORM;
557 }
558 }
559 spin_unlock(&gioc_lock);
560 return 0;
561 }
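
/*
 * Minimal userspace sketch of consuming the AEN notification exposed by
 * _ctl_fasync()/_ctl_poll() above. Illustrative only: it assumes the
 * control node is exposed as /dev/mpt3ctl and that the ioctl definitions
 * in mpt3sas_ctl.h are reachable from userspace; neither is defined here.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *
 *	int wait_for_mpt3_event(void)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = open("/dev/mpt3ctl", O_RDWR);
 *		if (pfd.fd < 0)
 *			return -1;
 *		pfd.events = POLLIN;
 *		// Blocks until some adapter raises aen_event_read_flag.
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *			// Fetch the log with MPT3EVENTREPORT, which also
 *			// clears aen_event_read_flag so SIGIO/poll can rearm.
 *		}
 *		return 0;
 *	}
 */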
562
563 /**
564 * _ctl_set_task_mid - assign an active smid to tm request
565 * @ioc: per adapter object
566 * @karg: (struct mpt3_ioctl_command)
567 * @tm_request: pointer to mf from user space
568 *
569 * Return: 0 when a matching smid is found, else fail.
570 * During failure, the reply frame is filled.
571 */
572 static int
573 _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
574 Mpi2SCSITaskManagementRequest_t *tm_request)
575 {
576 u8 found = 0;
577 u16 smid;
578 u16 handle;
579 struct scsi_cmnd *scmd;
580 struct MPT3SAS_DEVICE *priv_data;
581 Mpi2SCSITaskManagementReply_t *tm_reply;
582 u32 sz;
583 u32 lun;
584 char *desc = NULL;
585
586 if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
587 desc = "abort_task";
588 else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
589 desc = "query_task";
590 else
591 return 0;
592
593 lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
594
595 handle = le16_to_cpu(tm_request->DevHandle);
596 for (smid = ioc->scsiio_depth; smid && !found; smid--) {
597 struct scsiio_tracker *st;
598
599 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
600 if (!scmd)
601 continue;
602 if (lun != scmd->device->lun)
603 continue;
604 priv_data = scmd->device->hostdata;
605 if (priv_data->sas_target == NULL)
606 continue;
607 if (priv_data->sas_target->handle != handle)
608 continue;
609 st = scsi_cmd_priv(scmd);
610 tm_request->TaskMID = cpu_to_le16(st->smid);
611 found = 1;
612 }
613
614 if (!found) {
615 dctlprintk(ioc, pr_info(MPT3SAS_FMT
616 "%s: handle(0x%04x), lun(%d), no active mid!!\n",
617 ioc->name,
618 desc, le16_to_cpu(tm_request->DevHandle), lun));
619 tm_reply = ioc->ctl_cmds.reply;
620 tm_reply->DevHandle = tm_request->DevHandle;
621 tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
622 tm_reply->TaskType = tm_request->TaskType;
623 tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
624 tm_reply->VP_ID = tm_request->VP_ID;
625 tm_reply->VF_ID = tm_request->VF_ID;
626 sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
627 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
628 sz))
629 pr_err("failure at %s:%d/%s()!\n", __FILE__,
630 __LINE__, __func__);
631 return 1;
632 }
633
634 dctlprintk(ioc, pr_info(MPT3SAS_FMT
635 "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
636 desc, le16_to_cpu(tm_request->DevHandle), lun,
637 le16_to_cpu(tm_request->TaskMID)));
638 return 0;
639 }
640
641 /**
642 * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
643 * @ioc: per adapter object
644 * @karg: (struct mpt3_ioctl_command)
645 * @mf: pointer to mf in user space
646 */
647 static long
648 _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
649 void __user *mf)
650 {
651 MPI2RequestHeader_t *mpi_request = NULL, *request;
652 MPI2DefaultReply_t *mpi_reply;
653 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request = NULL;
654 struct _pcie_device *pcie_device = NULL;
655 u32 ioc_state;
656 u16 smid;
657 u8 timeout;
658 u8 issue_reset;
659 u32 sz, sz_arg;
660 void *psge;
661 void *data_out = NULL;
662 dma_addr_t data_out_dma = 0;
663 size_t data_out_sz = 0;
664 void *data_in = NULL;
665 dma_addr_t data_in_dma = 0;
666 size_t data_in_sz = 0;
667 long ret;
668 u16 wait_state_count;
669 u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE;
670 u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
671
672 issue_reset = 0;
673
674 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
675 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
676 ioc->name, __func__);
677 ret = -EAGAIN;
678 goto out;
679 }
680
681 wait_state_count = 0;
682 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
683 while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
684 if (wait_state_count++ == 10) {
685 pr_err(MPT3SAS_FMT
686 "%s: failed due to ioc not operational\n",
687 ioc->name, __func__);
688 ret = -EFAULT;
689 goto out;
690 }
691 ssleep(1);
692 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
693 pr_info(MPT3SAS_FMT
694 "%s: waiting for operational state(count=%d)\n",
695 ioc->name,
696 __func__, wait_state_count);
697 }
698 if (wait_state_count)
699 pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
700 ioc->name, __func__);
701
702 mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
703 if (!mpi_request) {
704 pr_err(MPT3SAS_FMT
705 "%s: failed obtaining a memory for mpi_request\n",
706 ioc->name, __func__);
707 ret = -ENOMEM;
708 goto out;
709 }
710
711 /* Check for overflow and wraparound */
712 if (karg.data_sge_offset * 4 > ioc->request_sz ||
713 karg.data_sge_offset > (UINT_MAX / 4)) {
714 ret = -EINVAL;
715 goto out;
716 }
717
718 /* copy in request message frame from user */
719 if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
720 pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
721 __func__);
722 ret = -EFAULT;
723 goto out;
724 }
725
726 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
727 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
728 if (!smid) {
729 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
730 ioc->name, __func__);
731 ret = -EAGAIN;
732 goto out;
733 }
734 } else {
735 /* Use first reserved smid for passthrough ioctls */
736 smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
737 }
738
739 ret = 0;
740 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
741 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
742 request = mpt3sas_base_get_msg_frame(ioc, smid);
743 memcpy(request, mpi_request, karg.data_sge_offset*4);
744 ioc->ctl_cmds.smid = smid;
745 data_out_sz = karg.data_out_size;
746 data_in_sz = karg.data_in_size;
747
748 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
749 mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
750 mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT ||
751 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH ||
752 mpi_request->Function == MPI2_FUNCTION_NVME_ENCAPSULATED) {
753
754 device_handle = le16_to_cpu(mpi_request->FunctionDependent1);
755 if (!device_handle || (device_handle >
756 ioc->facts.MaxDevHandle)) {
757 ret = -EINVAL;
758 mpt3sas_base_free_smid(ioc, smid);
759 goto out;
760 }
761 }
762
763 /* obtain dma-able memory for data transfer */
764 if (data_out_sz) /* WRITE */ {
765 data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
766 &data_out_dma);
767 if (!data_out) {
768 pr_err("failure at %s:%d/%s()!\n", __FILE__,
769 __LINE__, __func__);
770 ret = -ENOMEM;
771 mpt3sas_base_free_smid(ioc, smid);
772 goto out;
773 }
774 if (copy_from_user(data_out, karg.data_out_buf_ptr,
775 data_out_sz)) {
776 pr_err("failure at %s:%d/%s()!\n", __FILE__,
777 __LINE__, __func__);
778 ret = -EFAULT;
779 mpt3sas_base_free_smid(ioc, smid);
780 goto out;
781 }
782 }
783
784 if (data_in_sz) /* READ */ {
785 data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
786 &data_in_dma);
787 if (!data_in) {
788 pr_err("failure at %s:%d/%s()!\n", __FILE__,
789 __LINE__, __func__);
790 ret = -ENOMEM;
791 mpt3sas_base_free_smid(ioc, smid);
792 goto out;
793 }
794 }
795
796 psge = (void *)request + (karg.data_sge_offset*4);
797
798 /* send command to firmware */
799 _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
800
801 init_completion(&ioc->ctl_cmds.done);
802 switch (mpi_request->Function) {
803 case MPI2_FUNCTION_NVME_ENCAPSULATED:
804 {
805 nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
806 /*
807 * Get the Physical Address of the sense buffer.
808 * Use Error Response buffer address field to hold the sense
809 * buffer address.
810 * Clear the internal sense buffer, which will potentially hold
811 * the Completion Queue Entry on return, or 0 if no Entry.
812 * Build the PRPs and set direction bits.
813 * Send the request.
814 */
815 nvme_encap_request->ErrorResponseBaseAddress =
816 cpu_to_le64(ioc->sense_dma & 0xFFFFFFFF00000000UL);
817 nvme_encap_request->ErrorResponseBaseAddress |=
818 cpu_to_le64(le32_to_cpu(
819 mpt3sas_base_get_sense_buffer_dma(ioc, smid)));
820 nvme_encap_request->ErrorResponseAllocationLength =
821 cpu_to_le16(NVME_ERROR_RESPONSE_SIZE);
822 memset(ioc->ctl_cmds.sense, 0, NVME_ERROR_RESPONSE_SIZE);
823 ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
824 data_out_dma, data_out_sz, data_in_dma, data_in_sz);
825 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
826 dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
827 "ioctl failed due to device removal in progress\n",
828 ioc->name, device_handle));
829 mpt3sas_base_free_smid(ioc, smid);
830 ret = -EINVAL;
831 goto out;
832 }
833 mpt3sas_base_put_smid_nvme_encap(ioc, smid);
834 break;
835 }
836 case MPI2_FUNCTION_SCSI_IO_REQUEST:
837 case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
838 {
839 Mpi2SCSIIORequest_t *scsiio_request =
840 (Mpi2SCSIIORequest_t *)request;
841 scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
842 scsiio_request->SenseBufferLowAddress =
843 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
844 memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
845 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
846 dtmprintk(ioc, pr_info(MPT3SAS_FMT
847 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
848 ioc->name, device_handle));
849 mpt3sas_base_free_smid(ioc, smid);
850 ret = -EINVAL;
851 goto out;
852 }
853 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
854 data_in_dma, data_in_sz);
855 if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
856 ioc->put_smid_scsi_io(ioc, smid, device_handle);
857 else
858 mpt3sas_base_put_smid_default(ioc, smid);
859 break;
860 }
861 case MPI2_FUNCTION_SCSI_TASK_MGMT:
862 {
863 Mpi2SCSITaskManagementRequest_t *tm_request =
864 (Mpi2SCSITaskManagementRequest_t *)request;
865
866 dtmprintk(ioc, pr_info(MPT3SAS_FMT
867 "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
868 ioc->name,
869 le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
870 ioc->got_task_abort_from_ioctl = 1;
871 if (tm_request->TaskType ==
872 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
873 tm_request->TaskType ==
874 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
875 if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
876 mpt3sas_base_free_smid(ioc, smid);
877 ioc->got_task_abort_from_ioctl = 0;
878 goto out;
879 }
880 }
881 ioc->got_task_abort_from_ioctl = 0;
882
883 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
884 dtmprintk(ioc, pr_info(MPT3SAS_FMT
885 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
886 ioc->name, device_handle));
887 mpt3sas_base_free_smid(ioc, smid);
888 ret = -EINVAL;
889 goto out;
890 }
891 mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
892 tm_request->DevHandle));
893 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
894 data_in_dma, data_in_sz);
895 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
896 break;
897 }
898 case MPI2_FUNCTION_SMP_PASSTHROUGH:
899 {
900 Mpi2SmpPassthroughRequest_t *smp_request =
901 (Mpi2SmpPassthroughRequest_t *)mpi_request;
902 u8 *data;
903
904 /* ioc determines which port to use */
905 smp_request->PhysicalPort = 0xFF;
906 if (smp_request->PassthroughFlags &
907 MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
908 data = (u8 *)&smp_request->SGL;
909 else {
910 if (unlikely(data_out == NULL)) {
911 pr_err("failure at %s:%d/%s()!\n",
912 __FILE__, __LINE__, __func__);
913 mpt3sas_base_free_smid(ioc, smid);
914 ret = -EINVAL;
915 goto out;
916 }
917 data = data_out;
918 }
919
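/*
 * SMP PHY CONTROL (frame function 0x91) with a LINK RESET (1) or
 * HARD RESET (2) phy operation will drop the link, so record that a
 * link reset is in progress and mute loginfo messages until the
 * request completes (both flags are cleared after the wait below).
 */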
920 if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
921 ioc->ioc_link_reset_in_progress = 1;
922 ioc->ignore_loginfos = 1;
923 }
924 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
925 data_in_sz);
926 mpt3sas_base_put_smid_default(ioc, smid);
927 break;
928 }
929 case MPI2_FUNCTION_SATA_PASSTHROUGH:
930 {
931 if (test_bit(device_handle, ioc->device_remove_in_progress)) {
932 dtmprintk(ioc, pr_info(MPT3SAS_FMT
933 "handle(0x%04x) :ioctl failed due to device removal in progress\n",
934 ioc->name, device_handle));
935 mpt3sas_base_free_smid(ioc, smid);
936 ret = -EINVAL;
937 goto out;
938 }
939 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
940 data_in_sz);
941 mpt3sas_base_put_smid_default(ioc, smid);
942 break;
943 }
944 case MPI2_FUNCTION_FW_DOWNLOAD:
945 case MPI2_FUNCTION_FW_UPLOAD:
946 {
947 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
948 data_in_sz);
949 mpt3sas_base_put_smid_default(ioc, smid);
950 break;
951 }
952 case MPI2_FUNCTION_TOOLBOX:
953 {
954 Mpi2ToolboxCleanRequest_t *toolbox_request =
955 (Mpi2ToolboxCleanRequest_t *)mpi_request;
956
957 if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
958 ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
959 data_in_dma, data_in_sz);
960 } else {
961 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
962 data_in_dma, data_in_sz);
963 }
964 mpt3sas_base_put_smid_default(ioc, smid);
965 break;
966 }
967 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
968 {
969 Mpi2SasIoUnitControlRequest_t *sasiounit_request =
970 (Mpi2SasIoUnitControlRequest_t *)mpi_request;
971
972 if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
973 || sasiounit_request->Operation ==
974 MPI2_SAS_OP_PHY_LINK_RESET) {
975 ioc->ioc_link_reset_in_progress = 1;
976 ioc->ignore_loginfos = 1;
977 }
978 /* drop to default case for posting the request */
979 }
980 /* fall through */
981 default:
982 ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
983 data_in_dma, data_in_sz);
984 mpt3sas_base_put_smid_default(ioc, smid);
985 break;
986 }
987
988 if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
989 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
990 else
991 timeout = karg.timeout;
992 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
993 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
994 Mpi2SCSITaskManagementRequest_t *tm_request =
995 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
996 mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
997 tm_request->DevHandle));
998 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
999 } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
1000 mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
1001 ioc->ioc_link_reset_in_progress) {
1002 ioc->ioc_link_reset_in_progress = 0;
1003 ioc->ignore_loginfos = 0;
1004 }
1005 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1006 issue_reset =
1007 mpt3sas_base_check_cmd_timeout(ioc,
1008 ioc->ctl_cmds.status, mpi_request,
1009 karg.data_sge_offset);
1010 goto issue_host_reset;
1011 }
1012
1013 mpi_reply = ioc->ctl_cmds.reply;
1014
1015 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
1016 (ioc->logging_level & MPT_DEBUG_TM)) {
1017 Mpi2SCSITaskManagementReply_t *tm_reply =
1018 (Mpi2SCSITaskManagementReply_t *)mpi_reply;
1019
1020 pr_info(MPT3SAS_FMT "TASK_MGMT: " \
1021 "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
1022 "TerminationCount(0x%08x)\n", ioc->name,
1023 le16_to_cpu(tm_reply->IOCStatus),
1024 le32_to_cpu(tm_reply->IOCLogInfo),
1025 le32_to_cpu(tm_reply->TerminationCount));
1026 }
1027
1028 /* copy out xdata to user */
1029 if (data_in_sz) {
1030 if (copy_to_user(karg.data_in_buf_ptr, data_in,
1031 data_in_sz)) {
1032 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1033 __LINE__, __func__);
1034 ret = -ENODATA;
1035 goto out;
1036 }
1037 }
1038
1039 /* copy out reply message frame to user */
1040 if (karg.max_reply_bytes) {
1041 sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
1042 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
1043 sz)) {
1044 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1045 __LINE__, __func__);
1046 ret = -ENODATA;
1047 goto out;
1048 }
1049 }
1050
1051 /* copy out sense/NVMe Error Response to user */
1052 if (karg.max_sense_bytes && (mpi_request->Function ==
1053 MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
1054 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
1055 MPI2_FUNCTION_NVME_ENCAPSULATED)) {
1056 if (karg.sense_data_ptr == NULL) {
1057 pr_info(MPT3SAS_FMT "Response buffer provided"
1058 " by application is NULL; Response data will"
1059 " not be returned.\n", ioc->name);
1060 goto out;
1061 }
1062 sz_arg = (mpi_request->Function ==
1063 MPI2_FUNCTION_NVME_ENCAPSULATED) ? NVME_ERROR_RESPONSE_SIZE :
1064 SCSI_SENSE_BUFFERSIZE;
1065 sz = min_t(u32, karg.max_sense_bytes, sz_arg);
1066 if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
1067 sz)) {
1068 pr_err("failure at %s:%d/%s()!\n", __FILE__,
1069 __LINE__, __func__);
1070 ret = -ENODATA;
1071 goto out;
1072 }
1073 }
1074
1075 issue_host_reset:
1076 if (issue_reset) {
1077 ret = -ENODATA;
1078 if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
1079 mpi_request->Function ==
1080 MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
1081 mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
1082 pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
1083 ioc->name,
1084 le16_to_cpu(mpi_request->FunctionDependent1));
1085 mpt3sas_halt_firmware(ioc);
1086 pcie_device = mpt3sas_get_pdev_by_handle(ioc,
1087 le16_to_cpu(mpi_request->FunctionDependent1));
1088 if (pcie_device && (!ioc->tm_custom_handling))
1089 mpt3sas_scsih_issue_locked_tm(ioc,
1090 le16_to_cpu(mpi_request->FunctionDependent1),
1091 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1092 0, pcie_device->reset_timeout,
1093 tr_method);
1094 else
1095 mpt3sas_scsih_issue_locked_tm(ioc,
1096 le16_to_cpu(mpi_request->FunctionDependent1),
1097 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
1098 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
1099 } else
1100 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1101 }
1102
1103 out:
1104 if (pcie_device)
1105 pcie_device_put(pcie_device);
1106
1107 /* free memory associated with sg buffers */
1108 if (data_in)
1109 pci_free_consistent(ioc->pdev, data_in_sz, data_in,
1110 data_in_dma);
1111
1112 if (data_out)
1113 pci_free_consistent(ioc->pdev, data_out_sz, data_out,
1114 data_out_dma);
1115
1116 kfree(mpi_request);
1117 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1118 return ret;
1119 }
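
/*
 * Minimal userspace sketch of driving the MPT3COMMAND path above.
 * Illustrative only: the struct layout and the trailing mf[] member are
 * assumptions taken from mpt3sas_ctl.h rather than anything defined in
 * this file; only the fields _ctl_do_mpt_command() reads (timeout,
 * data_sge_offset, the buffer pointers and sizes) are grounded here.
 *
 *	// fd: open control node; req/req_sz: MPI request frame up to (but
 *	// not including) the SGL, which the driver builds itself;
 *	// reply/reply_sz: buffer for the MPI reply frame.
 *	static int issue_mpt3_passthrough(int fd, void *req, size_t req_sz,
 *					  void *reply, size_t reply_sz)
 *	{
 *		struct mpt3_ioctl_command *cmd;
 *		int rc;
 *
 *		cmd = calloc(1, sizeof(*cmd) + req_sz);
 *		if (!cmd)
 *			return -1;
 *		cmd->timeout = 30;		   // seconds; driver enforces a minimum
 *		cmd->data_sge_offset = req_sz / 4; // SGL offset in 4-byte words
 *		cmd->max_reply_bytes = reply_sz;
 *		cmd->reply_frame_buf_ptr = reply;
 *		memcpy(cmd->mf, req, req_sz);	   // mf[] name assumed, see above
 *		rc = ioctl(fd, MPT3COMMAND, cmd);
 *		free(cmd);
 *		return rc;
 *	}
 */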
1120
1121 /**
1122 * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
1123 * @ioc: per adapter object
1124 * @arg: user space buffer containing ioctl content
1125 */
1126 static long
1127 _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1128 {
1129 struct mpt3_ioctl_iocinfo karg;
1130
1131 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1132 __func__));
1133
1134 memset(&karg, 0 , sizeof(karg));
1135 if (ioc->pfacts)
1136 karg.port_number = ioc->pfacts[0].PortNumber;
1137 karg.hw_rev = ioc->pdev->revision;
1138 karg.pci_id = ioc->pdev->device;
1139 karg.subsystem_device = ioc->pdev->subsystem_device;
1140 karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
1141 karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
1142 karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
1143 karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
1144 karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
1145 karg.firmware_version = ioc->facts.FWVersion.Word;
1146 strcpy(karg.driver_version, ioc->driver_name);
1147 strcat(karg.driver_version, "-");
1148 switch (ioc->hba_mpi_version_belonged) {
1149 case MPI2_VERSION:
1150 if (ioc->is_warpdrive)
1151 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200;
1152 else
1153 karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2;
1154 strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION);
1155 break;
1156 case MPI25_VERSION:
1157 case MPI26_VERSION:
1158 if (ioc->is_gen35_ioc)
1159 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS35;
1160 else
1161 karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
1162 strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
1163 break;
1164 }
1165 karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
1166
1167 if (copy_to_user(arg, &karg, sizeof(karg))) {
1168 pr_err("failure at %s:%d/%s()!\n",
1169 __FILE__, __LINE__, __func__);
1170 return -EFAULT;
1171 }
1172 return 0;
1173 }
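
/*
 * Userspace view of the MPT3IOCINFO handler above (illustrative sketch;
 * the control-node path is an assumption and the struct/opcode come from
 * mpt3sas_ctl.h):
 *
 *	struct mpt3_ioctl_iocinfo info = { 0 };
 *
 *	if (ioctl(fd, MPT3IOCINFO, &info) == 0)
 *		printf("fw 0x%08x bios 0x%08x driver %s\n",
 *		       info.firmware_version, info.bios_version,
 *		       info.driver_version);
 */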
1174
1175 /**
1176 * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
1177 * @ioc: per adapter object
1178 * @arg: user space buffer containing ioctl content
1179 */
1180 static long
1181 _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1182 {
1183 struct mpt3_ioctl_eventquery karg;
1184
1185 if (copy_from_user(&karg, arg, sizeof(karg))) {
1186 pr_err("failure at %s:%d/%s()!\n",
1187 __FILE__, __LINE__, __func__);
1188 return -EFAULT;
1189 }
1190
1191 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1192 __func__));
1193
1194 karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
1195 memcpy(karg.event_types, ioc->event_type,
1196 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1197
1198 if (copy_to_user(arg, &karg, sizeof(karg))) {
1199 pr_err("failure at %s:%d/%s()!\n",
1200 __FILE__, __LINE__, __func__);
1201 return -EFAULT;
1202 }
1203 return 0;
1204 }
1205
1206 /**
1207 * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
1208 * @ioc: per adapter object
1209 * @arg: user space buffer containing ioctl content
1210 */
1211 static long
1212 _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1213 {
1214 struct mpt3_ioctl_eventenable karg;
1215
1216 if (copy_from_user(&karg, arg, sizeof(karg))) {
1217 pr_err("failure at %s:%d/%s()!\n",
1218 __FILE__, __LINE__, __func__);
1219 return -EFAULT;
1220 }
1221
1222 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1223 __func__));
1224
1225 memcpy(ioc->event_type, karg.event_types,
1226 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
1227 mpt3sas_base_validate_event_type(ioc, ioc->event_type);
1228
1229 if (ioc->event_log)
1230 return 0;
1231 /* initialize event_log */
1232 ioc->event_context = 0;
1233 ioc->aen_event_read_flag = 0;
1234 ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
1235 sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
1236 if (!ioc->event_log) {
1237 pr_err("failure at %s:%d/%s()!\n",
1238 __FILE__, __LINE__, __func__);
1239 return -ENOMEM;
1240 }
1241 return 0;
1242 }
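
/*
 * Illustrative sketch of arming the event log through the MPT3EVENTENABLE
 * handler above (struct/opcode from mpt3sas_ctl.h). The word/bit layout
 * mirrors _ctl_check_event_type():
 *
 *	struct mpt3_ioctl_eventenable enable = { 0 };
 *	unsigned int event = 0x21;	// example firmware event code
 *
 *	enable.event_types[event / 32] |= 1u << (event % 32);
 *	ioctl(fd, MPT3EVENTENABLE, &enable);
 */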
1243
1244 /**
1245 * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
1246 * @ioc: per adapter object
1247 * @arg: user space buffer containing ioctl content
1248 */
1249 static long
1250 _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1251 {
1252 struct mpt3_ioctl_eventreport karg;
1253 u32 number_bytes, max_events, max;
1254 struct mpt3_ioctl_eventreport __user *uarg = arg;
1255
1256 if (copy_from_user(&karg, arg, sizeof(karg))) {
1257 pr_err("failure at %s:%d/%s()!\n",
1258 __FILE__, __LINE__, __func__);
1259 return -EFAULT;
1260 }
1261
1262 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1263 __func__));
1264
1265 number_bytes = karg.hdr.max_data_size -
1266 sizeof(struct mpt3_ioctl_header);
1267 max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
1268 max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
1269
1270 /* If fewer than 1 event is requested, there must have
1271 * been some type of error.
1272 */
1273 if (!max || !ioc->event_log)
1274 return -ENODATA;
1275
1276 number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
1277 if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
1278 pr_err("failure at %s:%d/%s()!\n",
1279 __FILE__, __LINE__, __func__);
1280 return -EFAULT;
1281 }
1282
1283 /* reset flag so SIGIO can restart */
1284 ioc->aen_event_read_flag = 0;
1285 return 0;
1286 }
1287
1288 /**
1289 * _ctl_do_reset - main handler for MPT3HARDRESET opcode
1290 * @ioc: per adapter object
1291 * @arg: user space buffer containing ioctl content
1292 */
1293 static long
1294 _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1295 {
1296 struct mpt3_ioctl_diag_reset karg;
1297 int retval;
1298
1299 if (copy_from_user(&karg, arg, sizeof(karg))) {
1300 pr_err("failure at %s:%d/%s()!\n",
1301 __FILE__, __LINE__, __func__);
1302 return -EFAULT;
1303 }
1304
1305 if (ioc->shost_recovery || ioc->pci_error_recovery ||
1306 ioc->is_driver_loading)
1307 return -EAGAIN;
1308
1309 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1310 __func__));
1311
1312 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1313 pr_info(MPT3SAS_FMT "host reset: %s\n",
1314 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1315 return 0;
1316 }
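
/*
 * Illustrative sketch for the MPT3HARDRESET handler above (struct/opcode
 * from mpt3sas_ctl.h). Note that the handler returns 0 even when the
 * reset itself fails, so the outcome must be read from the kernel log
 * rather than from the ioctl return value:
 *
 *	struct mpt3_ioctl_diag_reset reset = { 0 };
 *
 *	ioctl(fd, MPT3HARDRESET, &reset);
 */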
1317
1318 /**
1319 * _ctl_btdh_search_sas_device - searching for sas device
1320 * @ioc: per adapter object
1321 * @btdh: btdh ioctl payload
1322 */
1323 static int
1324 _ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
1325 struct mpt3_ioctl_btdh_mapping *btdh)
1326 {
1327 struct _sas_device *sas_device;
1328 unsigned long flags;
1329 int rc = 0;
1330
1331 if (list_empty(&ioc->sas_device_list))
1332 return rc;
1333
1334 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1335 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
1336 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1337 btdh->handle == sas_device->handle) {
1338 btdh->bus = sas_device->channel;
1339 btdh->id = sas_device->id;
1340 rc = 1;
1341 goto out;
1342 } else if (btdh->bus == sas_device->channel && btdh->id ==
1343 sas_device->id && btdh->handle == 0xFFFF) {
1344 btdh->handle = sas_device->handle;
1345 rc = 1;
1346 goto out;
1347 }
1348 }
1349 out:
1350 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1351 return rc;
1352 }
1353
1354 /**
1355 * _ctl_btdh_search_pcie_device - searching for pcie device
1356 * @ioc: per adapter object
1357 * @btdh: btdh ioctl payload
1358 */
1359 static int
1360 _ctl_btdh_search_pcie_device(struct MPT3SAS_ADAPTER *ioc,
1361 struct mpt3_ioctl_btdh_mapping *btdh)
1362 {
1363 struct _pcie_device *pcie_device;
1364 unsigned long flags;
1365 int rc = 0;
1366
1367 if (list_empty(&ioc->pcie_device_list))
1368 return rc;
1369
1370 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1371 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1372 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1373 btdh->handle == pcie_device->handle) {
1374 btdh->bus = pcie_device->channel;
1375 btdh->id = pcie_device->id;
1376 rc = 1;
1377 goto out;
1378 } else if (btdh->bus == pcie_device->channel && btdh->id ==
1379 pcie_device->id && btdh->handle == 0xFFFF) {
1380 btdh->handle = pcie_device->handle;
1381 rc = 1;
1382 goto out;
1383 }
1384 }
1385 out:
1386 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1387 return rc;
1388 }
1389
1390 /**
1391 * _ctl_btdh_search_raid_device - searching for raid device
1392 * @ioc: per adapter object
1393 * @btdh: btdh ioctl payload
1394 */
1395 static int
1396 _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
1397 struct mpt3_ioctl_btdh_mapping *btdh)
1398 {
1399 struct _raid_device *raid_device;
1400 unsigned long flags;
1401 int rc = 0;
1402
1403 if (list_empty(&ioc->raid_device_list))
1404 return rc;
1405
1406 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1407 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1408 if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
1409 btdh->handle == raid_device->handle) {
1410 btdh->bus = raid_device->channel;
1411 btdh->id = raid_device->id;
1412 rc = 1;
1413 goto out;
1414 } else if (btdh->bus == raid_device->channel && btdh->id ==
1415 raid_device->id && btdh->handle == 0xFFFF) {
1416 btdh->handle = raid_device->handle;
1417 rc = 1;
1418 goto out;
1419 }
1420 }
1421 out:
1422 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1423 return rc;
1424 }
1425
1426 /**
1427 * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
1428 * @ioc: per adapter object
1429 * @arg: user space buffer containing ioctl content
1430 */
1431 static long
1432 _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1433 {
1434 struct mpt3_ioctl_btdh_mapping karg;
1435 int rc;
1436
1437 if (copy_from_user(&karg, arg, sizeof(karg))) {
1438 pr_err("failure at %s:%d/%s()!\n",
1439 __FILE__, __LINE__, __func__);
1440 return -EFAULT;
1441 }
1442
1443 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1444 __func__));
1445
1446 rc = _ctl_btdh_search_sas_device(ioc, &karg);
1447 if (!rc)
1448 rc = _ctl_btdh_search_pcie_device(ioc, &karg);
1449 if (!rc)
1450 _ctl_btdh_search_raid_device(ioc, &karg);
1451
1452 if (copy_to_user(arg, &karg, sizeof(karg))) {
1453 pr_err("failure at %s:%d/%s()!\n",
1454 __FILE__, __LINE__, __func__);
1455 return -EFAULT;
1456 }
1457 return 0;
1458 }
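
/*
 * Illustrative use of the MPT3BTDHMAPPING handler above (struct/opcode
 * from mpt3sas_ctl.h; dev_handle is a firmware device handle obtained
 * elsewhere). The wildcard convention comes from the search helpers:
 * bus/id of 0xFFFFFFFF means "translate handle to bus/id", while a
 * handle of 0xFFFF means the reverse.
 *
 *	struct mpt3_ioctl_btdh_mapping map = { 0 };
 *
 *	map.bus = 0xFFFFFFFF;
 *	map.id = 0xFFFFFFFF;
 *	map.handle = dev_handle;
 *	if (ioctl(fd, MPT3BTDHMAPPING, &map) == 0)
 *		printf("handle 0x%04x -> %u:%u\n", dev_handle, map.bus, map.id);
 */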
1459
1460 /**
1461 * _ctl_diag_capability - return diag buffer capability
1462 * @ioc: per adapter object
1463 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1464 *
1465 * Return: 1 when diag buffer support is enabled in firmware
1466 */
1467 static u8
1468 _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
1469 {
1470 u8 rc = 0;
1471
1472 switch (buffer_type) {
1473 case MPI2_DIAG_BUF_TYPE_TRACE:
1474 if (ioc->facts.IOCCapabilities &
1475 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
1476 rc = 1;
1477 break;
1478 case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
1479 if (ioc->facts.IOCCapabilities &
1480 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
1481 rc = 1;
1482 break;
1483 case MPI2_DIAG_BUF_TYPE_EXTENDED:
1484 if (ioc->facts.IOCCapabilities &
1485 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
1486 rc = 1;
1487 }
1488
1489 return rc;
1490 }
1491
1492
1493 /**
1494 * _ctl_diag_register_2 - wrapper for registering diag buffer support
1495 * @ioc: per adapter object
1496 * @diag_register: the diag_register struct passed in from user space
1497 *
1498 */
1499 static long
1500 _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1501 struct mpt3_diag_register *diag_register)
1502 {
1503 int rc, i;
1504 void *request_data = NULL;
1505 dma_addr_t request_data_dma;
1506 u32 request_data_sz = 0;
1507 Mpi2DiagBufferPostRequest_t *mpi_request;
1508 Mpi2DiagBufferPostReply_t *mpi_reply;
1509 u8 buffer_type;
1510 u16 smid;
1511 u16 ioc_status;
1512 u32 ioc_state;
1513 u8 issue_reset = 0;
1514
1515 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1516 __func__));
1517
1518 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1519 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1520 pr_err(MPT3SAS_FMT
1521 "%s: failed due to ioc not operational\n",
1522 ioc->name, __func__);
1523 rc = -EAGAIN;
1524 goto out;
1525 }
1526
1527 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1528 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1529 ioc->name, __func__);
1530 rc = -EAGAIN;
1531 goto out;
1532 }
1533
1534 buffer_type = diag_register->buffer_type;
1535 if (!_ctl_diag_capability(ioc, buffer_type)) {
1536 pr_err(MPT3SAS_FMT
1537 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1538 ioc->name, __func__, buffer_type);
1539 return -EPERM;
1540 }
1541
1542 if (ioc->diag_buffer_status[buffer_type] &
1543 MPT3_DIAG_BUFFER_IS_REGISTERED) {
1544 pr_err(MPT3SAS_FMT
1545 "%s: already has a registered buffer for buffer_type(0x%02x)\n",
1546 ioc->name, __func__,
1547 buffer_type);
1548 return -EINVAL;
1549 }
1550
1551 if (diag_register->requested_buffer_size % 4) {
1552 pr_err(MPT3SAS_FMT
1553 "%s: the requested_buffer_size is not 4 byte aligned\n",
1554 ioc->name, __func__);
1555 return -EINVAL;
1556 }
1557
1558 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1559 if (!smid) {
1560 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1561 ioc->name, __func__);
1562 rc = -EAGAIN;
1563 goto out;
1564 }
1565
1566 rc = 0;
1567 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1568 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1569 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1570 ioc->ctl_cmds.smid = smid;
1571
1572 request_data = ioc->diag_buffer[buffer_type];
1573 request_data_sz = diag_register->requested_buffer_size;
1574 ioc->unique_id[buffer_type] = diag_register->unique_id;
1575 ioc->diag_buffer_status[buffer_type] = 0;
1576 memcpy(ioc->product_specific[buffer_type],
1577 diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
1578 ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
1579
1580 if (request_data) {
1581 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1582 if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
1583 pci_free_consistent(ioc->pdev,
1584 ioc->diag_buffer_sz[buffer_type],
1585 request_data, request_data_dma);
1586 request_data = NULL;
1587 }
1588 }
1589
1590 if (request_data == NULL) {
1591 ioc->diag_buffer_sz[buffer_type] = 0;
1592 ioc->diag_buffer_dma[buffer_type] = 0;
1593 request_data = pci_alloc_consistent(
1594 ioc->pdev, request_data_sz, &request_data_dma);
1595 if (request_data == NULL) {
1596 pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
1597 " for diag buffers, requested size(%d)\n",
1598 ioc->name, __func__, request_data_sz);
1599 mpt3sas_base_free_smid(ioc, smid);
1600 return -ENOMEM;
1601 }
1602 ioc->diag_buffer[buffer_type] = request_data;
1603 ioc->diag_buffer_sz[buffer_type] = request_data_sz;
1604 ioc->diag_buffer_dma[buffer_type] = request_data_dma;
1605 }
1606
1607 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
1608 mpi_request->BufferType = diag_register->buffer_type;
1609 mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
1610 mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
1611 mpi_request->BufferLength = cpu_to_le32(request_data_sz);
1612 mpi_request->VF_ID = 0; /* TODO */
1613 mpi_request->VP_ID = 0;
1614
1615 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1616 "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
1617 ioc->name, __func__, request_data,
1618 (unsigned long long)request_data_dma,
1619 le32_to_cpu(mpi_request->BufferLength)));
1620
1621 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1622 mpi_request->ProductSpecific[i] =
1623 cpu_to_le32(ioc->product_specific[buffer_type][i]);
1624
1625 init_completion(&ioc->ctl_cmds.done);
1626 mpt3sas_base_put_smid_default(ioc, smid);
1627 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1628 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1629
1630 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1631 issue_reset =
1632 mpt3sas_base_check_cmd_timeout(ioc,
1633 ioc->ctl_cmds.status, mpi_request,
1634 sizeof(Mpi2DiagBufferPostRequest_t)/4);
1635 goto issue_host_reset;
1636 }
1637
1638 /* process the completed Reply Message Frame */
1639 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1640 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1641 ioc->name, __func__);
1642 rc = -EFAULT;
1643 goto out;
1644 }
1645
1646 mpi_reply = ioc->ctl_cmds.reply;
1647 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1648
1649 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1650 ioc->diag_buffer_status[buffer_type] |=
1651 MPT3_DIAG_BUFFER_IS_REGISTERED;
1652 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1653 ioc->name, __func__));
1654 } else {
1655 pr_info(MPT3SAS_FMT
1656 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
1657 ioc->name, __func__,
1658 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
1659 rc = -EFAULT;
1660 }
1661
1662 issue_host_reset:
1663 if (issue_reset)
1664 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1665
1666 out:
1667
1668 if (rc && request_data)
1669 pci_free_consistent(ioc->pdev, request_data_sz,
1670 request_data, request_data_dma);
1671
1672 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
1673 return rc;
1674 }
1675
1676 /**
1677 * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time
1678 * @ioc: per adapter object
1679 * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1, and extended is bit 2
1680 *
1681 * This is called when command line option diag_buffer_enable is enabled
1682 * at driver load time.
1683 */
1684 void
1685 mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
1686 {
1687 struct mpt3_diag_register diag_register;
1688
1689 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
1690
1691 if (bits_to_register & 1) {
1692 pr_info(MPT3SAS_FMT "registering trace buffer support\n",
1693 ioc->name);
1694 ioc->diag_trigger_master.MasterData =
1695 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
1696 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
1697 /* register for 2MB buffers */
1698 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1699 diag_register.unique_id = 0x7075900;
1700 _ctl_diag_register_2(ioc, &diag_register);
1701 }
1702
1703 if (bits_to_register & 2) {
1704 pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
1705 ioc->name);
1706 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
1707 /* register for 2MB buffers */
1708 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1709 diag_register.unique_id = 0x7075901;
1710 _ctl_diag_register_2(ioc, &diag_register);
1711 }
1712
1713 if (bits_to_register & 4) {
1714 pr_info(MPT3SAS_FMT "registering extended buffer support\n",
1715 ioc->name);
1716 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
1717 /* register for 2MB buffers */
1718 diag_register.requested_buffer_size = 2 * (1024 * 1024);
1719 diag_register.unique_id = 0x7075901;
1720 _ctl_diag_register_2(ioc, &diag_register);
1721 }
1722 }
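
/*
 * Example (hedged): with the diag_buffer_enable option mentioned in the
 * kernel-doc above, loading the driver with bit 0 set registers a 2 MB
 * trace buffer at start of day, e.g.:
 *
 *	modprobe mpt3sas diag_buffer_enable=1
 *
 * Bits 1 and 2 request the snapshot and extended buffers the same way;
 * the parameter plumbing itself lives outside this file.
 */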
1723
1724 /**
1725 * _ctl_diag_register - application register with driver
1726 * @ioc: per adapter object
1727 * @arg: user space buffer containing ioctl content
1728 *
1729 * This will allow the driver to setup any required buffers that will be
1730 * needed by firmware to communicate with the driver.
1731 */
1732 static long
1733 _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1734 {
1735 struct mpt3_diag_register karg;
1736 long rc;
1737
1738 if (copy_from_user(&karg, arg, sizeof(karg))) {
1739 pr_err("failure at %s:%d/%s()!\n",
1740 __FILE__, __LINE__, __func__);
1741 return -EFAULT;
1742 }
1743
1744 rc = _ctl_diag_register_2(ioc, &karg);
1745 return rc;
1746 }
1747
1748 /**
1749 * _ctl_diag_unregister - application unregister with driver
1750 * @ioc: per adapter object
1751 * @arg: user space buffer containing ioctl content
1752 *
1753 * This allows the driver to clean up any memory allocated for diag
1754 * messages and to free up any resources.
1755 */
1756 static long
1757 _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1758 {
1759 struct mpt3_diag_unregister karg;
1760 void *request_data;
1761 dma_addr_t request_data_dma;
1762 u32 request_data_sz;
1763 u8 buffer_type;
1764
1765 if (copy_from_user(&karg, arg, sizeof(karg))) {
1766 pr_err("failure at %s:%d/%s()!\n",
1767 __FILE__, __LINE__, __func__);
1768 return -EFAULT;
1769 }
1770
1771 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1772 __func__));
1773
1774 buffer_type = karg.unique_id & 0x000000ff;
1775 if (!_ctl_diag_capability(ioc, buffer_type)) {
1776 pr_err(MPT3SAS_FMT
1777 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1778 ioc->name, __func__, buffer_type);
1779 return -EPERM;
1780 }
1781
1782 if ((ioc->diag_buffer_status[buffer_type] &
1783 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1784 pr_err(MPT3SAS_FMT
1785 "%s: buffer_type(0x%02x) is not registered\n",
1786 ioc->name, __func__, buffer_type);
1787 return -EINVAL;
1788 }
1789 if ((ioc->diag_buffer_status[buffer_type] &
1790 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
1791 pr_err(MPT3SAS_FMT
1792 "%s: buffer_type(0x%02x) has not been released\n",
1793 ioc->name, __func__, buffer_type);
1794 return -EINVAL;
1795 }
1796
1797 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1798 pr_err(MPT3SAS_FMT
1799 "%s: unique_id(0x%08x) is not registered\n",
1800 ioc->name, __func__, karg.unique_id);
1801 return -EINVAL;
1802 }
1803
1804 request_data = ioc->diag_buffer[buffer_type];
1805 if (!request_data) {
1806 pr_err(MPT3SAS_FMT
1807 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
1808 ioc->name, __func__, buffer_type);
1809 return -ENOMEM;
1810 }
1811
1812 request_data_sz = ioc->diag_buffer_sz[buffer_type];
1813 request_data_dma = ioc->diag_buffer_dma[buffer_type];
1814 pci_free_consistent(ioc->pdev, request_data_sz,
1815 request_data, request_data_dma);
1816 ioc->diag_buffer[buffer_type] = NULL;
1817 ioc->diag_buffer_status[buffer_type] = 0;
1818 return 0;
1819 }
1820
1821 /**
1822 * _ctl_diag_query - query relevant info associated with diag buffers
1823 * @ioc: per adapter object
1824 * @arg: user space buffer containing ioctl content
1825 *
1826 * The application sends only buffer_type and unique_id. The driver
1827 * inspects unique_id first and, if valid, fills in all the info. If
1828 * unique_id is 0x00, the driver returns the info for the specified buffer_type.
1829 */
1830 static long
1831 _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1832 {
1833 struct mpt3_diag_query karg;
1834 void *request_data;
1835 int i;
1836 u8 buffer_type;
1837
1838 if (copy_from_user(&karg, arg, sizeof(karg))) {
1839 pr_err("failure at %s:%d/%s()!\n",
1840 __FILE__, __LINE__, __func__);
1841 return -EFAULT;
1842 }
1843
1844 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1845 __func__));
1846
1847 karg.application_flags = 0;
1848 buffer_type = karg.buffer_type;
1849
1850 if (!_ctl_diag_capability(ioc, buffer_type)) {
1851 pr_err(MPT3SAS_FMT
1852 "%s: doesn't have capability for buffer_type(0x%02x)\n",
1853 ioc->name, __func__, buffer_type);
1854 return -EPERM;
1855 }
1856
1857 if ((ioc->diag_buffer_status[buffer_type] &
1858 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
1859 pr_err(MPT3SAS_FMT
1860 "%s: buffer_type(0x%02x) is not registered\n",
1861 ioc->name, __func__, buffer_type);
1862 return -EINVAL;
1863 }
1864
1865 if (karg.unique_id & 0xffffff00) {
1866 if (karg.unique_id != ioc->unique_id[buffer_type]) {
1867 pr_err(MPT3SAS_FMT
1868 "%s: unique_id(0x%08x) is not registered\n",
1869 ioc->name, __func__, karg.unique_id);
1870 return -EINVAL;
1871 }
1872 }
1873
1874 request_data = ioc->diag_buffer[buffer_type];
1875 if (!request_data) {
1876 pr_err(MPT3SAS_FMT
1877 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
1878 ioc->name, __func__, buffer_type);
1879 return -ENOMEM;
1880 }
1881
1882 if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
1883 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1884 MPT3_APP_FLAGS_BUFFER_VALID);
1885 else
1886 karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
1887 MPT3_APP_FLAGS_BUFFER_VALID |
1888 MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
1889
1890 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
1891 karg.product_specific[i] =
1892 ioc->product_specific[buffer_type][i];
1893
1894 karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
1895 karg.driver_added_buffer_size = 0;
1896 karg.unique_id = ioc->unique_id[buffer_type];
1897 karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
1898
1899 if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
1900 pr_err(MPT3SAS_FMT
1901 "%s: unable to write mpt3_diag_query data @ %p\n",
1902 ioc->name, __func__, arg);
1903 return -EFAULT;
1904 }
1905 return 0;
1906 }
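
/*
 * Minimal user-space sketch, assuming the mpt3_diag_query layout from the
 * mpt3sas_ctl.h header and an fd already opened on /dev/mpt3ctl; passing a
 * unique_id of 0 queries by buffer type alone, and the driver fills in the
 * registered unique_id and total size as described above:
 *
 *	struct mpt3_diag_query karg;
 *
 *	memset(&karg, 0, sizeof(karg));
 *	karg.hdr.ioc_number = ioc_number;
 *	karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
 *	karg.unique_id = 0;
 *	if (!ioctl(fd, MPT3DIAGQUERY, &karg))
 *		printf("unique_id(0x%08x) size(%u)\n",
 *		    karg.unique_id, karg.total_buffer_size);
 */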
1907
1908 /**
1909 * mpt3sas_send_diag_release - Diag Release Message
1910 * @ioc: per adapter object
1911 * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
1912 * @issue_reset: set to 1 on return when a host reset is required.
1913 *
1914 */
1915 int
1916 mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1917 u8 *issue_reset)
1918 {
1919 Mpi2DiagReleaseRequest_t *mpi_request;
1920 Mpi2DiagReleaseReply_t *mpi_reply;
1921 u16 smid;
1922 u16 ioc_status;
1923 u32 ioc_state;
1924 int rc;
1925
1926 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1927 __func__));
1928
1929 rc = 0;
1930 *issue_reset = 0;
1931
1932 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
1933 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
1934 if (ioc->diag_buffer_status[buffer_type] &
1935 MPT3_DIAG_BUFFER_IS_REGISTERED)
1936 ioc->diag_buffer_status[buffer_type] |=
1937 MPT3_DIAG_BUFFER_IS_RELEASED;
1938 dctlprintk(ioc, pr_info(MPT3SAS_FMT
1939 "%s: skipping due to FAULT state\n", ioc->name,
1940 __func__));
1941 rc = -EAGAIN;
1942 goto out;
1943 }
1944
1945 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
1946 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
1947 ioc->name, __func__);
1948 rc = -EAGAIN;
1949 goto out;
1950 }
1951
1952 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
1953 if (!smid) {
1954 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
1955 ioc->name, __func__);
1956 rc = -EAGAIN;
1957 goto out;
1958 }
1959
1960 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
1961 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
1962 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1963 ioc->ctl_cmds.smid = smid;
1964
1965 mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
1966 mpi_request->BufferType = buffer_type;
1967 mpi_request->VF_ID = 0; /* TODO */
1968 mpi_request->VP_ID = 0;
1969
1970 init_completion(&ioc->ctl_cmds.done);
1971 mpt3sas_base_put_smid_default(ioc, smid);
1972 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1973 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1974
1975 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
1976 *issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
1977 ioc->ctl_cmds.status, mpi_request,
1978 sizeof(Mpi2DiagReleaseRequest_t)/4);
1979 rc = -EFAULT;
1980 goto out;
1981 }
1982
1983 /* process the completed Reply Message Frame */
1984 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
1985 pr_err(MPT3SAS_FMT "%s: no reply message\n",
1986 ioc->name, __func__);
1987 rc = -EFAULT;
1988 goto out;
1989 }
1990
1991 mpi_reply = ioc->ctl_cmds.reply;
1992 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
1993
1994 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
1995 ioc->diag_buffer_status[buffer_type] |=
1996 MPT3_DIAG_BUFFER_IS_RELEASED;
1997 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
1998 ioc->name, __func__));
1999 } else {
2000 pr_info(MPT3SAS_FMT
2001 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2002 ioc->name, __func__,
2003 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2004 rc = -EFAULT;
2005 }
2006
2007 out:
2008 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2009 return rc;
2010 }
2011
2012 /**
2013 * _ctl_diag_release - request to send Diag Release Message to firmware
2014 * @ioc: per adapter object
2015 * @arg: user space buffer containing ioctl content
2016 *
2017 * This allows ownership of the specified buffer to be returned to the driver,
2018 * allowing an application to read the buffer without fear that firmware is
2019 * overwriting information in the buffer.
2020 */
2021 static long
2022 _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2023 {
2024 struct mpt3_diag_release karg;
2025 void *request_data;
2026 int rc;
2027 u8 buffer_type;
2028 u8 issue_reset = 0;
2029
2030 if (copy_from_user(&karg, arg, sizeof(karg))) {
2031 pr_err("failure at %s:%d/%s()!\n",
2032 __FILE__, __LINE__, __func__);
2033 return -EFAULT;
2034 }
2035
2036 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2037 __func__));
2038
2039 buffer_type = karg.unique_id & 0x000000ff;
2040 if (!_ctl_diag_capability(ioc, buffer_type)) {
2041 pr_err(MPT3SAS_FMT
2042 "%s: doesn't have capability for buffer_type(0x%02x)\n",
2043 ioc->name, __func__, buffer_type);
2044 return -EPERM;
2045 }
2046
2047 if ((ioc->diag_buffer_status[buffer_type] &
2048 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
2049 pr_err(MPT3SAS_FMT
2050 "%s: buffer_type(0x%02x) is not registered\n",
2051 ioc->name, __func__, buffer_type);
2052 return -EINVAL;
2053 }
2054
2055 if (karg.unique_id != ioc->unique_id[buffer_type]) {
2056 pr_err(MPT3SAS_FMT
2057 "%s: unique_id(0x%08x) is not registered\n",
2058 ioc->name, __func__, karg.unique_id);
2059 return -EINVAL;
2060 }
2061
2062 if (ioc->diag_buffer_status[buffer_type] &
2063 MPT3_DIAG_BUFFER_IS_RELEASED) {
2064 pr_err(MPT3SAS_FMT
2065 "%s: buffer_type(0x%02x) is already released\n",
2066 ioc->name, __func__,
2067 buffer_type);
2068 return 0;
2069 }
2070
2071 request_data = ioc->diag_buffer[buffer_type];
2072
2073 if (!request_data) {
2074 pr_err(MPT3SAS_FMT
2075 "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
2076 ioc->name, __func__, buffer_type);
2077 return -ENOMEM;
2078 }
2079
2080 /* buffers were released due to host reset */
2081 if ((ioc->diag_buffer_status[buffer_type] &
2082 MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
2083 ioc->diag_buffer_status[buffer_type] |=
2084 MPT3_DIAG_BUFFER_IS_RELEASED;
2085 ioc->diag_buffer_status[buffer_type] &=
2086 ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
2087 pr_err(MPT3SAS_FMT
2088 "%s: buffer_type(0x%02x) was released due to host reset\n",
2089 ioc->name, __func__, buffer_type);
2090 return 0;
2091 }
2092
2093 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
2094
2095 if (issue_reset)
2096 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2097
2098 return rc;
2099 }
2100
2101 /**
2102 * _ctl_diag_read_buffer - request for copy of the diag buffer
2103 * @ioc: per adapter object
2104 * @arg: user space buffer containing ioctl content
2105 */
2106 static long
2107 _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2108 {
2109 struct mpt3_diag_read_buffer karg;
2110 struct mpt3_diag_read_buffer __user *uarg = arg;
2111 void *request_data, *diag_data;
2112 Mpi2DiagBufferPostRequest_t *mpi_request;
2113 Mpi2DiagBufferPostReply_t *mpi_reply;
2114 int rc, i;
2115 u8 buffer_type;
2116 unsigned long request_size, copy_size;
2117 u16 smid;
2118 u16 ioc_status;
2119 u8 issue_reset = 0;
2120
2121 if (copy_from_user(&karg, arg, sizeof(karg))) {
2122 pr_err("failure at %s:%d/%s()!\n",
2123 __FILE__, __LINE__, __func__);
2124 return -EFAULT;
2125 }
2126
2127 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
2128 __func__));
2129
2130 buffer_type = karg.unique_id & 0x000000ff;
2131 if (!_ctl_diag_capability(ioc, buffer_type)) {
2132 pr_err(MPT3SAS_FMT
2133 "%s: doesn't have capability for buffer_type(0x%02x)\n",
2134 ioc->name, __func__, buffer_type);
2135 return -EPERM;
2136 }
2137
2138 if (karg.unique_id != ioc->unique_id[buffer_type]) {
2139 pr_err(MPT3SAS_FMT
2140 "%s: unique_id(0x%08x) is not registered\n",
2141 ioc->name, __func__, karg.unique_id);
2142 return -EINVAL;
2143 }
2144
2145 request_data = ioc->diag_buffer[buffer_type];
2146 if (!request_data) {
2147 pr_err(MPT3SAS_FMT
2148 "%s: doesn't have buffer for buffer_type(0x%02x)\n",
2149 ioc->name, __func__, buffer_type);
2150 return -ENOMEM;
2151 }
2152
2153 request_size = ioc->diag_buffer_sz[buffer_type];
2154
2155 if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
2156 pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
2157 "or bytes_to_read are not 4 byte aligned\n", ioc->name,
2158 __func__);
2159 return -EINVAL;
2160 }
2161
2162 if (karg.starting_offset > request_size)
2163 return -EINVAL;
2164
2165 diag_data = (void *)(request_data + karg.starting_offset);
2166 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2167 "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
2168 ioc->name, __func__,
2169 diag_data, karg.starting_offset, karg.bytes_to_read));
2170
2171 /* Truncate data on requests that are too large */
2172 if ((diag_data + karg.bytes_to_read < diag_data) ||
2173 (diag_data + karg.bytes_to_read > request_data + request_size))
2174 copy_size = request_size - karg.starting_offset;
2175 else
2176 copy_size = karg.bytes_to_read;
2177
2178 if (copy_to_user((void __user *)uarg->diagnostic_data,
2179 diag_data, copy_size)) {
2180 pr_err(MPT3SAS_FMT
2181 "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
2182 ioc->name, __func__, diag_data);
2183 return -EFAULT;
2184 }
2185
2186 if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
2187 return 0;
2188
2189 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2190 "%s: Reregister buffer_type(0x%02x)\n",
2191 ioc->name, __func__, buffer_type));
2192 if ((ioc->diag_buffer_status[buffer_type] &
2193 MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
2194 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2195 "%s: buffer_type(0x%02x) is still registered\n",
2196 ioc->name, __func__, buffer_type));
2197 return 0;
2198 }
2199 /* Get a free request frame and save the message context.
2200 */
2201
2202 if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
2203 pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
2204 ioc->name, __func__);
2205 rc = -EAGAIN;
2206 goto out;
2207 }
2208
2209 smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
2210 if (!smid) {
2211 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2212 ioc->name, __func__);
2213 rc = -EAGAIN;
2214 goto out;
2215 }
2216
2217 rc = 0;
2218 ioc->ctl_cmds.status = MPT3_CMD_PENDING;
2219 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
2220 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2221 ioc->ctl_cmds.smid = smid;
2222
2223 mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
2224 mpi_request->BufferType = buffer_type;
2225 mpi_request->BufferLength =
2226 cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
2227 mpi_request->BufferAddress =
2228 cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
2229 for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
2230 mpi_request->ProductSpecific[i] =
2231 cpu_to_le32(ioc->product_specific[buffer_type][i]);
2232 mpi_request->VF_ID = 0; /* TODO */
2233 mpi_request->VP_ID = 0;
2234
2235 init_completion(&ioc->ctl_cmds.done);
2236 mpt3sas_base_put_smid_default(ioc, smid);
2237 wait_for_completion_timeout(&ioc->ctl_cmds.done,
2238 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2239
2240 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
2241 issue_reset =
2242 mpt3sas_base_check_cmd_timeout(ioc,
2243 ioc->ctl_cmds.status, mpi_request,
2244 sizeof(Mpi2DiagBufferPostRequest_t)/4);
2245 goto issue_host_reset;
2246 }
2247
2248 /* process the completed Reply Message Frame */
2249 if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
2250 pr_err(MPT3SAS_FMT "%s: no reply message\n",
2251 ioc->name, __func__);
2252 rc = -EFAULT;
2253 goto out;
2254 }
2255
2256 mpi_reply = ioc->ctl_cmds.reply;
2257 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
2258
2259 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
2260 ioc->diag_buffer_status[buffer_type] |=
2261 MPT3_DIAG_BUFFER_IS_REGISTERED;
2262 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
2263 ioc->name, __func__));
2264 } else {
2265 pr_info(MPT3SAS_FMT
2266 "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
2267 ioc->name, __func__,
2268 ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
2269 rc = -EFAULT;
2270 }
2271
2272 issue_host_reset:
2273 if (issue_reset)
2274 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2275
2276 out:
2277
2278 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
2279 return rc;
2280 }
2281
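/*
 * Minimal user-space sketch, assuming the mpt3_diag_read_buffer layout from
 * the mpt3sas_ctl.h header and an fd opened on /dev/mpt3ctl (includes as in
 * the registration sketch earlier); the buffer should first be released
 * (MPT3DIAGRELEASE) so firmware stops writing to it, then it can be drained
 * in 4-byte-aligned chunks through the MPT3DIAGREADBUFFER ioctl above:
 *
 *	static int read_diag_buffer(int fd, int ioc_number, __u32 unique_id,
 *		void *out, __u32 total_size)
 *	{
 *		struct mpt3_diag_read_buffer *karg;
 *		__u32 chunk_sz = 64 * 1024, offset, this_sz;
 *
 *		karg = malloc(sizeof(*karg) + chunk_sz);
 *		if (!karg)
 *			return -1;
 *		for (offset = 0; offset < total_size; offset += this_sz) {
 *			this_sz = total_size - offset < chunk_sz ?
 *			    total_size - offset : chunk_sz;
 *			memset(karg, 0, sizeof(*karg));
 *			karg->hdr.ioc_number = ioc_number;
 *			karg->unique_id = unique_id;
 *			karg->starting_offset = offset;
 *			karg->bytes_to_read = this_sz;
 *			if (ioctl(fd, MPT3DIAGREADBUFFER, karg) < 0)
 *				break;
 *			memcpy((char *)out + offset, karg->diagnostic_data,
 *			    this_sz);
 *		}
 *		free(karg);
 *		return offset >= total_size ? 0 : -1;
 *	}
 */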
2282
2283
2284 #ifdef CONFIG_COMPAT
2285 /**
2286 * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
2287 * @ioc: per adapter object
2288 * @cmd: ioctl opcode
2289 * @arg: (struct mpt3_ioctl_command32)
2290 *
2291 * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
2292 */
2293 static long
2294 _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
2295 void __user *arg)
2296 {
2297 struct mpt3_ioctl_command32 karg32;
2298 struct mpt3_ioctl_command32 __user *uarg;
2299 struct mpt3_ioctl_command karg;
2300
2301 if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
2302 return -EINVAL;
2303
2304 uarg = (struct mpt3_ioctl_command32 __user *) arg;
2305
2306 if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
2307 pr_err("failure at %s:%d/%s()!\n",
2308 __FILE__, __LINE__, __func__);
2309 return -EFAULT;
2310 }
2311
2312 memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
2313 karg.hdr.ioc_number = karg32.hdr.ioc_number;
2314 karg.hdr.port_number = karg32.hdr.port_number;
2315 karg.hdr.max_data_size = karg32.hdr.max_data_size;
2316 karg.timeout = karg32.timeout;
2317 karg.max_reply_bytes = karg32.max_reply_bytes;
2318 karg.data_in_size = karg32.data_in_size;
2319 karg.data_out_size = karg32.data_out_size;
2320 karg.max_sense_bytes = karg32.max_sense_bytes;
2321 karg.data_sge_offset = karg32.data_sge_offset;
2322 karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
2323 karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
2324 karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
2325 karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
2326 return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2327 }
2328 #endif
2329
2330 /**
2331 * _ctl_ioctl_main - main ioctl entry point
2332 * @file: (struct file)
2333 * @cmd: ioctl opcode
2334 * @arg: user space data buffer
2335 * @compat: handles 32 bit applications in 64bit os
2336 * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device &
2337 * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device.
2338 */
2339 static long
2340 _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
2341 u8 compat, u16 mpi_version)
2342 {
2343 struct MPT3SAS_ADAPTER *ioc;
2344 struct mpt3_ioctl_header ioctl_header;
2345 enum block_state state;
2346 long ret = -EINVAL;
2347
2348 /* get IOCTL header */
2349 if (copy_from_user(&ioctl_header, (char __user *)arg,
2350 sizeof(struct mpt3_ioctl_header))) {
2351 pr_err("failure at %s:%d/%s()!\n",
2352 __FILE__, __LINE__, __func__);
2353 return -EFAULT;
2354 }
2355
2356 if (_ctl_verify_adapter(ioctl_header.ioc_number,
2357 &ioc, mpi_version) == -1 || !ioc)
2358 return -ENODEV;
2359
2360 /* pci_access_mutex lock acquired by ioctl path */
2361 mutex_lock(&ioc->pci_access_mutex);
2362
2363 if (ioc->shost_recovery || ioc->pci_error_recovery ||
2364 ioc->is_driver_loading || ioc->remove_host) {
2365 ret = -EAGAIN;
2366 goto out_unlock_pciaccess;
2367 }
2368
2369 state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
2370 if (state == NON_BLOCKING) {
2371 if (!mutex_trylock(&ioc->ctl_cmds.mutex)) {
2372 ret = -EAGAIN;
2373 goto out_unlock_pciaccess;
2374 }
2375 } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) {
2376 ret = -ERESTARTSYS;
2377 goto out_unlock_pciaccess;
2378 }
2379
2380
2381 switch (cmd) {
2382 case MPT3IOCINFO:
2383 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
2384 ret = _ctl_getiocinfo(ioc, arg);
2385 break;
2386 #ifdef CONFIG_COMPAT
2387 case MPT3COMMAND32:
2388 #endif
2389 case MPT3COMMAND:
2390 {
2391 struct mpt3_ioctl_command __user *uarg;
2392 struct mpt3_ioctl_command karg;
2393
2394 #ifdef CONFIG_COMPAT
2395 if (compat) {
2396 ret = _ctl_compat_mpt_command(ioc, cmd, arg);
2397 break;
2398 }
2399 #endif
2400 if (copy_from_user(&karg, arg, sizeof(karg))) {
2401 pr_err("failure at %s:%d/%s()!\n",
2402 __FILE__, __LINE__, __func__);
2403 ret = -EFAULT;
2404 break;
2405 }
2406
2407 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
2408 uarg = arg;
2409 ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
2410 }
2411 break;
2412 }
2413 case MPT3EVENTQUERY:
2414 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
2415 ret = _ctl_eventquery(ioc, arg);
2416 break;
2417 case MPT3EVENTENABLE:
2418 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
2419 ret = _ctl_eventenable(ioc, arg);
2420 break;
2421 case MPT3EVENTREPORT:
2422 ret = _ctl_eventreport(ioc, arg);
2423 break;
2424 case MPT3HARDRESET:
2425 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
2426 ret = _ctl_do_reset(ioc, arg);
2427 break;
2428 case MPT3BTDHMAPPING:
2429 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
2430 ret = _ctl_btdh_mapping(ioc, arg);
2431 break;
2432 case MPT3DIAGREGISTER:
2433 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
2434 ret = _ctl_diag_register(ioc, arg);
2435 break;
2436 case MPT3DIAGUNREGISTER:
2437 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
2438 ret = _ctl_diag_unregister(ioc, arg);
2439 break;
2440 case MPT3DIAGQUERY:
2441 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
2442 ret = _ctl_diag_query(ioc, arg);
2443 break;
2444 case MPT3DIAGRELEASE:
2445 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
2446 ret = _ctl_diag_release(ioc, arg);
2447 break;
2448 case MPT3DIAGREADBUFFER:
2449 if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
2450 ret = _ctl_diag_read_buffer(ioc, arg);
2451 break;
2452 default:
2453 dctlprintk(ioc, pr_info(MPT3SAS_FMT
2454 "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
2455 break;
2456 }
2457
2458 mutex_unlock(&ioc->ctl_cmds.mutex);
2459 out_unlock_pciaccess:
2460 mutex_unlock(&ioc->pci_access_mutex);
2461 return ret;
2462 }
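
/*
 * Usage note (hypothetical user-space sketch, with "command" being a
 * filled-in struct mpt3_ioctl_command): opening the control node with
 * O_NONBLOCK selects the NON_BLOCKING state above, so a busy ctl_cmds
 * mutex surfaces as -EAGAIN instead of putting the caller to sleep; an
 * application may simply retry:
 *
 *	int fd = open("/dev/mpt3ctl", O_RDWR | O_NONBLOCK);
 *
 *	while (ioctl(fd, MPT3COMMAND, &command) < 0 && errno == EAGAIN)
 *		usleep(1000);
 */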
2463
2464 /**
2465 * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked)
2466 * @file: (struct file)
2467 * @cmd: ioctl opcode
2468 * @arg: ?
2469 */
2470 static long
2471 _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2472 {
2473 long ret;
2474
2475 /* pass MPI25_VERSION | MPI26_VERSION value,
2476 * to indicate that this ioctl cmd
2477 * came from mpt3ctl ioctl device.
2478 */
2479 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0,
2480 MPI25_VERSION | MPI26_VERSION);
2481 return ret;
2482 }
2483
2484 /**
2485 * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked)
2486 * @file: (struct file)
2487 * @cmd: ioctl opcode
2488 * @arg: ?
2489 */
2490 static long
2491 _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2492 {
2493 long ret;
2494
2495 /* pass MPI2_VERSION value, to indicate that this ioctl cmd
2496 * came from mpt2ctl ioctl device.
2497 */
2498 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION);
2499 return ret;
2500 }
2501 #ifdef CONFIG_COMPAT
2502 /**
2503 * _ctl_ioctl_compat - main ioctl entry point (compat)
2504 * @file: ?
2505 * @cmd: ?
2506 * @arg: ?
2507 *
2508 * This routine handles 32-bit applications running on a 64-bit OS.
2509 */
2510 static long
2511 _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2512 {
2513 long ret;
2514
2515 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1,
2516 MPI25_VERSION | MPI26_VERSION);
2517 return ret;
2518 }
2519
2520 /**
2521 * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
2522 * @file: ?
2523 * @cmd: ?
2524 * @arg: ?
2525 *
2526 * This routine handles 32-bit applications running on a 64-bit OS.
2527 */
2528 static long
2529 _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2530 {
2531 long ret;
2532
2533 ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
2534 return ret;
2535 }
2536 #endif
2537
2538 /* scsi host attributes */
2539 /**
2540 * _ctl_version_fw_show - firmware version
2541 * @cdev: pointer to embedded class device
2542 * @attr: ?
2543 * @buf: the buffer returned
2544 *
2545 * A sysfs 'read-only' shost attribute.
2546 */
2547 static ssize_t
2548 _ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
2549 char *buf)
2550 {
2551 struct Scsi_Host *shost = class_to_shost(cdev);
2552 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2553
2554 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2555 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
2556 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
2557 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
2558 ioc->facts.FWVersion.Word & 0x000000FF);
2559 }
2560 static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
2561
2562 /**
2563 * _ctl_version_bios_show - bios version
2564 * @cdev: pointer to embedded class device
2565 * @attr: ?
2566 * @buf: the buffer returned
2567 *
2568 * A sysfs 'read-only' shost attribute.
2569 */
2570 static ssize_t
2571 _ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
2572 char *buf)
2573 {
2574 struct Scsi_Host *shost = class_to_shost(cdev);
2575 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2576
2577 u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
2578
2579 return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
2580 (version & 0xFF000000) >> 24,
2581 (version & 0x00FF0000) >> 16,
2582 (version & 0x0000FF00) >> 8,
2583 version & 0x000000FF);
2584 }
2585 static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
2586
2587 /**
2588 * _ctl_version_mpi_show - MPI (message passing interface) version
2589 * @cdev: pointer to embedded class device
2590 * @attr: ?
2591 * @buf: the buffer returned
2592 *
2593 * A sysfs 'read-only' shost attribute.
2594 */
2595 static ssize_t
2596 _ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
2597 char *buf)
2598 {
2599 struct Scsi_Host *shost = class_to_shost(cdev);
2600 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2601
2602 return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
2603 ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
2604 }
2605 static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
2606
2607 /**
2608 * _ctl_version_product_show - product name
2609 * @cdev: pointer to embedded class device
2610 * @attr: ?
2611 * @buf: the buffer returned
2612 *
2613 * A sysfs 'read-only' shost attribute.
2614 */
2615 static ssize_t
2616 _ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
2617 char *buf)
2618 {
2619 struct Scsi_Host *shost = class_to_shost(cdev);
2620 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2621
2622 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
2623 }
2624 static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
2625
2626 /**
2627 * _ctl_version_nvdata_persistent_show - nvdata persistent version
2628 * @cdev: pointer to embedded class device
2629 * @attr: ?
2630 * @buf: the buffer returned
2631 *
2632 * A sysfs 'read-only' shost attribute.
2633 */
2634 static ssize_t
2635 _ctl_version_nvdata_persistent_show(struct device *cdev,
2636 struct device_attribute *attr, char *buf)
2637 {
2638 struct Scsi_Host *shost = class_to_shost(cdev);
2639 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2640
2641 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2642 le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
2643 }
2644 static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
2645 _ctl_version_nvdata_persistent_show, NULL);
2646
2647 /**
2648 * _ctl_version_nvdata_default_show - nvdata default version
2649 * @cdev: pointer to embedded class device
2650 * @attr: ?
2651 * @buf: the buffer returned
2652 *
2653 * A sysfs 'read-only' shost attribute.
2654 */
2655 static ssize_t
2656 _ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
2657 *attr, char *buf)
2658 {
2659 struct Scsi_Host *shost = class_to_shost(cdev);
2660 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2661
2662 return snprintf(buf, PAGE_SIZE, "%08xh\n",
2663 le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
2664 }
2665 static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
2666 _ctl_version_nvdata_default_show, NULL);
2667
2668 /**
2669 * _ctl_board_name_show - board name
2670 * @cdev: pointer to embedded class device
2671 * @attr: ?
2672 * @buf: the buffer returned
2673 *
2674 * A sysfs 'read-only' shost attribute.
2675 */
2676 static ssize_t
2677 _ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
2678 char *buf)
2679 {
2680 struct Scsi_Host *shost = class_to_shost(cdev);
2681 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2682
2683 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
2684 }
2685 static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
2686
2687 /**
2688 * _ctl_board_assembly_show - board assembly name
2689 * @cdev: pointer to embedded class device
2690 * @attr: ?
2691 * @buf: the buffer returned
2692 *
2693 * A sysfs 'read-only' shost attribute.
2694 */
2695 static ssize_t
2696 _ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
2697 char *buf)
2698 {
2699 struct Scsi_Host *shost = class_to_shost(cdev);
2700 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2701
2702 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
2703 }
2704 static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
2705
2706 /**
2707 * _ctl_board_tracer_show - board tracer number
2708 * @cdev: pointer to embedded class device
2709 * @attr: ?
2710 * @buf: the buffer returned
2711 *
2712 * A sysfs 'read-only' shost attribute.
2713 */
2714 static ssize_t
2715 _ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
2716 char *buf)
2717 {
2718 struct Scsi_Host *shost = class_to_shost(cdev);
2719 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2720
2721 return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
2722 }
2723 static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
2724
2725 /**
2726 * _ctl_io_delay_show - io missing delay
2727 * @cdev: pointer to embedded class device
2728 * @attr: ?
2729 * @buf: the buffer returned
2730 *
2731 * This is the firmware implementation for debouncing device
2732 * removal events.
2733 *
2734 * A sysfs 'read-only' shost attribute.
2735 */
2736 static ssize_t
2737 _ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
2738 char *buf)
2739 {
2740 struct Scsi_Host *shost = class_to_shost(cdev);
2741 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2742
2743 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
2744 }
2745 static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
2746
2747 /**
2748 * _ctl_device_delay_show - device missing delay
2749 * @cdev: pointer to embedded class device
2750 * @attr: ?
2751 * @buf: the buffer returned
2752 *
2753 * This is the firmware implementation for debouncing device
2754 * removal events.
2755 *
2756 * A sysfs 'read-only' shost attribute.
2757 */
2758 static ssize_t
2759 _ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
2760 char *buf)
2761 {
2762 struct Scsi_Host *shost = class_to_shost(cdev);
2763 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2764
2765 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
2766 }
2767 static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
2768
2769 /**
2770 * _ctl_fw_queue_depth_show - global credits
2771 * @cdev: pointer to embedded class device
2772 * @attr: ?
2773 * @buf: the buffer returned
2774 *
2775 * This is the firmware queue depth limit.
2776 *
2777 * A sysfs 'read-only' shost attribute.
2778 */
2779 static ssize_t
2780 _ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
2781 char *buf)
2782 {
2783 struct Scsi_Host *shost = class_to_shost(cdev);
2784 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2785
2786 return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
2787 }
2788 static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
2789
2790 /**
2791 * _ctl_host_sas_address_show - sas address
2792 * @cdev: pointer to embedded class device
2793 * @attr: ?
2794 * @buf: the buffer returned
2795 *
2796 * This is the controller sas address
2797 *
2798 * A sysfs 'read-only' shost attribute.
2799 */
2800 static ssize_t
2801 _ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
2802 char *buf)
2803
2804 {
2805 struct Scsi_Host *shost = class_to_shost(cdev);
2806 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2807
2808 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
2809 (unsigned long long)ioc->sas_hba.sas_address);
2810 }
2811 static DEVICE_ATTR(host_sas_address, S_IRUGO,
2812 _ctl_host_sas_address_show, NULL);
2813
2814 /**
2815 * _ctl_logging_level_show - logging level
2816 * @cdev: pointer to embedded class device
2817 * @attr: ?
2818 * @buf: the buffer returned
2819 *
2820 * A sysfs 'read/write' shost attribute.
2821 */
2822 static ssize_t
2823 _ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
2824 char *buf)
2825 {
2826 struct Scsi_Host *shost = class_to_shost(cdev);
2827 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2828
2829 return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
2830 }
2831 static ssize_t
2832 _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
2833 const char *buf, size_t count)
2834 {
2835 struct Scsi_Host *shost = class_to_shost(cdev);
2836 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2837 int val = 0;
2838
2839 if (sscanf(buf, "%x", &val) != 1)
2840 return -EINVAL;
2841
2842 ioc->logging_level = val;
2843 pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
2844 ioc->logging_level);
2845 return strlen(buf);
2846 }
2847 static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
2848 _ctl_logging_level_store);
2849
2850 /**
2851 * _ctl_fwfault_debug_show - show/store fwfault_debug
2852 * @cdev: pointer to embedded class device
2853 * @attr: ?
2854 * @buf: the buffer returned
2855 *
2856 * mpt3sas_fwfault_debug is a command line option.
2857 * A sysfs 'read/write' shost attribute.
2858 */
2859 static ssize_t
2860 _ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
2861 char *buf)
2862 {
2863 struct Scsi_Host *shost = class_to_shost(cdev);
2864 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2865
2866 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
2867 }
2868 static ssize_t
2869 _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
2870 const char *buf, size_t count)
2871 {
2872 struct Scsi_Host *shost = class_to_shost(cdev);
2873 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2874 int val = 0;
2875
2876 if (sscanf(buf, "%d", &val) != 1)
2877 return -EINVAL;
2878
2879 ioc->fwfault_debug = val;
2880 pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
2881 ioc->fwfault_debug);
2882 return strlen(buf);
2883 }
2884 static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
2885 _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
2886
2887 /**
2888 * _ctl_ioc_reset_count_show - ioc reset count
2889 * @cdev: pointer to embedded class device
2890 * @attr: ?
2891 * @buf: the buffer returned
2892 *
2893 * This is the number of times the controller has been reset.
2894 *
2895 * A sysfs 'read-only' shost attribute.
2896 */
2897 static ssize_t
2898 _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2899 char *buf)
2900 {
2901 struct Scsi_Host *shost = class_to_shost(cdev);
2902 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2903
2904 return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
2905 }
2906 static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
2907
2908 /**
2909 * _ctl_ioc_reply_queue_count_show - number of reply queues
2910 * @cdev: pointer to embedded class device
2911 * @attr: ?
2912 * @buf: the buffer returned
2913 *
2914 * This is the number of reply queues.
2915 *
2916 * A sysfs 'read-only' shost attribute.
2917 */
2918 static ssize_t
2919 _ctl_ioc_reply_queue_count_show(struct device *cdev,
2920 struct device_attribute *attr, char *buf)
2921 {
2922 u8 reply_queue_count;
2923 struct Scsi_Host *shost = class_to_shost(cdev);
2924 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2925
2926 if ((ioc->facts.IOCCapabilities &
2927 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2928 reply_queue_count = ioc->reply_queue_count;
2929 else
2930 reply_queue_count = 1;
2931
2932 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2933 }
2934 static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
2935 NULL);
2936
2937 /**
2938 * _ctl_BRM_status_show - Backup Rail Monitor Status
2939 * @cdev: pointer to embedded class device
2940 * @attr: ?
2941 * @buf: the buffer returned
2942 *
2943 * This is the Backup Rail Monitor (BRM) status; it applies to warpdrive controllers only.
2944 *
2945 * A sysfs 'read-only' shost attribute.
2946 */
2947 static ssize_t
2948 _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
2949 char *buf)
2950 {
2951 struct Scsi_Host *shost = class_to_shost(cdev);
2952 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2953 Mpi2IOUnitPage3_t *io_unit_pg3 = NULL;
2954 Mpi2ConfigReply_t mpi_reply;
2955 u16 backup_rail_monitor_status = 0;
2956 u16 ioc_status;
2957 int sz;
2958 ssize_t rc = 0;
2959
2960 if (!ioc->is_warpdrive) {
2961 pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
2962 " warpdrive\n", ioc->name, __func__);
2963 goto out;
2964 }
2965 /* pci_access_mutex lock acquired by sysfs show path */
2966 mutex_lock(&ioc->pci_access_mutex);
2967 if (ioc->pci_error_recovery || ioc->remove_host) {
2968 mutex_unlock(&ioc->pci_access_mutex);
2969 return 0;
2970 }
2971
2972 /* allocate up to 36 GPIOVal entries */
2973 sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
2974 io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
2975 if (!io_unit_pg3) {
2976 pr_err(MPT3SAS_FMT "%s: failed allocating memory "
2977 "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
2978 goto out;
2979 }
2980
2981 if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
2982 0) {
2983 pr_err(MPT3SAS_FMT
2984 "%s: failed reading iounit_pg3\n", ioc->name,
2985 __func__);
2986 goto out;
2987 }
2988
2989 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
2990 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2991 pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
2992 "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
2993 goto out;
2994 }
2995
2996 if (io_unit_pg3->GPIOCount < 25) {
2997 pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
2998 "25 entries, detected (%d) entries\n", ioc->name, __func__,
2999 io_unit_pg3->GPIOCount);
3000 goto out;
3001 }
3002
3003 /* BRM status is in bit zero of GPIOVal[24] */
3004 backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]);
3005 rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1));
3006
3007 out:
3008 kfree(io_unit_pg3);
3009 mutex_unlock(&ioc->pci_access_mutex);
3010 return rc;
3011 }
3012 static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL);
3013
3014 struct DIAG_BUFFER_START {
3015 __le32 Size;
3016 __le32 DiagVersion;
3017 u8 BufferType;
3018 u8 Reserved[3];
3019 __le32 Reserved1;
3020 __le32 Reserved2;
3021 __le32 Reserved3;
3022 };
3023
3024 /**
3025 * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
3026 * @cdev: pointer to embedded class device
3027 * @attr: ?
3028 * @buf: the buffer returned
3029 *
3030 * A sysfs 'read-only' shost attribute.
3031 */
3032 static ssize_t
3033 _ctl_host_trace_buffer_size_show(struct device *cdev,
3034 struct device_attribute *attr, char *buf)
3035 {
3036 struct Scsi_Host *shost = class_to_shost(cdev);
3037 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3038 u32 size = 0;
3039 struct DIAG_BUFFER_START *request_data;
3040
3041 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3042 pr_err(MPT3SAS_FMT
3043 "%s: host_trace_buffer is not registered\n",
3044 ioc->name, __func__);
3045 return 0;
3046 }
3047
3048 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3049 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3050 pr_err(MPT3SAS_FMT
3051 "%s: host_trace_buffer is not registered\n",
3052 ioc->name, __func__);
3053 return 0;
3054 }
3055
3056 request_data = (struct DIAG_BUFFER_START *)
3057 ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
3058 if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
3059 le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
3060 le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
3061 le32_to_cpu(request_data->Reserved3) == 0x4742444c)
3062 size = le32_to_cpu(request_data->Size);
3063
3064 ioc->ring_buffer_sz = size;
3065 return snprintf(buf, PAGE_SIZE, "%d\n", size);
3066 }
3067 static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
3068 _ctl_host_trace_buffer_size_show, NULL);
3069
3070 /**
3071 * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
3072 * @cdev: pointer to embedded class device
3073 * @attr: ?
3074 * @buf: the buffer returned
3075 *
3076 * A sysfs 'read/write' shost attribute.
3077 *
3078 * Only 4k bytes of the ring buffer can be read at a time. To read
3079 * beyond 4k bytes, write the desired offset to this same attribute;
3080 * that moves the read pointer.
3081 */
3082 static ssize_t
3083 _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
3084 char *buf)
3085 {
3086 struct Scsi_Host *shost = class_to_shost(cdev);
3087 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3088 void *request_data;
3089 u32 size;
3090
3091 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
3092 pr_err(MPT3SAS_FMT
3093 "%s: host_trace_buffer is not registered\n",
3094 ioc->name, __func__);
3095 return 0;
3096 }
3097
3098 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3099 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
3100 pr_err(MPT3SAS_FMT
3101 "%s: host_trace_buffer is not registered\n",
3102 ioc->name, __func__);
3103 return 0;
3104 }
3105
3106 if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
3107 return 0;
3108
3109 size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
3110 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3111 request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
3112 memcpy(buf, request_data, size);
3113 return size;
3114 }
3115
3116 static ssize_t
3117 _ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
3118 const char *buf, size_t count)
3119 {
3120 struct Scsi_Host *shost = class_to_shost(cdev);
3121 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3122 int val = 0;
3123
3124 if (sscanf(buf, "%d", &val) != 1)
3125 return -EINVAL;
3126
3127 ioc->ring_buffer_offset = val;
3128 return strlen(buf);
3129 }
3130 static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
3131 _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
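
/*
 * Minimal user-space sketch (the host0 sysfs path is an assumption for
 * whichever Scsi_Host instance is of interest, and consume() is a
 * hypothetical helper for the returned data); it reads the whole ring
 * buffer a page at a time by writing the next offset back to this same
 * attribute, as described above:
 *
 *	char chunk[4096], off_str[16];
 *	long offset = 0;
 *	ssize_t n;
 *	int fd = open("/sys/class/scsi_host/host0/host_trace_buffer",
 *			O_RDWR);
 *
 *	do {
 *		snprintf(off_str, sizeof(off_str), "%ld", offset);
 *		lseek(fd, 0, SEEK_SET);
 *		write(fd, off_str, strlen(off_str));
 *		lseek(fd, 0, SEEK_SET);
 *		n = read(fd, chunk, sizeof(chunk));
 *		if (n > 0)
 *			consume(chunk, n);
 *		offset += n;
 *	} while (n > 0);
 */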
3132
3133
3134 /*****************************************/
3135
3136 /**
3137 * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
3138 * @cdev: pointer to embedded class device
3139 * @attr: ?
3140 * @buf: the buffer returned
3141 *
3142 * A sysfs 'read/write' shost attribute.
3143 *
3144 * This is a mechanism to post/release host_trace_buffers.
3145 */
3146 static ssize_t
3147 _ctl_host_trace_buffer_enable_show(struct device *cdev,
3148 struct device_attribute *attr, char *buf)
3149 {
3150 struct Scsi_Host *shost = class_to_shost(cdev);
3151 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3152
3153 if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
3154 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3155 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
3156 return snprintf(buf, PAGE_SIZE, "off\n");
3157 else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3158 MPT3_DIAG_BUFFER_IS_RELEASED))
3159 return snprintf(buf, PAGE_SIZE, "release\n");
3160 else
3161 return snprintf(buf, PAGE_SIZE, "post\n");
3162 }
3163
3164 static ssize_t
3165 _ctl_host_trace_buffer_enable_store(struct device *cdev,
3166 struct device_attribute *attr, const char *buf, size_t count)
3167 {
3168 struct Scsi_Host *shost = class_to_shost(cdev);
3169 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3170 char str[10] = "";
3171 struct mpt3_diag_register diag_register;
3172 u8 issue_reset = 0;
3173
3174 /* don't allow post/release to occur while recovery is active */
3175 if (ioc->shost_recovery || ioc->remove_host ||
3176 ioc->pci_error_recovery || ioc->is_driver_loading)
3177 return -EBUSY;
3178
3179 if (sscanf(buf, "%9s", str) != 1)
3180 return -EINVAL;
3181
3182 if (!strcmp(str, "post")) {
3183 /* exit out if host buffers are already posted */
3184 if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
3185 (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3186 MPT3_DIAG_BUFFER_IS_REGISTERED) &&
3187 ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3188 MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
3189 goto out;
3190 memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
3191 pr_info(MPT3SAS_FMT "posting host trace buffers\n",
3192 ioc->name);
3193 diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
3194 diag_register.requested_buffer_size = (1024 * 1024);
3195 diag_register.unique_id = 0x7075900;
3196 ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
3197 _ctl_diag_register_2(ioc, &diag_register);
3198 } else if (!strcmp(str, "release")) {
3199 /* exit out if host buffers are already released */
3200 if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
3201 goto out;
3202 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3203 MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
3204 goto out;
3205 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
3206 MPT3_DIAG_BUFFER_IS_RELEASED))
3207 goto out;
3208 pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
3209 ioc->name);
3210 mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
3211 &issue_reset);
3212 }
3213
3214 out:
3215 return strlen(buf);
3216 }
3217 static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
3218 _ctl_host_trace_buffer_enable_show,
3219 _ctl_host_trace_buffer_enable_store);
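
/*
 * Usage note (hypothetical host0 path, with drain_host_trace_buffer()
 * standing in for the read loop sketched above): the attribute accepts
 * the strings "post" and "release", so an application can stop firmware
 * tracing, read the data back through host_trace_buffer, and then
 * re-post the buffer:
 *
 *	int fd = open(
 *	    "/sys/class/scsi_host/host0/host_trace_buffer_enable", O_WRONLY);
 *
 *	write(fd, "release", strlen("release"));
 *	drain_host_trace_buffer();
 *	write(fd, "post", strlen("post"));
 */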
3220
3221 /*********** diagnostic trigger support **********************************/
3222
3223 /**
3224 * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
3225 * @cdev: pointer to embedded class device
3226 * @attr: ?
3227 * @buf: the buffer returned
3228 *
3229 * A sysfs 'read/write' shost attribute.
3230 */
3231 static ssize_t
3232 _ctl_diag_trigger_master_show(struct device *cdev,
3233 struct device_attribute *attr, char *buf)
3234
3235 {
3236 struct Scsi_Host *shost = class_to_shost(cdev);
3237 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3238 unsigned long flags;
3239 ssize_t rc;
3240
3241 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3242 rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
3243 memcpy(buf, &ioc->diag_trigger_master, rc);
3244 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3245 return rc;
3246 }
3247
3248 /**
3249 * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
3250 * @cdev: pointer to embedded class device
3251 * @attr: ?
3252 * @buf: the buffer returned
3253 * @count: ?
3254 *
3255 * A sysfs 'read/write' shost attribute.
3256 */
3257 static ssize_t
3258 _ctl_diag_trigger_master_store(struct device *cdev,
3259 struct device_attribute *attr, const char *buf, size_t count)
3260
3261 {
3262 struct Scsi_Host *shost = class_to_shost(cdev);
3263 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3264 unsigned long flags;
3265 ssize_t rc;
3266
3267 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3268 rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
3269 memset(&ioc->diag_trigger_master, 0,
3270 sizeof(struct SL_WH_MASTER_TRIGGER_T));
3271 memcpy(&ioc->diag_trigger_master, buf, rc);
3272 ioc->diag_trigger_master.MasterData |=
3273 (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
3274 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3275 return rc;
3276 }
3277 static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
3278 _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
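
/*
 * Minimal user-space sketch, assuming the SL_WH_MASTER_TRIGGER_T layout
 * from the mpt3sas_trigger_diag.h header and an fd opened on the
 * diag_trigger_master attribute; the attribute takes the raw structure,
 * and the store handler above always keeps the firmware-fault and
 * adapter-reset bits set on top of whatever is written:
 *
 *	struct SL_WH_MASTER_TRIGGER_T trigger;
 *
 *	memset(&trigger, 0, sizeof(trigger));
 *	trigger.MasterData = MASTER_TRIGGER_FW_FAULT;
 *	write(fd, &trigger, sizeof(trigger));
 */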
3279
3280
3281 /**
3282 * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
3283 * @cdev: pointer to embedded class device
3284 * @attr: ?
3285 * @buf: the buffer returned
3286 *
3287 * A sysfs 'read/write' shost attribute.
3288 */
3289 static ssize_t
3290 _ctl_diag_trigger_event_show(struct device *cdev,
3291 struct device_attribute *attr, char *buf)
3292 {
3293 struct Scsi_Host *shost = class_to_shost(cdev);
3294 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3295 unsigned long flags;
3296 ssize_t rc;
3297
3298 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3299 rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
3300 memcpy(buf, &ioc->diag_trigger_event, rc);
3301 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3302 return rc;
3303 }
3304
3305 /**
3306 * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
3307 * @cdev: pointer to embedded class device
3308 * @attr: ?
3309 * @buf: the buffer returned
3310 * @count: ?
3311 *
3312 * A sysfs 'read/write' shost attribute.
3313 */
3314 static ssize_t
3315 _ctl_diag_trigger_event_store(struct device *cdev,
3316 struct device_attribute *attr, const char *buf, size_t count)
3317
3318 {
3319 struct Scsi_Host *shost = class_to_shost(cdev);
3320 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3321 unsigned long flags;
3322 ssize_t sz;
3323
3324 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3325 sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
3326 memset(&ioc->diag_trigger_event, 0,
3327 sizeof(struct SL_WH_EVENT_TRIGGERS_T));
3328 memcpy(&ioc->diag_trigger_event, buf, sz);
3329 if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
3330 ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
3331 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3332 return sz;
3333 }
3334 static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
3335 _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
3336
3337
3338 /**
3339 * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
3340 * @cdev: pointer to embedded class device
3341 * @attr: ?
3342 * @buf: the buffer returned
3343 *
3344 * A sysfs 'read/write' shost attribute.
3345 */
3346 static ssize_t
3347 _ctl_diag_trigger_scsi_show(struct device *cdev,
3348 struct device_attribute *attr, char *buf)
3349 {
3350 struct Scsi_Host *shost = class_to_shost(cdev);
3351 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3352 unsigned long flags;
3353 ssize_t rc;
3354
3355 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3356 rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
3357 memcpy(buf, &ioc->diag_trigger_scsi, rc);
3358 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3359 return rc;
3360 }
3361
3362 /**
3363 * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
3364 * @cdev: pointer to embedded class device
3365 * @attr: ?
3366 * @buf: the buffer returned
3367 * @count: ?
3368 *
3369 * A sysfs 'read/write' shost attribute.
3370 */
3371 static ssize_t
3372 _ctl_diag_trigger_scsi_store(struct device *cdev,
3373 struct device_attribute *attr, const char *buf, size_t count)
3374 {
3375 struct Scsi_Host *shost = class_to_shost(cdev);
3376 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3377 unsigned long flags;
3378 ssize_t sz;
3379
3380 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3381 sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
3382 memset(&ioc->diag_trigger_scsi, 0,
3383 sizeof(ioc->diag_trigger_scsi));
3384 memcpy(&ioc->diag_trigger_scsi, buf, sz);
3385 if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
3386 ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
3387 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3388 return sz;
3389 }
3390 static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
3391 _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
3392
3393
3394 /**
3395 * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
3396 * @cdev: pointer to embedded class device
3397 * @attr: ?
3398 * @buf: the buffer returned
3399 *
3400 * A sysfs 'read/write' shost attribute.
3401 */
3402 static ssize_t
3403 _ctl_diag_trigger_mpi_show(struct device *cdev,
3404 struct device_attribute *attr, char *buf)
3405 {
3406 struct Scsi_Host *shost = class_to_shost(cdev);
3407 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3408 unsigned long flags;
3409 ssize_t rc;
3410
3411 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3412 rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
3413 memcpy(buf, &ioc->diag_trigger_mpi, rc);
3414 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3415 return rc;
3416 }
3417
3418 /**
3419 * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
3420 * @cdev: pointer to embedded class device
3421  * @attr: device attribute (unused)
3422  * @buf: the buffer holding the trigger data written by the user
3423  * @count: size of the data in @buf
3424 *
3425 * A sysfs 'read/write' shost attribute.
3426 */
3427 static ssize_t
3428 _ctl_diag_trigger_mpi_store(struct device *cdev,
3429 struct device_attribute *attr, const char *buf, size_t count)
3430 {
3431 struct Scsi_Host *shost = class_to_shost(cdev);
3432 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
3433 unsigned long flags;
3434 ssize_t sz;
3435
3436 spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
3437 sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
3438 memset(&ioc->diag_trigger_mpi, 0,
3439 sizeof(ioc->diag_trigger_mpi));
3440 memcpy(&ioc->diag_trigger_mpi, buf, sz);
3441 if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
3442 ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
3443 spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
3444 return sz;
3445 }
3446
3447 static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
3448 _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
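/*
 * Illustrative userspace usage (not part of the driver): the diag_trigger_*
 * attributes above consume the raw binary trigger structures defined in
 * mpt3sas_trigger_diag.h, written to the corresponding shost sysfs file.
 * A minimal sketch, assuming host number 0 and assuming the structure
 * layout from mpt3sas_trigger_diag.h is visible to the tool; only
 * ValidEntries is shown, the per-entry trigger fields follow that header:
 *
 *	struct SL_WH_SCSI_TRIGGERS_T triggers;
 *	int fd;
 *
 *	memset(&triggers, 0, sizeof(triggers));
 *	triggers.ValidEntries = 1;
 *	(fill in the first trigger entry per mpt3sas_trigger_diag.h)
 *	fd = open("/sys/class/scsi_host/host0/diag_trigger_scsi", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, &triggers, sizeof(triggers));
 *		close(fd);
 *	}
 *
 * The store handlers copy at most sizeof() of the corresponding structure
 * and clamp ValidEntries to NUM_VALID_ENTRIES; the matching _show handlers
 * return the current structure contents in the same binary format.
 */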
3449
3450 /*********** diagnostic trigger support *** END ****************************/
3451
3452 /*****************************************/
3453
3454 struct device_attribute *mpt3sas_host_attrs[] = {
3455 &dev_attr_version_fw,
3456 &dev_attr_version_bios,
3457 &dev_attr_version_mpi,
3458 &dev_attr_version_product,
3459 &dev_attr_version_nvdata_persistent,
3460 &dev_attr_version_nvdata_default,
3461 &dev_attr_board_name,
3462 &dev_attr_board_assembly,
3463 &dev_attr_board_tracer,
3464 &dev_attr_io_delay,
3465 &dev_attr_device_delay,
3466 &dev_attr_logging_level,
3467 &dev_attr_fwfault_debug,
3468 &dev_attr_fw_queue_depth,
3469 &dev_attr_host_sas_address,
3470 &dev_attr_ioc_reset_count,
3471 &dev_attr_host_trace_buffer_size,
3472 &dev_attr_host_trace_buffer,
3473 &dev_attr_host_trace_buffer_enable,
3474 &dev_attr_reply_queue_count,
3475 &dev_attr_diag_trigger_master,
3476 &dev_attr_diag_trigger_event,
3477 &dev_attr_diag_trigger_scsi,
3478 &dev_attr_diag_trigger_mpi,
3479 &dev_attr_BRM_status,
3480 NULL,
3481 };
3482
3483 /* device attributes */
3484
3485 /**
3486 * _ctl_device_sas_address_show - sas address
3487 * @dev: pointer to embedded class device
3488  * @attr: device attribute (unused)
3489 * @buf: the buffer returned
3490 *
3491 * This is the sas address for the target
3492 *
3493  * A sysfs 'read-only' sdev attribute.
3494 */
3495 static ssize_t
3496 _ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
3497 char *buf)
3498 {
3499 struct scsi_device *sdev = to_scsi_device(dev);
3500 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3501
3502 return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
3503 (unsigned long long)sas_device_priv_data->sas_target->sas_address);
3504 }
3505 static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
3506
3507 /**
3508 * _ctl_device_handle_show - device handle
3509 * @dev: pointer to embedded class device
3510  * @attr: device attribute (unused)
3511 * @buf: the buffer returned
3512 *
3513 * This is the firmware assigned device handle
3514 *
3515  * A sysfs 'read-only' sdev attribute.
3516 */
3517 static ssize_t
3518 _ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
3519 char *buf)
3520 {
3521 struct scsi_device *sdev = to_scsi_device(dev);
3522 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3523
3524 return snprintf(buf, PAGE_SIZE, "0x%04x\n",
3525 sas_device_priv_data->sas_target->handle);
3526 }
3527 static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
3528
3529 /**
3530  * _ctl_device_ncq_prio_enable_show - send prioritized io commands to device
3531  * @dev: pointer to embedded device
3532  * @attr: device attribute (unused)
3533 * @buf: the buffer returned
3534 *
3535 * A sysfs 'read/write' sdev attribute, only works with SATA
3536 */
3537 static ssize_t
3538 _ctl_device_ncq_prio_enable_show(struct device *dev,
3539 struct device_attribute *attr, char *buf)
3540 {
3541 struct scsi_device *sdev = to_scsi_device(dev);
3542 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3543
3544 return snprintf(buf, PAGE_SIZE, "%d\n",
3545 sas_device_priv_data->ncq_prio_enable);
3546 }
3547
3548 static ssize_t
3549 _ctl_device_ncq_prio_enable_store(struct device *dev,
3550 struct device_attribute *attr,
3551 const char *buf, size_t count)
3552 {
3553 struct scsi_device *sdev = to_scsi_device(dev);
3554 struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
3555 	bool ncq_prio_enable = false;
3556
3557 if (kstrtobool(buf, &ncq_prio_enable))
3558 return -EINVAL;
3559
3560 if (!scsih_ncq_prio_supp(sdev))
3561 return -EINVAL;
3562
3563 sas_device_priv_data->ncq_prio_enable = ncq_prio_enable;
3564 return strlen(buf);
3565 }
3566 static DEVICE_ATTR(sas_ncq_prio_enable, S_IRUGO | S_IWUSR,
3567 _ctl_device_ncq_prio_enable_show,
3568 _ctl_device_ncq_prio_enable_store);
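/*
 * Illustrative userspace usage (not part of the driver): the per-device
 * attributes defined above appear under the SCSI device's sysfs directory,
 * e.g. /sys/block/sdX/device/.  A minimal sketch, assuming a hypothetical
 * disk "sdb" behind this HBA whose SATA device supports NCQ priority:
 *
 *	char addr[32] = "";
 *	int fd;
 *
 *	fd = open("/sys/block/sdb/device/sas_address", O_RDONLY);
 *	if (fd >= 0) {
 *		read(fd, addr, sizeof(addr) - 1);
 *		close(fd);
 *	}
 *
 *	fd = open("/sys/block/sdb/device/sas_ncq_prio_enable", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 *
 * The store handler parses the value with kstrtobool(), so "0"/"1" and
 * "y"/"n" are accepted, and it returns -EINVAL when the device does not
 * support NCQ priority (scsih_ncq_prio_supp()).
 */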
3569
3570 struct device_attribute *mpt3sas_dev_attrs[] = {
3571 &dev_attr_sas_address,
3572 &dev_attr_sas_device_handle,
3573 &dev_attr_sas_ncq_prio_enable,
3574 NULL,
3575 };
3576
3577 /* file operations table for mpt3ctl device */
3578 static const struct file_operations ctl_fops = {
3579 .owner = THIS_MODULE,
3580 .unlocked_ioctl = _ctl_ioctl,
3581 .poll = _ctl_poll,
3582 .fasync = _ctl_fasync,
3583 #ifdef CONFIG_COMPAT
3584 .compat_ioctl = _ctl_ioctl_compat,
3585 #endif
3586 };
3587
3588 /* file operations table for mpt2ctl device */
3589 static const struct file_operations ctl_gen2_fops = {
3590 .owner = THIS_MODULE,
3591 .unlocked_ioctl = _ctl_mpt2_ioctl,
3592 .poll = _ctl_poll,
3593 .fasync = _ctl_fasync,
3594 #ifdef CONFIG_COMPAT
3595 .compat_ioctl = _ctl_mpt2_ioctl_compat,
3596 #endif
3597 };
3598
3599 static struct miscdevice ctl_dev = {
3600 .minor = MPT3SAS_MINOR,
3601 .name = MPT3SAS_DEV_NAME,
3602 .fops = &ctl_fops,
3603 };
3604
3605 static struct miscdevice gen2_ctl_dev = {
3606 .minor = MPT2SAS_MINOR,
3607 .name = MPT2SAS_DEV_NAME,
3608 .fops = &ctl_gen2_fops,
3609 };
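/*
 * Illustrative userspace usage (not part of the driver): the misc devices
 * above expose the ioctl interface through character devices named after
 * MPT3SAS_DEV_NAME and MPT2SAS_DEV_NAME (typically /dev/mpt3ctl and
 * /dev/mpt2ctl).  A minimal sketch that waits for an asynchronous firmware
 * event via the .poll hook, assuming the gen3 node exists:
 *
 *	struct pollfd pfd;
 *	int fd = open("/dev/mpt3ctl", O_RDWR);
 *
 *	if (fd >= 0) {
 *		pfd.fd = fd;
 *		pfd.events = POLLIN;
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *			(an event is pending; fetch it with the ioctls
 *			 declared in mpt3sas_ctl.h)
 *		}
 *		close(fd);
 *	}
 *
 * Applications may instead request SIGIO delivery through the .fasync hook
 * by setting FASYNC on the open file descriptor.
 */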
3610
3611 /**
3612 * mpt3sas_ctl_init - main entry point for ctl.
3613  * @hbas_to_enumerate: 0 - register both mpt2ctl and mpt3ctl, 1 - only mpt2ctl, 2 - only mpt3ctl
3614 */
3615 void
3616 mpt3sas_ctl_init(ushort hbas_to_enumerate)
3617 {
3618 async_queue = NULL;
3619
3620 	/* Don't register the mpt3ctl ioctl device if
3621 	 * hbas_to_enumerate is one.
3622 	 */
3623 if (hbas_to_enumerate != 1)
3624 if (misc_register(&ctl_dev) < 0)
3625 pr_err("%s can't register misc device [minor=%d]\n",
3626 MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
3627
3628 	/* Don't register the mpt2ctl ioctl device if
3629 	 * hbas_to_enumerate is two.
3630 	 */
3631 if (hbas_to_enumerate != 2)
3632 if (misc_register(&gen2_ctl_dev) < 0)
3633 pr_err("%s can't register misc device [minor=%d]\n",
3634 MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR);
3635
3636 init_waitqueue_head(&ctl_poll_wait);
3637 }
3638
3639 /**
3640 * mpt3sas_ctl_exit - exit point for ctl
3641  * @hbas_to_enumerate: 0 - deregister both mpt2ctl and mpt3ctl, 1 - only mpt2ctl, 2 - only mpt3ctl
3642 */
3643 void
3644 mpt3sas_ctl_exit(ushort hbas_to_enumerate)
3645 {
3646 struct MPT3SAS_ADAPTER *ioc;
3647 int i;
3648
3649 list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
3650
3651 /* free memory associated to diag buffers */
3652 for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
3653 if (!ioc->diag_buffer[i])
3654 continue;
3655 if (!(ioc->diag_buffer_status[i] &
3656 MPT3_DIAG_BUFFER_IS_REGISTERED))
3657 continue;
3658 if ((ioc->diag_buffer_status[i] &
3659 MPT3_DIAG_BUFFER_IS_RELEASED))
3660 continue;
3661 pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
3662 ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
3663 ioc->diag_buffer[i] = NULL;
3664 ioc->diag_buffer_status[i] = 0;
3665 }
3666
3667 kfree(ioc->event_log);
3668 }
3669 if (hbas_to_enumerate != 1)
3670 misc_deregister(&ctl_dev);
3671 if (hbas_to_enumerate != 2)
3672 misc_deregister(&gen2_ctl_dev);
3673 }
3674