1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2009-2015 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * *
10 * This program is free software; you can redistribute it and/or *
11 * modify it under the terms of version 2 of the GNU General *
12 * Public License as published by the Free Software Foundation. *
13 * This program is distributed in the hope that it will be useful. *
14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18 * TO BE LEGALLY INVALID. See the GNU General Public License for *
19 * more details, a copy of which can be found in the file COPYING *
20 * included with this package. *
21 *******************************************************************/
22
23 #include <linux/interrupt.h>
24 #include <linux/mempool.h>
25 #include <linux/pci.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/list.h>
29 #include <linux/bsg-lib.h>
30 #include <linux/vmalloc.h>
31
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_host.h>
34 #include <scsi/scsi_transport_fc.h>
35 #include <scsi/scsi_bsg_fc.h>
36 #include <scsi/fc/fc_fs.h>
37
38 #include "lpfc_hw4.h"
39 #include "lpfc_hw.h"
40 #include "lpfc_sli.h"
41 #include "lpfc_sli4.h"
42 #include "lpfc_nl.h"
43 #include "lpfc_bsg.h"
44 #include "lpfc_disc.h"
45 #include "lpfc_scsi.h"
46 #include "lpfc.h"
47 #include "lpfc_logmsg.h"
48 #include "lpfc_crtn.h"
49 #include "lpfc_debugfs.h"
50 #include "lpfc_vport.h"
51 #include "lpfc_version.h"
52
53 struct lpfc_bsg_event {
54 struct list_head node;
55 struct kref kref;
56 wait_queue_head_t wq;
57
58 /* Event type and waiter identifiers */
59 uint32_t type_mask;
60 uint32_t req_id;
61 uint32_t reg_id;
62
63 /* next two flags are here for the auto-delete logic */
64 unsigned long wait_time_stamp;
65 int waiting;
66
67 /* seen and not seen events */
68 struct list_head events_to_get;
69 struct list_head events_to_see;
70
71 /* driver data associated with the job */
72 void *dd_data;
73 };
74
75 struct lpfc_bsg_iocb {
76 struct lpfc_iocbq *cmdiocbq;
77 struct lpfc_dmabuf *rmp;
78 struct lpfc_nodelist *ndlp;
79 };
80
81 struct lpfc_bsg_mbox {
82 LPFC_MBOXQ_t *pmboxq;
83 MAILBOX_t *mb;
84 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
85 uint8_t *ext; /* extended mailbox data */
86 uint32_t mbOffset; /* from app */
87 uint32_t inExtWLen; /* from app */
88 uint32_t outExtWLen; /* from app */
89 };
90
91 #define MENLO_DID 0x0000FC0E
92
93 struct lpfc_bsg_menlo {
94 struct lpfc_iocbq *cmdiocbq;
95 struct lpfc_dmabuf *rmp;
96 };
97
98 #define TYPE_EVT 1
99 #define TYPE_IOCB 2
100 #define TYPE_MBOX 3
101 #define TYPE_MENLO 4
102 struct bsg_job_data {
103 uint32_t type;
104 struct bsg_job *set_job; /* job waiting for this iocb to finish */
105 union {
106 struct lpfc_bsg_event *evt;
107 struct lpfc_bsg_iocb iocb;
108 struct lpfc_bsg_mbox mbox;
109 struct lpfc_bsg_menlo menlo;
110 } context_un;
111 };
112
113 struct event_data {
114 struct list_head node;
115 uint32_t type;
116 uint32_t immed_dat;
117 void *data;
118 uint32_t len;
119 };
120
121 #define BUF_SZ_4K 4096
122 #define SLI_CT_ELX_LOOPBACK 0x10
123
124 enum ELX_LOOPBACK_CMD {
125 ELX_LOOPBACK_XRI_SETUP,
126 ELX_LOOPBACK_DATA,
127 };
128
129 #define ELX_LOOPBACK_HEADER_SZ \
130 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
131
132 struct lpfc_dmabufext {
133 struct lpfc_dmabuf dma;
134 uint32_t size;
135 uint32_t flag;
136 };
137
138 static void
lpfc_free_bsg_buffers(struct lpfc_hba * phba,struct lpfc_dmabuf * mlist)139 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
140 {
141 struct lpfc_dmabuf *mlast, *next_mlast;
142
143 if (mlist) {
144 list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
145 list) {
146 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
147 list_del(&mlast->list);
148 kfree(mlast);
149 }
150 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
151 kfree(mlist);
152 }
153 return;
154 }
155
156 static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba * phba,unsigned int size,int outbound_buffers,struct ulp_bde64 * bpl,int * bpl_entries)157 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
158 int outbound_buffers, struct ulp_bde64 *bpl,
159 int *bpl_entries)
160 {
161 struct lpfc_dmabuf *mlist = NULL;
162 struct lpfc_dmabuf *mp;
163 unsigned int bytes_left = size;
164
165 /* Verify we can support the size specified */
166 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
167 return NULL;
168
169 /* Determine the number of dma buffers to allocate */
170 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
171 size/LPFC_BPL_SIZE);
172
173 /* Allocate dma buffer and place in BPL passed */
174 while (bytes_left) {
175 /* Allocate dma buffer */
176 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
177 if (!mp) {
178 if (mlist)
179 lpfc_free_bsg_buffers(phba, mlist);
180 return NULL;
181 }
182
183 INIT_LIST_HEAD(&mp->list);
184 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
185
186 if (!mp->virt) {
187 kfree(mp);
188 if (mlist)
189 lpfc_free_bsg_buffers(phba, mlist);
190 return NULL;
191 }
192
193 /* Queue it to a linked list */
194 if (!mlist)
195 mlist = mp;
196 else
197 list_add_tail(&mp->list, &mlist->list);
198
199 /* Add buffer to buffer pointer list */
200 if (outbound_buffers)
201 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
202 else
203 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
204 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
205 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
206 bpl->tus.f.bdeSize = (uint16_t)
207 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
208 bytes_left);
209 bytes_left -= bpl->tus.f.bdeSize;
210 bpl->tus.w = le32_to_cpu(bpl->tus.w);
211 bpl++;
212 }
213 return mlist;
214 }
215
216 static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf * dma_buffers,struct bsg_buffer * bsg_buffers,unsigned int bytes_to_transfer,int to_buffers)217 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
218 struct bsg_buffer *bsg_buffers,
219 unsigned int bytes_to_transfer, int to_buffers)
220 {
221
222 struct lpfc_dmabuf *mp;
223 unsigned int transfer_bytes, bytes_copied = 0;
224 unsigned int sg_offset, dma_offset;
225 unsigned char *dma_address, *sg_address;
226 LIST_HEAD(temp_list);
227 struct sg_mapping_iter miter;
228 unsigned long flags;
229 unsigned int sg_flags = SG_MITER_ATOMIC;
230 bool sg_valid;
231
232 list_splice_init(&dma_buffers->list, &temp_list);
233 list_add(&dma_buffers->list, &temp_list);
234 sg_offset = 0;
235 if (to_buffers)
236 sg_flags |= SG_MITER_FROM_SG;
237 else
238 sg_flags |= SG_MITER_TO_SG;
239 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
240 sg_flags);
241 local_irq_save(flags);
242 sg_valid = sg_miter_next(&miter);
243 list_for_each_entry(mp, &temp_list, list) {
244 dma_offset = 0;
245 while (bytes_to_transfer && sg_valid &&
246 (dma_offset < LPFC_BPL_SIZE)) {
247 dma_address = mp->virt + dma_offset;
248 if (sg_offset) {
249 /* Continue previous partial transfer of sg */
250 sg_address = miter.addr + sg_offset;
251 transfer_bytes = miter.length - sg_offset;
252 } else {
253 sg_address = miter.addr;
254 transfer_bytes = miter.length;
255 }
256 if (bytes_to_transfer < transfer_bytes)
257 transfer_bytes = bytes_to_transfer;
258 if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
259 transfer_bytes = LPFC_BPL_SIZE - dma_offset;
260 if (to_buffers)
261 memcpy(dma_address, sg_address, transfer_bytes);
262 else
263 memcpy(sg_address, dma_address, transfer_bytes);
264 dma_offset += transfer_bytes;
265 sg_offset += transfer_bytes;
266 bytes_to_transfer -= transfer_bytes;
267 bytes_copied += transfer_bytes;
268 if (sg_offset >= miter.length) {
269 sg_offset = 0;
270 sg_valid = sg_miter_next(&miter);
271 }
272 }
273 }
274 sg_miter_stop(&miter);
275 local_irq_restore(flags);
276 list_del_init(&dma_buffers->list);
277 list_splice(&temp_list, &dma_buffers->list);
278 return bytes_copied;
279 }
280
281 /**
282 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
283 * @phba: Pointer to HBA context object.
284 * @cmdiocbq: Pointer to command iocb.
285 * @rspiocbq: Pointer to response iocb.
286 *
287 * This function is the completion handler for iocbs issued using
288 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
289 * ring event handler function without any lock held. This function
290 * can be called from both worker thread context and interrupt
291 * context. This function also can be called from another thread which
292 * cleans up the SLI layer objects.
293 * This function copies the contents of the response iocb to the
294 * response iocb memory object provided by the caller of
295 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
296 * sleeps for the iocb completion.
297 **/
298 static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)299 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
300 struct lpfc_iocbq *cmdiocbq,
301 struct lpfc_iocbq *rspiocbq)
302 {
303 struct bsg_job_data *dd_data;
304 struct bsg_job *job;
305 struct fc_bsg_reply *bsg_reply;
306 IOCB_t *rsp;
307 struct lpfc_dmabuf *bmp, *cmp, *rmp;
308 struct lpfc_nodelist *ndlp;
309 struct lpfc_bsg_iocb *iocb;
310 unsigned long flags;
311 unsigned int rsp_size;
312 int rc = 0;
313
314 dd_data = cmdiocbq->context1;
315
316 /* Determine if job has been aborted */
317 spin_lock_irqsave(&phba->ct_ev_lock, flags);
318 job = dd_data->set_job;
319 if (job) {
320 bsg_reply = job->reply;
321 /* Prevent timeout handling from trying to abort job */
322 job->dd_data = NULL;
323 }
324 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
325
326 /* Close the timeout handler abort window */
327 spin_lock_irqsave(&phba->hbalock, flags);
328 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
329 spin_unlock_irqrestore(&phba->hbalock, flags);
330
331 iocb = &dd_data->context_un.iocb;
332 ndlp = iocb->cmdiocbq->context_un.ndlp;
333 rmp = iocb->rmp;
334 cmp = cmdiocbq->context2;
335 bmp = cmdiocbq->context3;
336 rsp = &rspiocbq->iocb;
337
338 /* Copy the completed data or set the error status */
339
340 if (job) {
341 if (rsp->ulpStatus) {
342 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
343 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
344 case IOERR_SEQUENCE_TIMEOUT:
345 rc = -ETIMEDOUT;
346 break;
347 case IOERR_INVALID_RPI:
348 rc = -EFAULT;
349 break;
350 default:
351 rc = -EACCES;
352 break;
353 }
354 } else {
355 rc = -EACCES;
356 }
357 } else {
358 rsp_size = rsp->un.genreq64.bdl.bdeSize;
359 bsg_reply->reply_payload_rcv_len =
360 lpfc_bsg_copy_data(rmp, &job->reply_payload,
361 rsp_size, 0);
362 }
363 }
364
365 lpfc_free_bsg_buffers(phba, cmp);
366 lpfc_free_bsg_buffers(phba, rmp);
367 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
368 kfree(bmp);
369 lpfc_nlp_put(ndlp);
370 lpfc_sli_release_iocbq(phba, cmdiocbq);
371 kfree(dd_data);
372
373 /* Complete the job if the job is still active */
374
375 if (job) {
376 bsg_reply->result = rc;
377 bsg_job_done(job, bsg_reply->result,
378 bsg_reply->reply_payload_rcv_len);
379 }
380 return;
381 }
382
383 /**
384 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
385 * @job: fc_bsg_job to handle
386 **/
387 static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job * job)388 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
389 {
390 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
391 struct lpfc_hba *phba = vport->phba;
392 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
393 struct lpfc_nodelist *ndlp = rdata->pnode;
394 struct fc_bsg_reply *bsg_reply = job->reply;
395 struct ulp_bde64 *bpl = NULL;
396 uint32_t timeout;
397 struct lpfc_iocbq *cmdiocbq = NULL;
398 IOCB_t *cmd;
399 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
400 int request_nseg;
401 int reply_nseg;
402 struct bsg_job_data *dd_data;
403 unsigned long flags;
404 uint32_t creg_val;
405 int rc = 0;
406 int iocb_stat;
407
408 /* in case no data is transferred */
409 bsg_reply->reply_payload_rcv_len = 0;
410
411 if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
412 return -ENODEV;
413
414 /* allocate our bsg tracking structure */
415 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
416 if (!dd_data) {
417 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
418 "2733 Failed allocation of dd_data\n");
419 rc = -ENOMEM;
420 goto no_dd_data;
421 }
422
423 cmdiocbq = lpfc_sli_get_iocbq(phba);
424 if (!cmdiocbq) {
425 rc = -ENOMEM;
426 goto free_dd;
427 }
428
429 cmd = &cmdiocbq->iocb;
430
431 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
432 if (!bmp) {
433 rc = -ENOMEM;
434 goto free_cmdiocbq;
435 }
436 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
437 if (!bmp->virt) {
438 rc = -ENOMEM;
439 goto free_bmp;
440 }
441
442 INIT_LIST_HEAD(&bmp->list);
443
444 bpl = (struct ulp_bde64 *) bmp->virt;
445 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
446 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
447 1, bpl, &request_nseg);
448 if (!cmp) {
449 rc = -ENOMEM;
450 goto free_bmp;
451 }
452 lpfc_bsg_copy_data(cmp, &job->request_payload,
453 job->request_payload.payload_len, 1);
454
455 bpl += request_nseg;
456 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
457 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
458 bpl, &reply_nseg);
459 if (!rmp) {
460 rc = -ENOMEM;
461 goto free_cmp;
462 }
463
464 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
465 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
466 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
467 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
468 cmd->un.genreq64.bdl.bdeSize =
469 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
470 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
471 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
472 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
473 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
474 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
475 cmd->ulpBdeCount = 1;
476 cmd->ulpLe = 1;
477 cmd->ulpClass = CLASS3;
478 cmd->ulpContext = ndlp->nlp_rpi;
479 if (phba->sli_rev == LPFC_SLI_REV4)
480 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
481 cmd->ulpOwner = OWN_CHIP;
482 cmdiocbq->vport = phba->pport;
483 cmdiocbq->context3 = bmp;
484 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
485 timeout = phba->fc_ratov * 2;
486 cmd->ulpTimeout = timeout;
487
488 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
489 cmdiocbq->context1 = dd_data;
490 cmdiocbq->context2 = cmp;
491 cmdiocbq->context3 = bmp;
492
493 dd_data->type = TYPE_IOCB;
494 dd_data->set_job = job;
495 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
496 dd_data->context_un.iocb.rmp = rmp;
497 job->dd_data = dd_data;
498
499 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
500 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
501 rc = -EIO ;
502 goto free_rmp;
503 }
504 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
505 writel(creg_val, phba->HCregaddr);
506 readl(phba->HCregaddr); /* flush */
507 }
508
509 cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
510 if (!cmdiocbq->context_un.ndlp) {
511 rc = -ENODEV;
512 goto free_rmp;
513 }
514
515 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
516 if (iocb_stat == IOCB_SUCCESS) {
517 spin_lock_irqsave(&phba->hbalock, flags);
518 /* make sure the I/O had not been completed yet */
519 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
520 /* open up abort window to timeout handler */
521 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
522 }
523 spin_unlock_irqrestore(&phba->hbalock, flags);
524 return 0; /* done for now */
525 } else if (iocb_stat == IOCB_BUSY) {
526 rc = -EAGAIN;
527 } else {
528 rc = -EIO;
529 }
530
531 /* iocb failed so cleanup */
532 lpfc_nlp_put(ndlp);
533
534 free_rmp:
535 lpfc_free_bsg_buffers(phba, rmp);
536 free_cmp:
537 lpfc_free_bsg_buffers(phba, cmp);
538 free_bmp:
539 if (bmp->virt)
540 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
541 kfree(bmp);
542 free_cmdiocbq:
543 lpfc_sli_release_iocbq(phba, cmdiocbq);
544 free_dd:
545 kfree(dd_data);
546 no_dd_data:
547 /* make error code available to userspace */
548 bsg_reply->result = rc;
549 job->dd_data = NULL;
550 return rc;
551 }
552
553 /**
554 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
555 * @phba: Pointer to HBA context object.
556 * @cmdiocbq: Pointer to command iocb.
557 * @rspiocbq: Pointer to response iocb.
558 *
559 * This function is the completion handler for iocbs issued using
560 * lpfc_bsg_rport_els_cmp function. This function is called by the
561 * ring event handler function without any lock held. This function
562 * can be called from both worker thread context and interrupt
563 * context. This function also can be called from other thread which
564 * cleans up the SLI layer objects.
565 * This function copies the contents of the response iocb to the
566 * response iocb memory object provided by the caller of
567 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
568 * sleeps for the iocb completion.
569 **/
570 static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)571 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
572 struct lpfc_iocbq *cmdiocbq,
573 struct lpfc_iocbq *rspiocbq)
574 {
575 struct bsg_job_data *dd_data;
576 struct bsg_job *job;
577 struct fc_bsg_reply *bsg_reply;
578 IOCB_t *rsp;
579 struct lpfc_nodelist *ndlp;
580 struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
581 struct fc_bsg_ctels_reply *els_reply;
582 uint8_t *rjt_data;
583 unsigned long flags;
584 unsigned int rsp_size;
585 int rc = 0;
586
587 dd_data = cmdiocbq->context1;
588 ndlp = dd_data->context_un.iocb.ndlp;
589 cmdiocbq->context1 = ndlp;
590
591 /* Determine if job has been aborted */
592 spin_lock_irqsave(&phba->ct_ev_lock, flags);
593 job = dd_data->set_job;
594 if (job) {
595 bsg_reply = job->reply;
596 /* Prevent timeout handling from trying to abort job */
597 job->dd_data = NULL;
598 }
599 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
600
601 /* Close the timeout handler abort window */
602 spin_lock_irqsave(&phba->hbalock, flags);
603 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
604 spin_unlock_irqrestore(&phba->hbalock, flags);
605
606 rsp = &rspiocbq->iocb;
607 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
608 prsp = (struct lpfc_dmabuf *)pcmd->list.next;
609
610 /* Copy the completed job data or determine the job status if job is
611 * still active
612 */
613
614 if (job) {
615 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
616 rsp_size = rsp->un.elsreq64.bdl.bdeSize;
617 bsg_reply->reply_payload_rcv_len =
618 sg_copy_from_buffer(job->reply_payload.sg_list,
619 job->reply_payload.sg_cnt,
620 prsp->virt,
621 rsp_size);
622 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
623 bsg_reply->reply_payload_rcv_len =
624 sizeof(struct fc_bsg_ctels_reply);
625 /* LS_RJT data returned in word 4 */
626 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
627 els_reply = &bsg_reply->reply_data.ctels_reply;
628 els_reply->status = FC_CTELS_STATUS_REJECT;
629 els_reply->rjt_data.action = rjt_data[3];
630 els_reply->rjt_data.reason_code = rjt_data[2];
631 els_reply->rjt_data.reason_explanation = rjt_data[1];
632 els_reply->rjt_data.vendor_unique = rjt_data[0];
633 } else {
634 rc = -EIO;
635 }
636 }
637
638 lpfc_els_free_iocb(phba, cmdiocbq);
639
640 lpfc_nlp_put(ndlp);
641 kfree(dd_data);
642
643 /* Complete the job if the job is still active */
644
645 if (job) {
646 bsg_reply->result = rc;
647 bsg_job_done(job, bsg_reply->result,
648 bsg_reply->reply_payload_rcv_len);
649 }
650 return;
651 }
652
653 /**
654 * lpfc_bsg_rport_els - send an ELS command from a bsg request
655 * @job: fc_bsg_job to handle
656 **/
657 static int
lpfc_bsg_rport_els(struct bsg_job * job)658 lpfc_bsg_rport_els(struct bsg_job *job)
659 {
660 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
661 struct lpfc_hba *phba = vport->phba;
662 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
663 struct lpfc_nodelist *ndlp = rdata->pnode;
664 struct fc_bsg_request *bsg_request = job->request;
665 struct fc_bsg_reply *bsg_reply = job->reply;
666 uint32_t elscmd;
667 uint32_t cmdsize;
668 struct lpfc_iocbq *cmdiocbq;
669 uint16_t rpi = 0;
670 struct bsg_job_data *dd_data;
671 unsigned long flags;
672 uint32_t creg_val;
673 int rc = 0;
674
675 /* in case no data is transferred */
676 bsg_reply->reply_payload_rcv_len = 0;
677
678 /* verify the els command is not greater than the
679 * maximum ELS transfer size.
680 */
681
682 if (job->request_payload.payload_len > FCELSSIZE) {
683 rc = -EINVAL;
684 goto no_dd_data;
685 }
686
687 /* allocate our bsg tracking structure */
688 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
689 if (!dd_data) {
690 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
691 "2735 Failed allocation of dd_data\n");
692 rc = -ENOMEM;
693 goto no_dd_data;
694 }
695
696 elscmd = bsg_request->rqst_data.r_els.els_code;
697 cmdsize = job->request_payload.payload_len;
698
699 if (!lpfc_nlp_get(ndlp)) {
700 rc = -ENODEV;
701 goto free_dd_data;
702 }
703
704 /* We will use the allocated dma buffers by prep els iocb for command
705 * and response to ensure if the job times out and the request is freed,
706 * we won't be dma into memory that is no longer allocated to for the
707 * request.
708 */
709
710 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
711 ndlp->nlp_DID, elscmd);
712 if (!cmdiocbq) {
713 rc = -EIO;
714 goto release_ndlp;
715 }
716
717 /* Transfer the request payload to allocated command dma buffer */
718 sg_copy_to_buffer(job->request_payload.sg_list,
719 job->request_payload.sg_cnt,
720 ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
721 cmdsize);
722
723 rpi = ndlp->nlp_rpi;
724
725 if (phba->sli_rev == LPFC_SLI_REV4)
726 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
727 else
728 cmdiocbq->iocb.ulpContext = rpi;
729 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
730 cmdiocbq->context1 = dd_data;
731 cmdiocbq->context_un.ndlp = ndlp;
732 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
733 dd_data->type = TYPE_IOCB;
734 dd_data->set_job = job;
735 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
736 dd_data->context_un.iocb.ndlp = ndlp;
737 dd_data->context_un.iocb.rmp = NULL;
738 job->dd_data = dd_data;
739
740 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
741 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
742 rc = -EIO;
743 goto linkdown_err;
744 }
745 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
746 writel(creg_val, phba->HCregaddr);
747 readl(phba->HCregaddr); /* flush */
748 }
749
750 cmdiocbq->context1 = lpfc_nlp_get(ndlp);
751 if (!cmdiocbq->context1) {
752 rc = -EIO;
753 goto linkdown_err;
754 }
755
756 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
757 if (rc == IOCB_SUCCESS) {
758 spin_lock_irqsave(&phba->hbalock, flags);
759 /* make sure the I/O had not been completed/released */
760 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
761 /* open up abort window to timeout handler */
762 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
763 }
764 spin_unlock_irqrestore(&phba->hbalock, flags);
765 return 0; /* done for now */
766 } else if (rc == IOCB_BUSY) {
767 rc = -EAGAIN;
768 } else {
769 rc = -EIO;
770 }
771
772 /* I/O issue failed. Cleanup resources. */
773
774 linkdown_err:
775 lpfc_els_free_iocb(phba, cmdiocbq);
776
777 release_ndlp:
778 lpfc_nlp_put(ndlp);
779
780 free_dd_data:
781 kfree(dd_data);
782
783 no_dd_data:
784 /* make error code available to userspace */
785 bsg_reply->result = rc;
786 job->dd_data = NULL;
787 return rc;
788 }
789
790 /**
791 * lpfc_bsg_event_free - frees an allocated event structure
792 * @kref: Pointer to a kref.
793 *
794 * Called from kref_put. Back cast the kref into an event structure address.
795 * Free any events to get, delete associated nodes, free any events to see,
796 * free any data then free the event itself.
797 **/
798 static void
lpfc_bsg_event_free(struct kref * kref)799 lpfc_bsg_event_free(struct kref *kref)
800 {
801 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
802 kref);
803 struct event_data *ed;
804
805 list_del(&evt->node);
806
807 while (!list_empty(&evt->events_to_get)) {
808 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
809 list_del(&ed->node);
810 kfree(ed->data);
811 kfree(ed);
812 }
813
814 while (!list_empty(&evt->events_to_see)) {
815 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
816 list_del(&ed->node);
817 kfree(ed->data);
818 kfree(ed);
819 }
820
821 kfree(evt->dd_data);
822 kfree(evt);
823 }
824
825 /**
826 * lpfc_bsg_event_ref - increments the kref for an event
827 * @evt: Pointer to an event structure.
828 **/
829 static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event * evt)830 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
831 {
832 kref_get(&evt->kref);
833 }
834
835 /**
836 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
837 * @evt: Pointer to an event structure.
838 **/
839 static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event * evt)840 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
841 {
842 kref_put(&evt->kref, lpfc_bsg_event_free);
843 }
844
845 /**
846 * lpfc_bsg_event_new - allocate and initialize a event structure
847 * @ev_mask: Mask of events.
848 * @ev_reg_id: Event reg id.
849 * @ev_req_id: Event request id.
850 **/
851 static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask,int ev_reg_id,uint32_t ev_req_id)852 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
853 {
854 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
855
856 if (!evt)
857 return NULL;
858
859 INIT_LIST_HEAD(&evt->events_to_get);
860 INIT_LIST_HEAD(&evt->events_to_see);
861 evt->type_mask = ev_mask;
862 evt->req_id = ev_req_id;
863 evt->reg_id = ev_reg_id;
864 evt->wait_time_stamp = jiffies;
865 evt->dd_data = NULL;
866 init_waitqueue_head(&evt->wq);
867 kref_init(&evt->kref);
868 return evt;
869 }
870
871 /**
872 * diag_cmd_data_free - Frees an lpfc dma buffer extension
873 * @phba: Pointer to HBA context object.
874 * @mlist: Pointer to an lpfc dma buffer extension.
875 **/
876 static int
diag_cmd_data_free(struct lpfc_hba * phba,struct lpfc_dmabufext * mlist)877 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
878 {
879 struct lpfc_dmabufext *mlast;
880 struct pci_dev *pcidev;
881 struct list_head head, *curr, *next;
882
883 if ((!mlist) || (!lpfc_is_link_up(phba) &&
884 (phba->link_flag & LS_LOOPBACK_MODE))) {
885 return 0;
886 }
887
888 pcidev = phba->pcidev;
889 list_add_tail(&head, &mlist->dma.list);
890
891 list_for_each_safe(curr, next, &head) {
892 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
893 if (mlast->dma.virt)
894 dma_free_coherent(&pcidev->dev,
895 mlast->size,
896 mlast->dma.virt,
897 mlast->dma.phys);
898 kfree(mlast);
899 }
900 return 0;
901 }
902
903 /*
904 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
905 *
906 * This function is called when an unsolicited CT command is received. It
907 * forwards the event to any processes registered to receive CT events.
908 **/
909 int
lpfc_bsg_ct_unsol_event(struct lpfc_hba * phba,struct lpfc_sli_ring * pring,struct lpfc_iocbq * piocbq)910 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
911 struct lpfc_iocbq *piocbq)
912 {
913 uint32_t evt_req_id = 0;
914 uint32_t cmd;
915 struct lpfc_dmabuf *dmabuf = NULL;
916 struct lpfc_bsg_event *evt;
917 struct event_data *evt_dat = NULL;
918 struct lpfc_iocbq *iocbq;
919 size_t offset = 0;
920 struct list_head head;
921 struct ulp_bde64 *bde;
922 dma_addr_t dma_addr;
923 int i;
924 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
925 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
926 struct lpfc_hbq_entry *hbqe;
927 struct lpfc_sli_ct_request *ct_req;
928 struct bsg_job *job = NULL;
929 struct fc_bsg_reply *bsg_reply;
930 struct bsg_job_data *dd_data = NULL;
931 unsigned long flags;
932 int size = 0;
933
934 INIT_LIST_HEAD(&head);
935 list_add_tail(&head, &piocbq->list);
936
937 ct_req = (struct lpfc_sli_ct_request *)bdeBuf1->virt;
938 evt_req_id = ct_req->FsType;
939 cmd = ct_req->CommandResponse.bits.CmdRsp;
940
941 spin_lock_irqsave(&phba->ct_ev_lock, flags);
942 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
943 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
944 evt->req_id != evt_req_id)
945 continue;
946
947 lpfc_bsg_event_ref(evt);
948 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
949 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
950 if (evt_dat == NULL) {
951 spin_lock_irqsave(&phba->ct_ev_lock, flags);
952 lpfc_bsg_event_unref(evt);
953 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
954 "2614 Memory allocation failed for "
955 "CT event\n");
956 break;
957 }
958
959 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
960 /* take accumulated byte count from the last iocbq */
961 iocbq = list_entry(head.prev, typeof(*iocbq), list);
962 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
963 } else {
964 list_for_each_entry(iocbq, &head, list) {
965 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
966 evt_dat->len +=
967 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
968 }
969 }
970
971 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
972 if (evt_dat->data == NULL) {
973 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
974 "2615 Memory allocation failed for "
975 "CT event data, size %d\n",
976 evt_dat->len);
977 kfree(evt_dat);
978 spin_lock_irqsave(&phba->ct_ev_lock, flags);
979 lpfc_bsg_event_unref(evt);
980 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
981 goto error_ct_unsol_exit;
982 }
983
984 list_for_each_entry(iocbq, &head, list) {
985 size = 0;
986 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
987 bdeBuf1 = iocbq->context2;
988 bdeBuf2 = iocbq->context3;
989 }
990 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
991 if (phba->sli3_options &
992 LPFC_SLI3_HBQ_ENABLED) {
993 if (i == 0) {
994 hbqe = (struct lpfc_hbq_entry *)
995 &iocbq->iocb.un.ulpWord[0];
996 size = hbqe->bde.tus.f.bdeSize;
997 dmabuf = bdeBuf1;
998 } else if (i == 1) {
999 hbqe = (struct lpfc_hbq_entry *)
1000 &iocbq->iocb.unsli3.
1001 sli3Words[4];
1002 size = hbqe->bde.tus.f.bdeSize;
1003 dmabuf = bdeBuf2;
1004 }
1005 if ((offset + size) > evt_dat->len)
1006 size = evt_dat->len - offset;
1007 } else {
1008 size = iocbq->iocb.un.cont64[i].
1009 tus.f.bdeSize;
1010 bde = &iocbq->iocb.un.cont64[i];
1011 dma_addr = getPaddr(bde->addrHigh,
1012 bde->addrLow);
1013 dmabuf = lpfc_sli_ringpostbuf_get(phba,
1014 pring, dma_addr);
1015 }
1016 if (!dmabuf) {
1017 lpfc_printf_log(phba, KERN_ERR,
1018 LOG_LIBDFC, "2616 No dmabuf "
1019 "found for iocbq x%px\n",
1020 iocbq);
1021 kfree(evt_dat->data);
1022 kfree(evt_dat);
1023 spin_lock_irqsave(&phba->ct_ev_lock,
1024 flags);
1025 lpfc_bsg_event_unref(evt);
1026 spin_unlock_irqrestore(
1027 &phba->ct_ev_lock, flags);
1028 goto error_ct_unsol_exit;
1029 }
1030 memcpy((char *)(evt_dat->data) + offset,
1031 dmabuf->virt, size);
1032 offset += size;
1033 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1034 !(phba->sli3_options &
1035 LPFC_SLI3_HBQ_ENABLED)) {
1036 lpfc_sli_ringpostbuf_put(phba, pring,
1037 dmabuf);
1038 } else {
1039 switch (cmd) {
1040 case ELX_LOOPBACK_DATA:
1041 if (phba->sli_rev <
1042 LPFC_SLI_REV4)
1043 diag_cmd_data_free(phba,
1044 (struct lpfc_dmabufext
1045 *)dmabuf);
1046 break;
1047 case ELX_LOOPBACK_XRI_SETUP:
1048 if ((phba->sli_rev ==
1049 LPFC_SLI_REV2) ||
1050 (phba->sli3_options &
1051 LPFC_SLI3_HBQ_ENABLED
1052 )) {
1053 lpfc_in_buf_free(phba,
1054 dmabuf);
1055 } else {
1056 lpfc_post_buffer(phba,
1057 pring,
1058 1);
1059 }
1060 break;
1061 default:
1062 if (!(phba->sli3_options &
1063 LPFC_SLI3_HBQ_ENABLED))
1064 lpfc_post_buffer(phba,
1065 pring,
1066 1);
1067 break;
1068 }
1069 }
1070 }
1071 }
1072
1073 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1074 if (phba->sli_rev == LPFC_SLI_REV4) {
1075 evt_dat->immed_dat = phba->ctx_idx;
1076 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1077 /* Provide warning for over-run of the ct_ctx array */
1078 if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1079 UNSOL_VALID)
1080 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1081 "2717 CT context array entry "
1082 "[%d] over-run: oxid:x%x, "
1083 "sid:x%x\n", phba->ctx_idx,
1084 phba->ct_ctx[
1085 evt_dat->immed_dat].oxid,
1086 phba->ct_ctx[
1087 evt_dat->immed_dat].SID);
1088 phba->ct_ctx[evt_dat->immed_dat].rxid =
1089 piocbq->iocb.ulpContext;
1090 phba->ct_ctx[evt_dat->immed_dat].oxid =
1091 piocbq->iocb.unsli3.rcvsli3.ox_id;
1092 phba->ct_ctx[evt_dat->immed_dat].SID =
1093 piocbq->iocb.un.rcvels.remoteID;
1094 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1095 } else
1096 evt_dat->immed_dat = piocbq->iocb.ulpContext;
1097
1098 evt_dat->type = FC_REG_CT_EVENT;
1099 list_add(&evt_dat->node, &evt->events_to_see);
1100 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1101 wake_up_interruptible(&evt->wq);
1102 lpfc_bsg_event_unref(evt);
1103 break;
1104 }
1105
1106 list_move(evt->events_to_see.prev, &evt->events_to_get);
1107
1108 dd_data = (struct bsg_job_data *)evt->dd_data;
1109 job = dd_data->set_job;
1110 dd_data->set_job = NULL;
1111 lpfc_bsg_event_unref(evt);
1112 if (job) {
1113 bsg_reply = job->reply;
1114 bsg_reply->reply_payload_rcv_len = size;
1115 /* make error code available to userspace */
1116 bsg_reply->result = 0;
1117 job->dd_data = NULL;
1118 /* complete the job back to userspace */
1119 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1120 bsg_job_done(job, bsg_reply->result,
1121 bsg_reply->reply_payload_rcv_len);
1122 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1123 }
1124 }
1125 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1126
1127 error_ct_unsol_exit:
1128 if (!list_empty(&head))
1129 list_del(&head);
1130 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1131 (evt_req_id == SLI_CT_ELX_LOOPBACK))
1132 return 0;
1133 return 1;
1134 }
1135
1136 /**
1137 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1138 * @phba: Pointer to HBA context object.
1139 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1140 *
1141 * This function handles abort to the CT command toward management plane
1142 * for SLI4 port.
1143 *
1144 * If the pending context of a CT command to management plane present, clears
1145 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1146 * no context exists.
1147 **/
1148 int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba * phba,struct hbq_dmabuf * dmabuf)1149 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1150 {
1151 struct fc_frame_header fc_hdr;
1152 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1153 int ctx_idx, handled = 0;
1154 uint16_t oxid, rxid;
1155 uint32_t sid;
1156
1157 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1158 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1159 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1160 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
1161
1162 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1163 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1164 continue;
1165 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1166 continue;
1167 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1168 continue;
1169 if (phba->ct_ctx[ctx_idx].SID != sid)
1170 continue;
1171 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1172 handled = 1;
1173 }
1174 return handled;
1175 }
1176
1177 /**
1178 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1179 * @job: SET_EVENT fc_bsg_job
1180 **/
1181 static int
lpfc_bsg_hba_set_event(struct bsg_job * job)1182 lpfc_bsg_hba_set_event(struct bsg_job *job)
1183 {
1184 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1185 struct lpfc_hba *phba = vport->phba;
1186 struct fc_bsg_request *bsg_request = job->request;
1187 struct set_ct_event *event_req;
1188 struct lpfc_bsg_event *evt;
1189 int rc = 0;
1190 struct bsg_job_data *dd_data = NULL;
1191 uint32_t ev_mask;
1192 unsigned long flags;
1193
1194 if (job->request_len <
1195 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1196 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1197 "2612 Received SET_CT_EVENT below minimum "
1198 "size\n");
1199 rc = -EINVAL;
1200 goto job_error;
1201 }
1202
1203 event_req = (struct set_ct_event *)
1204 bsg_request->rqst_data.h_vendor.vendor_cmd;
1205 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
1206 FC_REG_EVENT_MASK);
1207 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1208 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1209 if (evt->reg_id == event_req->ev_reg_id) {
1210 lpfc_bsg_event_ref(evt);
1211 evt->wait_time_stamp = jiffies;
1212 dd_data = (struct bsg_job_data *)evt->dd_data;
1213 break;
1214 }
1215 }
1216 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1217
1218 if (&evt->node == &phba->ct_ev_waiters) {
1219 /* no event waiting struct yet - first call */
1220 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1221 if (dd_data == NULL) {
1222 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1223 "2734 Failed allocation of dd_data\n");
1224 rc = -ENOMEM;
1225 goto job_error;
1226 }
1227 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1228 event_req->ev_req_id);
1229 if (!evt) {
1230 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1231 "2617 Failed allocation of event "
1232 "waiter\n");
1233 rc = -ENOMEM;
1234 goto job_error;
1235 }
1236 dd_data->type = TYPE_EVT;
1237 dd_data->set_job = NULL;
1238 dd_data->context_un.evt = evt;
1239 evt->dd_data = (void *)dd_data;
1240 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1241 list_add(&evt->node, &phba->ct_ev_waiters);
1242 lpfc_bsg_event_ref(evt);
1243 evt->wait_time_stamp = jiffies;
1244 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1245 }
1246
1247 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1248 evt->waiting = 1;
1249 dd_data->set_job = job; /* for unsolicited command */
1250 job->dd_data = dd_data; /* for fc transport timeout callback*/
1251 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1252 return 0; /* call job done later */
1253
1254 job_error:
1255 kfree(dd_data);
1256 job->dd_data = NULL;
1257 return rc;
1258 }
1259
1260 /**
1261 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1262 * @job: GET_EVENT fc_bsg_job
1263 **/
1264 static int
lpfc_bsg_hba_get_event(struct bsg_job * job)1265 lpfc_bsg_hba_get_event(struct bsg_job *job)
1266 {
1267 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1268 struct lpfc_hba *phba = vport->phba;
1269 struct fc_bsg_request *bsg_request = job->request;
1270 struct fc_bsg_reply *bsg_reply = job->reply;
1271 struct get_ct_event *event_req;
1272 struct get_ct_event_reply *event_reply;
1273 struct lpfc_bsg_event *evt, *evt_next;
1274 struct event_data *evt_dat = NULL;
1275 unsigned long flags;
1276 uint32_t rc = 0;
1277
1278 if (job->request_len <
1279 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1280 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1281 "2613 Received GET_CT_EVENT request below "
1282 "minimum size\n");
1283 rc = -EINVAL;
1284 goto job_error;
1285 }
1286
1287 event_req = (struct get_ct_event *)
1288 bsg_request->rqst_data.h_vendor.vendor_cmd;
1289
1290 event_reply = (struct get_ct_event_reply *)
1291 bsg_reply->reply_data.vendor_reply.vendor_rsp;
1292 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1293 list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1294 if (evt->reg_id == event_req->ev_reg_id) {
1295 if (list_empty(&evt->events_to_get))
1296 break;
1297 lpfc_bsg_event_ref(evt);
1298 evt->wait_time_stamp = jiffies;
1299 evt_dat = list_entry(evt->events_to_get.prev,
1300 struct event_data, node);
1301 list_del(&evt_dat->node);
1302 break;
1303 }
1304 }
1305 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1306
1307 /* The app may continue to ask for event data until it gets
1308 * an error indicating that there isn't anymore
1309 */
1310 if (evt_dat == NULL) {
1311 bsg_reply->reply_payload_rcv_len = 0;
1312 rc = -ENOENT;
1313 goto job_error;
1314 }
1315
1316 if (evt_dat->len > job->request_payload.payload_len) {
1317 evt_dat->len = job->request_payload.payload_len;
1318 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1319 "2618 Truncated event data at %d "
1320 "bytes\n",
1321 job->request_payload.payload_len);
1322 }
1323
1324 event_reply->type = evt_dat->type;
1325 event_reply->immed_data = evt_dat->immed_dat;
1326 if (evt_dat->len > 0)
1327 bsg_reply->reply_payload_rcv_len =
1328 sg_copy_from_buffer(job->request_payload.sg_list,
1329 job->request_payload.sg_cnt,
1330 evt_dat->data, evt_dat->len);
1331 else
1332 bsg_reply->reply_payload_rcv_len = 0;
1333
1334 if (evt_dat) {
1335 kfree(evt_dat->data);
1336 kfree(evt_dat);
1337 }
1338
1339 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1340 lpfc_bsg_event_unref(evt);
1341 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1342 job->dd_data = NULL;
1343 bsg_reply->result = 0;
1344 bsg_job_done(job, bsg_reply->result,
1345 bsg_reply->reply_payload_rcv_len);
1346 return 0;
1347
1348 job_error:
1349 job->dd_data = NULL;
1350 bsg_reply->result = rc;
1351 return rc;
1352 }
1353
1354 /**
1355 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1356 * @phba: Pointer to HBA context object.
1357 * @cmdiocbq: Pointer to command iocb.
1358 * @rspiocbq: Pointer to response iocb.
1359 *
1360 * This function is the completion handler for iocbs issued using
1361 * lpfc_issue_ct_rsp_cmp function. This function is called by the
1362 * ring event handler function without any lock held. This function
1363 * can be called from both worker thread context and interrupt
1364 * context. This function also can be called from other thread which
1365 * cleans up the SLI layer objects.
1366 * This function copy the contents of the response iocb to the
1367 * response iocb memory object provided by the caller of
1368 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1369 * sleeps for the iocb completion.
1370 **/
1371 static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)1372 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1373 struct lpfc_iocbq *cmdiocbq,
1374 struct lpfc_iocbq *rspiocbq)
1375 {
1376 struct bsg_job_data *dd_data;
1377 struct bsg_job *job;
1378 struct fc_bsg_reply *bsg_reply;
1379 IOCB_t *rsp;
1380 struct lpfc_dmabuf *bmp, *cmp;
1381 struct lpfc_nodelist *ndlp;
1382 unsigned long flags;
1383 int rc = 0;
1384
1385 dd_data = cmdiocbq->context1;
1386
1387 /* Determine if job has been aborted */
1388 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1389 job = dd_data->set_job;
1390 if (job) {
1391 /* Prevent timeout handling from trying to abort job */
1392 job->dd_data = NULL;
1393 }
1394 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1395
1396 /* Close the timeout handler abort window */
1397 spin_lock_irqsave(&phba->hbalock, flags);
1398 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1399 spin_unlock_irqrestore(&phba->hbalock, flags);
1400
1401 ndlp = dd_data->context_un.iocb.ndlp;
1402 cmp = cmdiocbq->context2;
1403 bmp = cmdiocbq->context3;
1404 rsp = &rspiocbq->iocb;
1405
1406 /* Copy the completed job data or set the error status */
1407
1408 if (job) {
1409 bsg_reply = job->reply;
1410 if (rsp->ulpStatus) {
1411 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1412 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1413 case IOERR_SEQUENCE_TIMEOUT:
1414 rc = -ETIMEDOUT;
1415 break;
1416 case IOERR_INVALID_RPI:
1417 rc = -EFAULT;
1418 break;
1419 default:
1420 rc = -EACCES;
1421 break;
1422 }
1423 } else {
1424 rc = -EACCES;
1425 }
1426 } else {
1427 bsg_reply->reply_payload_rcv_len = 0;
1428 }
1429 }
1430
1431 lpfc_free_bsg_buffers(phba, cmp);
1432 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1433 kfree(bmp);
1434 lpfc_sli_release_iocbq(phba, cmdiocbq);
1435 lpfc_nlp_put(ndlp);
1436 kfree(dd_data);
1437
1438 /* Complete the job if the job is still active */
1439
1440 if (job) {
1441 bsg_reply->result = rc;
1442 bsg_job_done(job, bsg_reply->result,
1443 bsg_reply->reply_payload_rcv_len);
1444 }
1445 return;
1446 }
1447
1448 /**
1449 * lpfc_issue_ct_rsp - issue a ct response
1450 * @phba: Pointer to HBA context object.
1451 * @job: Pointer to the job object.
1452 * @tag: tag index value into the ports context exchange array.
1453 * @cmp: Pointer to a cmp dma buffer descriptor.
1454 * @bmp: Pointer to a bmp dma buffer descriptor.
1455 * @num_entry: Number of enties in the bde.
1456 **/
1457 static int
lpfc_issue_ct_rsp(struct lpfc_hba * phba,struct bsg_job * job,uint32_t tag,struct lpfc_dmabuf * cmp,struct lpfc_dmabuf * bmp,int num_entry)1458 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1459 struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1460 int num_entry)
1461 {
1462 IOCB_t *icmd;
1463 struct lpfc_iocbq *ctiocb = NULL;
1464 int rc = 0;
1465 struct lpfc_nodelist *ndlp = NULL;
1466 struct bsg_job_data *dd_data;
1467 unsigned long flags;
1468 uint32_t creg_val;
1469
1470 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1471 if (!ndlp) {
1472 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1473 "2721 ndlp null for oxid %x SID %x\n",
1474 phba->ct_ctx[tag].rxid,
1475 phba->ct_ctx[tag].SID);
1476 return IOCB_ERROR;
1477 }
1478
1479 /* allocate our bsg tracking structure */
1480 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1481 if (!dd_data) {
1482 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1483 "2736 Failed allocation of dd_data\n");
1484 rc = -ENOMEM;
1485 goto no_dd_data;
1486 }
1487
1488 /* Allocate buffer for command iocb */
1489 ctiocb = lpfc_sli_get_iocbq(phba);
1490 if (!ctiocb) {
1491 rc = -ENOMEM;
1492 goto no_ctiocb;
1493 }
1494
1495 icmd = &ctiocb->iocb;
1496 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1497 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1498 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1499 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1500 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1501 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1502 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1503 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1504 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1505
1506 /* Fill in rest of iocb */
1507 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1508 icmd->ulpBdeCount = 1;
1509 icmd->ulpLe = 1;
1510 icmd->ulpClass = CLASS3;
1511 if (phba->sli_rev == LPFC_SLI_REV4) {
1512 /* Do not issue unsol response if oxid not marked as valid */
1513 if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1514 rc = IOCB_ERROR;
1515 goto issue_ct_rsp_exit;
1516 }
1517 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1518 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1519 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1520 if (!ndlp) {
1521 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1522 "2721 ndlp null for oxid %x SID %x\n",
1523 icmd->ulpContext,
1524 phba->ct_ctx[tag].SID);
1525 rc = IOCB_ERROR;
1526 goto issue_ct_rsp_exit;
1527 }
1528
1529 /* get a refernece count so the ndlp doesn't go away while
1530 * we respond
1531 */
1532 if (!lpfc_nlp_get(ndlp)) {
1533 rc = IOCB_ERROR;
1534 goto issue_ct_rsp_exit;
1535 }
1536
1537 icmd->un.ulpWord[3] =
1538 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1539
1540 /* The exchange is done, mark the entry as invalid */
1541 phba->ct_ctx[tag].valid = UNSOL_INVALID;
1542 } else
1543 icmd->ulpContext = (ushort) tag;
1544
1545 icmd->ulpTimeout = phba->fc_ratov * 2;
1546
1547 /* Xmit CT response on exchange <xid> */
1548 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1549 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1550 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1551
1552 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1553 ctiocb->vport = phba->pport;
1554 ctiocb->context1 = dd_data;
1555 ctiocb->context2 = cmp;
1556 ctiocb->context3 = bmp;
1557 ctiocb->context_un.ndlp = ndlp;
1558 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1559
1560 dd_data->type = TYPE_IOCB;
1561 dd_data->set_job = job;
1562 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1563 dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
1564 if (!dd_data->context_un.iocb.ndlp) {
1565 rc = -IOCB_ERROR;
1566 goto issue_ct_rsp_exit;
1567 }
1568 dd_data->context_un.iocb.rmp = NULL;
1569 job->dd_data = dd_data;
1570
1571 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1572 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1573 rc = -IOCB_ERROR;
1574 goto issue_ct_rsp_exit;
1575 }
1576 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1577 writel(creg_val, phba->HCregaddr);
1578 readl(phba->HCregaddr); /* flush */
1579 }
1580
1581 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1582 if (rc == IOCB_SUCCESS) {
1583 spin_lock_irqsave(&phba->hbalock, flags);
1584 /* make sure the I/O had not been completed/released */
1585 if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
1586 /* open up abort window to timeout handler */
1587 ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
1588 }
1589 spin_unlock_irqrestore(&phba->hbalock, flags);
1590 return 0; /* done for now */
1591 }
1592
1593 /* iocb failed so cleanup */
1594 job->dd_data = NULL;
1595 lpfc_nlp_put(ndlp);
1596
1597 issue_ct_rsp_exit:
1598 lpfc_sli_release_iocbq(phba, ctiocb);
1599 no_ctiocb:
1600 kfree(dd_data);
1601 no_dd_data:
1602 return rc;
1603 }
1604
1605 /**
1606 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1607 * @job: SEND_MGMT_RESP fc_bsg_job
1608 **/
1609 static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job * job)1610 lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1611 {
1612 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1613 struct lpfc_hba *phba = vport->phba;
1614 struct fc_bsg_request *bsg_request = job->request;
1615 struct fc_bsg_reply *bsg_reply = job->reply;
1616 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1617 bsg_request->rqst_data.h_vendor.vendor_cmd;
1618 struct ulp_bde64 *bpl;
1619 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1620 int bpl_entries;
1621 uint32_t tag = mgmt_resp->tag;
1622 unsigned long reqbfrcnt =
1623 (unsigned long)job->request_payload.payload_len;
1624 int rc = 0;
1625
1626 /* in case no data is transferred */
1627 bsg_reply->reply_payload_rcv_len = 0;
1628
1629 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1630 rc = -ERANGE;
1631 goto send_mgmt_rsp_exit;
1632 }
1633
1634 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1635 if (!bmp) {
1636 rc = -ENOMEM;
1637 goto send_mgmt_rsp_exit;
1638 }
1639
1640 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1641 if (!bmp->virt) {
1642 rc = -ENOMEM;
1643 goto send_mgmt_rsp_free_bmp;
1644 }
1645
1646 INIT_LIST_HEAD(&bmp->list);
1647 bpl = (struct ulp_bde64 *) bmp->virt;
1648 bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1649 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1650 1, bpl, &bpl_entries);
1651 if (!cmp) {
1652 rc = -ENOMEM;
1653 goto send_mgmt_rsp_free_bmp;
1654 }
1655 lpfc_bsg_copy_data(cmp, &job->request_payload,
1656 job->request_payload.payload_len, 1);
1657
1658 rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1659
1660 if (rc == IOCB_SUCCESS)
1661 return 0; /* done for now */
1662
1663 rc = -EACCES;
1664
1665 lpfc_free_bsg_buffers(phba, cmp);
1666
1667 send_mgmt_rsp_free_bmp:
1668 if (bmp->virt)
1669 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1670 kfree(bmp);
1671 send_mgmt_rsp_exit:
1672 /* make error code available to userspace */
1673 bsg_reply->result = rc;
1674 job->dd_data = NULL;
1675 return rc;
1676 }
1677
1678 /**
1679 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1680 * @phba: Pointer to HBA context object.
1681 *
1682 * This function is responsible for preparing driver for diag loopback
1683 * on device.
1684 */
1685 static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba * phba)1686 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1687 {
1688 struct lpfc_vport **vports;
1689 struct Scsi_Host *shost;
1690 struct lpfc_sli *psli;
1691 struct lpfc_queue *qp = NULL;
1692 struct lpfc_sli_ring *pring;
1693 int i = 0;
1694
1695 psli = &phba->sli;
1696 if (!psli)
1697 return -ENODEV;
1698
1699
1700 if ((phba->link_state == LPFC_HBA_ERROR) ||
1701 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1702 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
1703 return -EACCES;
1704
1705 vports = lpfc_create_vport_work_array(phba);
1706 if (vports) {
1707 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1708 shost = lpfc_shost_from_vport(vports[i]);
1709 scsi_block_requests(shost);
1710 }
1711 lpfc_destroy_vport_work_array(phba, vports);
1712 } else {
1713 shost = lpfc_shost_from_vport(phba->pport);
1714 scsi_block_requests(shost);
1715 }
1716
1717 if (phba->sli_rev != LPFC_SLI_REV4) {
1718 pring = &psli->sli3_ring[LPFC_FCP_RING];
1719 lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
1720 return 0;
1721 }
1722 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1723 pring = qp->pring;
1724 if (!pring || (pring->ringno != LPFC_FCP_RING))
1725 continue;
1726 if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
1727 &pring->ring_lock))
1728 break;
1729 }
1730 return 0;
1731 }
1732
1733 /**
1734 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1735 * @phba: Pointer to HBA context object.
1736 *
1737 * This function is responsible for driver exit processing of setting up
1738 * diag loopback mode on device.
1739 */
1740 static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba * phba)1741 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1742 {
1743 struct Scsi_Host *shost;
1744 struct lpfc_vport **vports;
1745 int i;
1746
1747 vports = lpfc_create_vport_work_array(phba);
1748 if (vports) {
1749 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1750 shost = lpfc_shost_from_vport(vports[i]);
1751 scsi_unblock_requests(shost);
1752 }
1753 lpfc_destroy_vport_work_array(phba, vports);
1754 } else {
1755 shost = lpfc_shost_from_vport(phba->pport);
1756 scsi_unblock_requests(shost);
1757 }
1758 return;
1759 }
1760
1761 /**
1762 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1763 * @phba: Pointer to HBA context object.
1764 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1765 *
1766 * This function is responsible for placing an sli3 port into diagnostic
1767 * loopback mode in order to perform a diagnostic loopback test.
1768 * All new scsi requests are blocked, a small delay is used to allow the
1769 * scsi requests to complete then the link is brought down. If the link is
1770 * is placed in loopback mode then scsi requests are again allowed
1771 * so the scsi mid-layer doesn't give up on the port.
1772 * All of this is done in-line.
1773 */
1774 static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba * phba,struct bsg_job * job)1775 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1776 {
1777 struct fc_bsg_request *bsg_request = job->request;
1778 struct fc_bsg_reply *bsg_reply = job->reply;
1779 struct diag_mode_set *loopback_mode;
1780 uint32_t link_flags;
1781 uint32_t timeout;
1782 LPFC_MBOXQ_t *pmboxq = NULL;
1783 int mbxstatus = MBX_SUCCESS;
1784 int i = 0;
1785 int rc = 0;
1786
1787 /* no data to return just the return code */
1788 bsg_reply->reply_payload_rcv_len = 0;
1789
1790 if (job->request_len < sizeof(struct fc_bsg_request) +
1791 sizeof(struct diag_mode_set)) {
1792 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1793 "2738 Received DIAG MODE request size:%d "
1794 "below the minimum size:%d\n",
1795 job->request_len,
1796 (int)(sizeof(struct fc_bsg_request) +
1797 sizeof(struct diag_mode_set)));
1798 rc = -EINVAL;
1799 goto job_error;
1800 }
1801
1802 rc = lpfc_bsg_diag_mode_enter(phba);
1803 if (rc)
1804 goto job_error;
1805
1806 /* bring the link to diagnostic mode */
1807 loopback_mode = (struct diag_mode_set *)
1808 bsg_request->rqst_data.h_vendor.vendor_cmd;
1809 link_flags = loopback_mode->type;
1810 timeout = loopback_mode->timeout * 100;
1811
1812 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1813 if (!pmboxq) {
1814 rc = -ENOMEM;
1815 goto loopback_mode_exit;
1816 }
1817 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1818 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1819 pmboxq->u.mb.mbxOwner = OWN_HOST;
1820
1821 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1822
1823 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1824 /* wait for link down before proceeding */
1825 i = 0;
1826 while (phba->link_state != LPFC_LINK_DOWN) {
1827 if (i++ > timeout) {
1828 rc = -ETIMEDOUT;
1829 goto loopback_mode_exit;
1830 }
1831 msleep(10);
1832 }
1833
1834 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1835 if (link_flags == INTERNAL_LOOP_BACK)
1836 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1837 else
1838 pmboxq->u.mb.un.varInitLnk.link_flags =
1839 FLAGS_TOPOLOGY_MODE_LOOP;
1840
1841 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1842 pmboxq->u.mb.mbxOwner = OWN_HOST;
1843
1844 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1845 LPFC_MBOX_TMO);
1846
1847 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1848 rc = -ENODEV;
1849 else {
1850 spin_lock_irq(&phba->hbalock);
1851 phba->link_flag |= LS_LOOPBACK_MODE;
1852 spin_unlock_irq(&phba->hbalock);
1853 /* wait for the link attention interrupt */
1854 msleep(100);
1855
1856 i = 0;
1857 while (phba->link_state != LPFC_HBA_READY) {
1858 if (i++ > timeout) {
1859 rc = -ETIMEDOUT;
1860 break;
1861 }
1862
1863 msleep(10);
1864 }
1865 }
1866
1867 } else
1868 rc = -ENODEV;
1869
1870 loopback_mode_exit:
1871 lpfc_bsg_diag_mode_exit(phba);
1872
1873 /*
1874 * Let SLI layer release mboxq if mbox command completed after timeout.
1875 */
1876 if (pmboxq && mbxstatus != MBX_TIMEOUT)
1877 mempool_free(pmboxq, phba->mbox_mem_pool);
1878
1879 job_error:
1880 /* make error code available to userspace */
1881 bsg_reply->result = rc;
1882 /* complete the job back to userspace if no error */
1883 if (rc == 0)
1884 bsg_job_done(job, bsg_reply->result,
1885 bsg_reply->reply_payload_rcv_len);
1886 return rc;
1887 }
1888
1889 /**
1890 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1891 * @phba: Pointer to HBA context object.
1892 * @diag: Flag for set link to diag or nomral operation state.
1893 *
1894 * This function is responsible for issuing a sli4 mailbox command for setting
1895 * link to either diag state or normal operation state.
1896 */
1897 static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba * phba,uint32_t diag)1898 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1899 {
1900 LPFC_MBOXQ_t *pmboxq;
1901 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1902 uint32_t req_len, alloc_len;
1903 int mbxstatus = MBX_SUCCESS, rc;
1904
1905 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1906 if (!pmboxq)
1907 return -ENOMEM;
1908
1909 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1910 sizeof(struct lpfc_sli4_cfg_mhdr));
1911 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1912 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1913 req_len, LPFC_SLI4_MBX_EMBED);
1914 if (alloc_len != req_len) {
1915 rc = -ENOMEM;
1916 goto link_diag_state_set_out;
1917 }
1918 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1919 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1920 diag, phba->sli4_hba.lnk_info.lnk_tp,
1921 phba->sli4_hba.lnk_info.lnk_no);
1922
1923 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1924 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1925 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1926 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1927 phba->sli4_hba.lnk_info.lnk_no);
1928 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1929 phba->sli4_hba.lnk_info.lnk_tp);
1930 if (diag)
1931 bf_set(lpfc_mbx_set_diag_state_diag,
1932 &link_diag_state->u.req, 1);
1933 else
1934 bf_set(lpfc_mbx_set_diag_state_diag,
1935 &link_diag_state->u.req, 0);
1936
1937 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1938
1939 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1940 rc = 0;
1941 else
1942 rc = -ENODEV;
1943
1944 link_diag_state_set_out:
1945 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1946 mempool_free(pmboxq, phba->mbox_mem_pool);
1947
1948 return rc;
1949 }
1950
1951 /**
1952 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1953 * @phba: Pointer to HBA context object.
1954 * @mode: loopback mode to set
1955 * @link_no: link number for loopback mode to set
1956 *
1957 * This function is responsible for issuing a sli4 mailbox command for setting
1958 * up loopback diagnostic for a link.
1959 */
1960 static int
lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba * phba,int mode,uint32_t link_no)1961 lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1962 uint32_t link_no)
1963 {
1964 LPFC_MBOXQ_t *pmboxq;
1965 uint32_t req_len, alloc_len;
1966 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1967 int mbxstatus = MBX_SUCCESS, rc = 0;
1968
1969 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1970 if (!pmboxq)
1971 return -ENOMEM;
1972 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1973 sizeof(struct lpfc_sli4_cfg_mhdr));
1974 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1975 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1976 req_len, LPFC_SLI4_MBX_EMBED);
1977 if (alloc_len != req_len) {
1978 mempool_free(pmboxq, phba->mbox_mem_pool);
1979 return -ENOMEM;
1980 }
1981 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1982 bf_set(lpfc_mbx_set_diag_state_link_num,
1983 &link_diag_loopback->u.req, link_no);
1984
1985 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
1986 bf_set(lpfc_mbx_set_diag_state_link_type,
1987 &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
1988 } else {
1989 bf_set(lpfc_mbx_set_diag_state_link_type,
1990 &link_diag_loopback->u.req,
1991 phba->sli4_hba.lnk_info.lnk_tp);
1992 }
1993
1994 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1995 mode);
1996
1997 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1998 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1999 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2000 "3127 Failed setup loopback mode mailbox "
2001 "command, rc:x%x, status:x%x\n", mbxstatus,
2002 pmboxq->u.mb.mbxStatus);
2003 rc = -ENODEV;
2004 }
2005 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
2006 mempool_free(pmboxq, phba->mbox_mem_pool);
2007 return rc;
2008 }
2009
2010 /**
2011 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
2012 * @phba: Pointer to HBA context object.
2013 *
2014 * This function set up SLI4 FC port registrations for diagnostic run, which
2015 * includes all the rpis, vfi, and also vpi.
2016 */
2017 static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba * phba)2018 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
2019 {
2020 int rc;
2021
2022 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2023 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2024 "3136 Port still had vfi registered: "
2025 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2026 phba->pport->fc_myDID, phba->fcf.fcfi,
2027 phba->sli4_hba.vfi_ids[phba->pport->vfi],
2028 phba->vpi_ids[phba->pport->vpi]);
2029 return -EINVAL;
2030 }
2031 rc = lpfc_issue_reg_vfi(phba->pport);
2032 return rc;
2033 }
2034
2035 /**
2036 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2037 * @phba: Pointer to HBA context object.
2038 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2039 *
2040 * This function is responsible for placing an sli4 port into diagnostic
2041 * loopback mode in order to perform a diagnostic loopback test.
2042 */
2043 static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba * phba,struct bsg_job * job)2044 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2045 {
2046 struct fc_bsg_request *bsg_request = job->request;
2047 struct fc_bsg_reply *bsg_reply = job->reply;
2048 struct diag_mode_set *loopback_mode;
2049 uint32_t link_flags, timeout, link_no;
2050 int i, rc = 0;
2051
2052 /* no data to return just the return code */
2053 bsg_reply->reply_payload_rcv_len = 0;
2054
2055 if (job->request_len < sizeof(struct fc_bsg_request) +
2056 sizeof(struct diag_mode_set)) {
2057 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2058 "3011 Received DIAG MODE request size:%d "
2059 "below the minimum size:%d\n",
2060 job->request_len,
2061 (int)(sizeof(struct fc_bsg_request) +
2062 sizeof(struct diag_mode_set)));
2063 rc = -EINVAL;
2064 goto job_done;
2065 }
2066
2067 loopback_mode = (struct diag_mode_set *)
2068 bsg_request->rqst_data.h_vendor.vendor_cmd;
2069 link_flags = loopback_mode->type;
2070 timeout = loopback_mode->timeout * 100;
2071
2072 if (loopback_mode->physical_link == -1)
2073 link_no = phba->sli4_hba.lnk_info.lnk_no;
2074 else
2075 link_no = loopback_mode->physical_link;
2076
2077 if (link_flags == DISABLE_LOOP_BACK) {
2078 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2079 LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2080 link_no);
2081 if (!rc) {
2082 /* Unset the need disable bit */
2083 phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2084 }
2085 goto job_done;
2086 } else {
2087 /* Check if we need to disable the loopback state */
2088 if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2089 rc = -EPERM;
2090 goto job_done;
2091 }
2092 }
2093
2094 rc = lpfc_bsg_diag_mode_enter(phba);
2095 if (rc)
2096 goto job_done;
2097
2098 /* indicate we are in loobpack diagnostic mode */
2099 spin_lock_irq(&phba->hbalock);
2100 phba->link_flag |= LS_LOOPBACK_MODE;
2101 spin_unlock_irq(&phba->hbalock);
2102
2103 /* reset port to start frome scratch */
2104 rc = lpfc_selective_reset(phba);
2105 if (rc)
2106 goto job_done;
2107
2108 /* bring the link to diagnostic mode */
2109 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2110 "3129 Bring link to diagnostic state.\n");
2111
2112 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2113 if (rc) {
2114 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2115 "3130 Failed to bring link to diagnostic "
2116 "state, rc:x%x\n", rc);
2117 goto loopback_mode_exit;
2118 }
2119
2120 /* wait for link down before proceeding */
2121 i = 0;
2122 while (phba->link_state != LPFC_LINK_DOWN) {
2123 if (i++ > timeout) {
2124 rc = -ETIMEDOUT;
2125 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2126 "3131 Timeout waiting for link to "
2127 "diagnostic mode, timeout:%d ms\n",
2128 timeout * 10);
2129 goto loopback_mode_exit;
2130 }
2131 msleep(10);
2132 }
2133
2134 /* set up loopback mode */
2135 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2136 "3132 Set up loopback mode:x%x\n", link_flags);
2137
2138 switch (link_flags) {
2139 case INTERNAL_LOOP_BACK:
2140 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2141 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2142 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2143 link_no);
2144 } else {
2145 /* Trunk is configured, but link is not in this trunk */
2146 if (phba->sli4_hba.conf_trunk) {
2147 rc = -ELNRNG;
2148 goto loopback_mode_exit;
2149 }
2150
2151 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2152 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2153 link_no);
2154 }
2155
2156 if (!rc) {
2157 /* Set the need disable bit */
2158 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2159 }
2160
2161 break;
2162 case EXTERNAL_LOOP_BACK:
2163 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2164 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2165 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2166 link_no);
2167 } else {
2168 /* Trunk is configured, but link is not in this trunk */
2169 if (phba->sli4_hba.conf_trunk) {
2170 rc = -ELNRNG;
2171 goto loopback_mode_exit;
2172 }
2173
2174 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2175 LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2176 link_no);
2177 }
2178
2179 if (!rc) {
2180 /* Set the need disable bit */
2181 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2182 }
2183
2184 break;
2185 default:
2186 rc = -EINVAL;
2187 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2188 "3141 Loopback mode:x%x not supported\n",
2189 link_flags);
2190 goto loopback_mode_exit;
2191 }
2192
2193 if (!rc) {
2194 /* wait for the link attention interrupt */
2195 msleep(100);
2196 i = 0;
2197 while (phba->link_state < LPFC_LINK_UP) {
2198 if (i++ > timeout) {
2199 rc = -ETIMEDOUT;
2200 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2201 "3137 Timeout waiting for link up "
2202 "in loopback mode, timeout:%d ms\n",
2203 timeout * 10);
2204 break;
2205 }
2206 msleep(10);
2207 }
2208 }
2209
2210 /* port resource registration setup for loopback diagnostic */
2211 if (!rc) {
2212 /* set up a none zero myDID for loopback test */
2213 phba->pport->fc_myDID = 1;
2214 rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2215 } else
2216 goto loopback_mode_exit;
2217
2218 if (!rc) {
2219 /* wait for the port ready */
2220 msleep(100);
2221 i = 0;
2222 while (phba->link_state != LPFC_HBA_READY) {
2223 if (i++ > timeout) {
2224 rc = -ETIMEDOUT;
2225 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2226 "3133 Timeout waiting for port "
2227 "loopback mode ready, timeout:%d ms\n",
2228 timeout * 10);
2229 break;
2230 }
2231 msleep(10);
2232 }
2233 }
2234
2235 loopback_mode_exit:
2236 /* clear loopback diagnostic mode */
2237 if (rc) {
2238 spin_lock_irq(&phba->hbalock);
2239 phba->link_flag &= ~LS_LOOPBACK_MODE;
2240 spin_unlock_irq(&phba->hbalock);
2241 }
2242 lpfc_bsg_diag_mode_exit(phba);
2243
2244 job_done:
2245 /* make error code available to userspace */
2246 bsg_reply->result = rc;
2247 /* complete the job back to userspace if no error */
2248 if (rc == 0)
2249 bsg_job_done(job, bsg_reply->result,
2250 bsg_reply->reply_payload_rcv_len);
2251 return rc;
2252 }
2253
2254 /**
2255 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2256 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2257 *
2258 * This function is responsible for responding to check and dispatch bsg diag
2259 * command from the user to proper driver action routines.
2260 */
2261 static int
lpfc_bsg_diag_loopback_mode(struct bsg_job * job)2262 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2263 {
2264 struct Scsi_Host *shost;
2265 struct lpfc_vport *vport;
2266 struct lpfc_hba *phba;
2267 int rc;
2268
2269 shost = fc_bsg_to_shost(job);
2270 if (!shost)
2271 return -ENODEV;
2272 vport = shost_priv(shost);
2273 if (!vport)
2274 return -ENODEV;
2275 phba = vport->phba;
2276 if (!phba)
2277 return -ENODEV;
2278
2279 if (phba->sli_rev < LPFC_SLI_REV4)
2280 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2281 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2282 LPFC_SLI_INTF_IF_TYPE_2)
2283 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2284 else
2285 rc = -ENODEV;
2286
2287 return rc;
2288 }
2289
2290 /**
2291 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2292 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2293 *
2294 * This function is responsible for responding to check and dispatch bsg diag
2295 * command from the user to proper driver action routines.
2296 */
2297 static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job * job)2298 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2299 {
2300 struct fc_bsg_request *bsg_request = job->request;
2301 struct fc_bsg_reply *bsg_reply = job->reply;
2302 struct Scsi_Host *shost;
2303 struct lpfc_vport *vport;
2304 struct lpfc_hba *phba;
2305 struct diag_mode_set *loopback_mode_end_cmd;
2306 uint32_t timeout;
2307 int rc, i;
2308
2309 shost = fc_bsg_to_shost(job);
2310 if (!shost)
2311 return -ENODEV;
2312 vport = shost_priv(shost);
2313 if (!vport)
2314 return -ENODEV;
2315 phba = vport->phba;
2316 if (!phba)
2317 return -ENODEV;
2318
2319 if (phba->sli_rev < LPFC_SLI_REV4)
2320 return -ENODEV;
2321 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2322 LPFC_SLI_INTF_IF_TYPE_2)
2323 return -ENODEV;
2324
2325 /* clear loopback diagnostic mode */
2326 spin_lock_irq(&phba->hbalock);
2327 phba->link_flag &= ~LS_LOOPBACK_MODE;
2328 spin_unlock_irq(&phba->hbalock);
2329 loopback_mode_end_cmd = (struct diag_mode_set *)
2330 bsg_request->rqst_data.h_vendor.vendor_cmd;
2331 timeout = loopback_mode_end_cmd->timeout * 100;
2332
2333 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2334 if (rc) {
2335 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2336 "3139 Failed to bring link to diagnostic "
2337 "state, rc:x%x\n", rc);
2338 goto loopback_mode_end_exit;
2339 }
2340
2341 /* wait for link down before proceeding */
2342 i = 0;
2343 while (phba->link_state != LPFC_LINK_DOWN) {
2344 if (i++ > timeout) {
2345 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2346 "3140 Timeout waiting for link to "
2347 "diagnostic mode_end, timeout:%d ms\n",
2348 timeout * 10);
2349 /* there is nothing much we can do here */
2350 break;
2351 }
2352 msleep(10);
2353 }
2354
2355 /* reset port resource registrations */
2356 rc = lpfc_selective_reset(phba);
2357 phba->pport->fc_myDID = 0;
2358
2359 loopback_mode_end_exit:
2360 /* make return code available to userspace */
2361 bsg_reply->result = rc;
2362 /* complete the job back to userspace if no error */
2363 if (rc == 0)
2364 bsg_job_done(job, bsg_reply->result,
2365 bsg_reply->reply_payload_rcv_len);
2366 return rc;
2367 }
2368
2369 /**
2370 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2371 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2372 *
2373 * This function is to perform SLI4 diag link test request from the user
2374 * applicaiton.
2375 */
2376 static int
lpfc_sli4_bsg_link_diag_test(struct bsg_job * job)2377 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2378 {
2379 struct fc_bsg_request *bsg_request = job->request;
2380 struct fc_bsg_reply *bsg_reply = job->reply;
2381 struct Scsi_Host *shost;
2382 struct lpfc_vport *vport;
2383 struct lpfc_hba *phba;
2384 LPFC_MBOXQ_t *pmboxq;
2385 struct sli4_link_diag *link_diag_test_cmd;
2386 uint32_t req_len, alloc_len;
2387 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2388 union lpfc_sli4_cfg_shdr *shdr;
2389 uint32_t shdr_status, shdr_add_status;
2390 struct diag_status *diag_status_reply;
2391 int mbxstatus, rc = -ENODEV, rc1 = 0;
2392
2393 shost = fc_bsg_to_shost(job);
2394 if (!shost)
2395 goto job_error;
2396
2397 vport = shost_priv(shost);
2398 if (!vport)
2399 goto job_error;
2400
2401 phba = vport->phba;
2402 if (!phba)
2403 goto job_error;
2404
2405
2406 if (phba->sli_rev < LPFC_SLI_REV4)
2407 goto job_error;
2408
2409 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2410 LPFC_SLI_INTF_IF_TYPE_2)
2411 goto job_error;
2412
2413 if (job->request_len < sizeof(struct fc_bsg_request) +
2414 sizeof(struct sli4_link_diag)) {
2415 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2416 "3013 Received LINK DIAG TEST request "
2417 " size:%d below the minimum size:%d\n",
2418 job->request_len,
2419 (int)(sizeof(struct fc_bsg_request) +
2420 sizeof(struct sli4_link_diag)));
2421 rc = -EINVAL;
2422 goto job_error;
2423 }
2424
2425 rc = lpfc_bsg_diag_mode_enter(phba);
2426 if (rc)
2427 goto job_error;
2428
2429 link_diag_test_cmd = (struct sli4_link_diag *)
2430 bsg_request->rqst_data.h_vendor.vendor_cmd;
2431
2432 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2433
2434 if (rc)
2435 goto job_error;
2436
2437 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2438 if (!pmboxq) {
2439 rc = -ENOMEM;
2440 goto link_diag_test_exit;
2441 }
2442
2443 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2444 sizeof(struct lpfc_sli4_cfg_mhdr));
2445 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2446 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2447 req_len, LPFC_SLI4_MBX_EMBED);
2448 if (alloc_len != req_len) {
2449 rc = -ENOMEM;
2450 goto link_diag_test_exit;
2451 }
2452
2453 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2454 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2455 phba->sli4_hba.lnk_info.lnk_no);
2456 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2457 phba->sli4_hba.lnk_info.lnk_tp);
2458 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2459 link_diag_test_cmd->test_id);
2460 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2461 link_diag_test_cmd->loops);
2462 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2463 link_diag_test_cmd->test_version);
2464 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2465 link_diag_test_cmd->error_action);
2466
2467 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2468
2469 shdr = (union lpfc_sli4_cfg_shdr *)
2470 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2471 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2472 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2473 if (shdr_status || shdr_add_status || mbxstatus) {
2474 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2475 "3010 Run link diag test mailbox failed with "
2476 "mbx_status x%x status x%x, add_status x%x\n",
2477 mbxstatus, shdr_status, shdr_add_status);
2478 }
2479
2480 diag_status_reply = (struct diag_status *)
2481 bsg_reply->reply_data.vendor_reply.vendor_rsp;
2482
2483 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2484 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2485 "3012 Received Run link diag test reply "
2486 "below minimum size (%d): reply_len:%d\n",
2487 (int)(sizeof(*bsg_reply) +
2488 sizeof(*diag_status_reply)),
2489 job->reply_len);
2490 rc = -EINVAL;
2491 goto job_error;
2492 }
2493
2494 diag_status_reply->mbox_status = mbxstatus;
2495 diag_status_reply->shdr_status = shdr_status;
2496 diag_status_reply->shdr_add_status = shdr_add_status;
2497
2498 link_diag_test_exit:
2499 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2500
2501 if (pmboxq)
2502 mempool_free(pmboxq, phba->mbox_mem_pool);
2503
2504 lpfc_bsg_diag_mode_exit(phba);
2505
2506 job_error:
2507 /* make error code available to userspace */
2508 if (rc1 && !rc)
2509 rc = rc1;
2510 bsg_reply->result = rc;
2511 /* complete the job back to userspace if no error */
2512 if (rc == 0)
2513 bsg_job_done(job, bsg_reply->result,
2514 bsg_reply->reply_payload_rcv_len);
2515 return rc;
2516 }
2517
2518 /**
2519 * lpfcdiag_loop_self_reg - obtains a remote port login id
2520 * @phba: Pointer to HBA context object
2521 * @rpi: Pointer to a remote port login id
2522 *
2523 * This function obtains a remote port login id so the diag loopback test
2524 * can send and receive its own unsolicited CT command.
2525 **/
lpfcdiag_loop_self_reg(struct lpfc_hba * phba,uint16_t * rpi)2526 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2527 {
2528 LPFC_MBOXQ_t *mbox;
2529 struct lpfc_dmabuf *dmabuff;
2530 int status;
2531
2532 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2533 if (!mbox)
2534 return -ENOMEM;
2535
2536 if (phba->sli_rev < LPFC_SLI_REV4)
2537 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2538 (uint8_t *)&phba->pport->fc_sparam,
2539 mbox, *rpi);
2540 else {
2541 *rpi = lpfc_sli4_alloc_rpi(phba);
2542 if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2543 mempool_free(mbox, phba->mbox_mem_pool);
2544 return -EBUSY;
2545 }
2546 status = lpfc_reg_rpi(phba, phba->pport->vpi,
2547 phba->pport->fc_myDID,
2548 (uint8_t *)&phba->pport->fc_sparam,
2549 mbox, *rpi);
2550 }
2551
2552 if (status) {
2553 mempool_free(mbox, phba->mbox_mem_pool);
2554 if (phba->sli_rev == LPFC_SLI_REV4)
2555 lpfc_sli4_free_rpi(phba, *rpi);
2556 return -ENOMEM;
2557 }
2558
2559 dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2560 mbox->ctx_buf = NULL;
2561 mbox->ctx_ndlp = NULL;
2562 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2563
2564 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2565 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2566 kfree(dmabuff);
2567 if (status != MBX_TIMEOUT)
2568 mempool_free(mbox, phba->mbox_mem_pool);
2569 if (phba->sli_rev == LPFC_SLI_REV4)
2570 lpfc_sli4_free_rpi(phba, *rpi);
2571 return -ENODEV;
2572 }
2573
2574 if (phba->sli_rev < LPFC_SLI_REV4)
2575 *rpi = mbox->u.mb.un.varWords[0];
2576
2577 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2578 kfree(dmabuff);
2579 mempool_free(mbox, phba->mbox_mem_pool);
2580 return 0;
2581 }
2582
2583 /**
2584 * lpfcdiag_loop_self_unreg - unregs from the rpi
2585 * @phba: Pointer to HBA context object
2586 * @rpi: Remote port login id
2587 *
2588 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2589 **/
lpfcdiag_loop_self_unreg(struct lpfc_hba * phba,uint16_t rpi)2590 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2591 {
2592 LPFC_MBOXQ_t *mbox;
2593 int status;
2594
2595 /* Allocate mboxq structure */
2596 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2597 if (mbox == NULL)
2598 return -ENOMEM;
2599
2600 if (phba->sli_rev < LPFC_SLI_REV4)
2601 lpfc_unreg_login(phba, 0, rpi, mbox);
2602 else
2603 lpfc_unreg_login(phba, phba->pport->vpi,
2604 phba->sli4_hba.rpi_ids[rpi], mbox);
2605
2606 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2607
2608 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2609 if (status != MBX_TIMEOUT)
2610 mempool_free(mbox, phba->mbox_mem_pool);
2611 return -EIO;
2612 }
2613 mempool_free(mbox, phba->mbox_mem_pool);
2614 if (phba->sli_rev == LPFC_SLI_REV4)
2615 lpfc_sli4_free_rpi(phba, rpi);
2616 return 0;
2617 }
2618
2619 /**
2620 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2621 * @phba: Pointer to HBA context object
2622 * @rpi: Remote port login id
2623 * @txxri: Pointer to transmit exchange id
2624 * @rxxri: Pointer to response exchabge id
2625 *
2626 * This function obtains the transmit and receive ids required to send
2627 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2628 * flags are used to the unsolicted response handler is able to process
2629 * the ct command sent on the same port.
2630 **/
lpfcdiag_loop_get_xri(struct lpfc_hba * phba,uint16_t rpi,uint16_t * txxri,uint16_t * rxxri)2631 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2632 uint16_t *txxri, uint16_t * rxxri)
2633 {
2634 struct lpfc_bsg_event *evt;
2635 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2636 IOCB_t *cmd, *rsp;
2637 struct lpfc_dmabuf *dmabuf;
2638 struct ulp_bde64 *bpl = NULL;
2639 struct lpfc_sli_ct_request *ctreq = NULL;
2640 int ret_val = 0;
2641 int time_left;
2642 int iocb_stat = IOCB_SUCCESS;
2643 unsigned long flags;
2644
2645 *txxri = 0;
2646 *rxxri = 0;
2647 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2648 SLI_CT_ELX_LOOPBACK);
2649 if (!evt)
2650 return -ENOMEM;
2651
2652 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2653 list_add(&evt->node, &phba->ct_ev_waiters);
2654 lpfc_bsg_event_ref(evt);
2655 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2656
2657 cmdiocbq = lpfc_sli_get_iocbq(phba);
2658 rspiocbq = lpfc_sli_get_iocbq(phba);
2659
2660 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2661 if (dmabuf) {
2662 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2663 if (dmabuf->virt) {
2664 INIT_LIST_HEAD(&dmabuf->list);
2665 bpl = (struct ulp_bde64 *) dmabuf->virt;
2666 memset(bpl, 0, sizeof(*bpl));
2667 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2668 bpl->addrHigh =
2669 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2670 sizeof(*bpl)));
2671 bpl->addrLow =
2672 le32_to_cpu(putPaddrLow(dmabuf->phys +
2673 sizeof(*bpl)));
2674 bpl->tus.f.bdeFlags = 0;
2675 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2676 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2677 }
2678 }
2679
2680 if (cmdiocbq == NULL || rspiocbq == NULL ||
2681 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2682 dmabuf->virt == NULL) {
2683 ret_val = -ENOMEM;
2684 goto err_get_xri_exit;
2685 }
2686
2687 cmd = &cmdiocbq->iocb;
2688 rsp = &rspiocbq->iocb;
2689
2690 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2691
2692 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2693 ctreq->RevisionId.bits.InId = 0;
2694 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2695 ctreq->FsSubType = 0;
2696 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2697 ctreq->CommandResponse.bits.Size = 0;
2698
2699
2700 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2701 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2702 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2703 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2704
2705 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2706 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2707 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2708 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2709
2710 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2711 cmd->ulpBdeCount = 1;
2712 cmd->ulpLe = 1;
2713 cmd->ulpClass = CLASS3;
2714 cmd->ulpContext = rpi;
2715
2716 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2717 cmdiocbq->vport = phba->pport;
2718 cmdiocbq->iocb_cmpl = NULL;
2719
2720 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2721 rspiocbq,
2722 (phba->fc_ratov * 2)
2723 + LPFC_DRVR_TIMEOUT);
2724 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2725 ret_val = -EIO;
2726 goto err_get_xri_exit;
2727 }
2728 *txxri = rsp->ulpContext;
2729
2730 evt->waiting = 1;
2731 evt->wait_time_stamp = jiffies;
2732 time_left = wait_event_interruptible_timeout(
2733 evt->wq, !list_empty(&evt->events_to_see),
2734 msecs_to_jiffies(1000 *
2735 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2736 if (list_empty(&evt->events_to_see))
2737 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2738 else {
2739 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2740 list_move(evt->events_to_see.prev, &evt->events_to_get);
2741 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2742 *rxxri = (list_entry(evt->events_to_get.prev,
2743 typeof(struct event_data),
2744 node))->immed_dat;
2745 }
2746 evt->waiting = 0;
2747
2748 err_get_xri_exit:
2749 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2750 lpfc_bsg_event_unref(evt); /* release ref */
2751 lpfc_bsg_event_unref(evt); /* delete */
2752 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2753
2754 if (dmabuf) {
2755 if (dmabuf->virt)
2756 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2757 kfree(dmabuf);
2758 }
2759
2760 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2761 lpfc_sli_release_iocbq(phba, cmdiocbq);
2762 if (rspiocbq)
2763 lpfc_sli_release_iocbq(phba, rspiocbq);
2764 return ret_val;
2765 }
2766
2767 /**
2768 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2769 * @phba: Pointer to HBA context object
2770 *
2771 * This function allocates BSG_MBOX_SIZE (4KB) page size dma buffer and
2772 * returns the pointer to the buffer.
2773 **/
2774 static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba * phba)2775 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2776 {
2777 struct lpfc_dmabuf *dmabuf;
2778 struct pci_dev *pcidev = phba->pcidev;
2779
2780 /* allocate dma buffer struct */
2781 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2782 if (!dmabuf)
2783 return NULL;
2784
2785 INIT_LIST_HEAD(&dmabuf->list);
2786
2787 /* now, allocate dma buffer */
2788 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2789 &(dmabuf->phys), GFP_KERNEL);
2790
2791 if (!dmabuf->virt) {
2792 kfree(dmabuf);
2793 return NULL;
2794 }
2795
2796 return dmabuf;
2797 }
2798
2799 /**
2800 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2801 * @phba: Pointer to HBA context object.
2802 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2803 *
2804 * This routine just simply frees a dma buffer and its associated buffer
2805 * descriptor referred by @dmabuf.
2806 **/
2807 static void
lpfc_bsg_dma_page_free(struct lpfc_hba * phba,struct lpfc_dmabuf * dmabuf)2808 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2809 {
2810 struct pci_dev *pcidev = phba->pcidev;
2811
2812 if (!dmabuf)
2813 return;
2814
2815 if (dmabuf->virt)
2816 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2817 dmabuf->virt, dmabuf->phys);
2818 kfree(dmabuf);
2819 return;
2820 }
2821
2822 /**
2823 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2824 * @phba: Pointer to HBA context object.
2825 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2826 *
2827 * This routine just simply frees all dma buffers and their associated buffer
2828 * descriptors referred by @dmabuf_list.
2829 **/
2830 static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba * phba,struct list_head * dmabuf_list)2831 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2832 struct list_head *dmabuf_list)
2833 {
2834 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2835
2836 if (list_empty(dmabuf_list))
2837 return;
2838
2839 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2840 list_del_init(&dmabuf->list);
2841 lpfc_bsg_dma_page_free(phba, dmabuf);
2842 }
2843 return;
2844 }
2845
2846 /**
2847 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2848 * @phba: Pointer to HBA context object
2849 * @bpl: Pointer to 64 bit bde structure
2850 * @size: Number of bytes to process
2851 * @nocopydata: Flag to copy user data into the allocated buffer
2852 *
2853 * This function allocates page size buffers and populates an lpfc_dmabufext.
2854 * If allowed the user data pointed to with indataptr is copied into the kernel
2855 * memory. The chained list of page size buffers is returned.
2856 **/
2857 static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba * phba,struct ulp_bde64 * bpl,uint32_t size,int nocopydata)2858 diag_cmd_data_alloc(struct lpfc_hba *phba,
2859 struct ulp_bde64 *bpl, uint32_t size,
2860 int nocopydata)
2861 {
2862 struct lpfc_dmabufext *mlist = NULL;
2863 struct lpfc_dmabufext *dmp;
2864 int cnt, offset = 0, i = 0;
2865 struct pci_dev *pcidev;
2866
2867 pcidev = phba->pcidev;
2868
2869 while (size) {
2870 /* We get chunks of 4K */
2871 if (size > BUF_SZ_4K)
2872 cnt = BUF_SZ_4K;
2873 else
2874 cnt = size;
2875
2876 /* allocate struct lpfc_dmabufext buffer header */
2877 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2878 if (!dmp)
2879 goto out;
2880
2881 INIT_LIST_HEAD(&dmp->dma.list);
2882
2883 /* Queue it to a linked list */
2884 if (mlist)
2885 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2886 else
2887 mlist = dmp;
2888
2889 /* allocate buffer */
2890 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2891 cnt,
2892 &(dmp->dma.phys),
2893 GFP_KERNEL);
2894
2895 if (!dmp->dma.virt)
2896 goto out;
2897
2898 dmp->size = cnt;
2899
2900 if (nocopydata) {
2901 bpl->tus.f.bdeFlags = 0;
2902 } else {
2903 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2904 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2905 }
2906
2907 /* build buffer ptr list for IOCB */
2908 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2909 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2910 bpl->tus.f.bdeSize = (ushort) cnt;
2911 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2912 bpl++;
2913
2914 i++;
2915 offset += cnt;
2916 size -= cnt;
2917 }
2918
2919 if (mlist) {
2920 mlist->flag = i;
2921 return mlist;
2922 }
2923 out:
2924 diag_cmd_data_free(phba, mlist);
2925 return NULL;
2926 }
2927
2928 /**
2929 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2930 * @phba: Pointer to HBA context object
2931 * @rxxri: Receive exchange id
2932 * @len: Number of data bytes
2933 *
2934 * This function allocates and posts a data buffer of sufficient size to receive
2935 * an unsolicted CT command.
2936 **/
lpfcdiag_loop_post_rxbufs(struct lpfc_hba * phba,uint16_t rxxri,size_t len)2937 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2938 size_t len)
2939 {
2940 struct lpfc_sli_ring *pring;
2941 struct lpfc_iocbq *cmdiocbq;
2942 IOCB_t *cmd = NULL;
2943 struct list_head head, *curr, *next;
2944 struct lpfc_dmabuf *rxbmp;
2945 struct lpfc_dmabuf *dmp;
2946 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2947 struct ulp_bde64 *rxbpl = NULL;
2948 uint32_t num_bde;
2949 struct lpfc_dmabufext *rxbuffer = NULL;
2950 int ret_val = 0;
2951 int iocb_stat;
2952 int i = 0;
2953
2954 pring = lpfc_phba_elsring(phba);
2955
2956 cmdiocbq = lpfc_sli_get_iocbq(phba);
2957 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2958 if (rxbmp != NULL) {
2959 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2960 if (rxbmp->virt) {
2961 INIT_LIST_HEAD(&rxbmp->list);
2962 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2963 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2964 }
2965 }
2966
2967 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2968 ret_val = -ENOMEM;
2969 goto err_post_rxbufs_exit;
2970 }
2971
2972 /* Queue buffers for the receive exchange */
2973 num_bde = (uint32_t)rxbuffer->flag;
2974 dmp = &rxbuffer->dma;
2975
2976 cmd = &cmdiocbq->iocb;
2977 i = 0;
2978
2979 INIT_LIST_HEAD(&head);
2980 list_add_tail(&head, &dmp->list);
2981 list_for_each_safe(curr, next, &head) {
2982 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2983 list_del(curr);
2984
2985 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2986 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2987 cmd->un.quexri64cx.buff.bde.addrHigh =
2988 putPaddrHigh(mp[i]->phys);
2989 cmd->un.quexri64cx.buff.bde.addrLow =
2990 putPaddrLow(mp[i]->phys);
2991 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2992 ((struct lpfc_dmabufext *)mp[i])->size;
2993 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2994 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2995 cmd->ulpPU = 0;
2996 cmd->ulpLe = 1;
2997 cmd->ulpBdeCount = 1;
2998 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2999
3000 } else {
3001 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
3002 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
3003 cmd->un.cont64[i].tus.f.bdeSize =
3004 ((struct lpfc_dmabufext *)mp[i])->size;
3005 cmd->ulpBdeCount = ++i;
3006
3007 if ((--num_bde > 0) && (i < 2))
3008 continue;
3009
3010 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
3011 cmd->ulpLe = 1;
3012 }
3013
3014 cmd->ulpClass = CLASS3;
3015 cmd->ulpContext = rxxri;
3016
3017 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
3018 0);
3019 if (iocb_stat == IOCB_ERROR) {
3020 diag_cmd_data_free(phba,
3021 (struct lpfc_dmabufext *)mp[0]);
3022 if (mp[1])
3023 diag_cmd_data_free(phba,
3024 (struct lpfc_dmabufext *)mp[1]);
3025 dmp = list_entry(next, struct lpfc_dmabuf, list);
3026 ret_val = -EIO;
3027 goto err_post_rxbufs_exit;
3028 }
3029
3030 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
3031 if (mp[1]) {
3032 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
3033 mp[1] = NULL;
3034 }
3035
3036 /* The iocb was freed by lpfc_sli_issue_iocb */
3037 cmdiocbq = lpfc_sli_get_iocbq(phba);
3038 if (!cmdiocbq) {
3039 dmp = list_entry(next, struct lpfc_dmabuf, list);
3040 ret_val = -EIO;
3041 goto err_post_rxbufs_exit;
3042 }
3043
3044 cmd = &cmdiocbq->iocb;
3045 i = 0;
3046 }
3047 list_del(&head);
3048
3049 err_post_rxbufs_exit:
3050
3051 if (rxbmp) {
3052 if (rxbmp->virt)
3053 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3054 kfree(rxbmp);
3055 }
3056
3057 if (cmdiocbq)
3058 lpfc_sli_release_iocbq(phba, cmdiocbq);
3059 return ret_val;
3060 }
3061
3062 /**
3063 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3064 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3065 *
3066 * This function receives a user data buffer to be transmitted and received on
3067 * the same port, the link must be up and in loopback mode prior
3068 * to being called.
3069 * 1. A kernel buffer is allocated to copy the user data into.
3070 * 2. The port registers with "itself".
3071 * 3. The transmit and receive exchange ids are obtained.
3072 * 4. The receive exchange id is posted.
3073 * 5. A new els loopback event is created.
3074 * 6. The command and response iocbs are allocated.
3075 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
3076 *
3077 * This function is meant to be called n times while the port is in loopback
3078 * so it is the apps responsibility to issue a reset to take the port out
3079 * of loopback mode.
3080 **/
3081 static int
lpfc_bsg_diag_loopback_run(struct bsg_job * job)3082 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3083 {
3084 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3085 struct fc_bsg_reply *bsg_reply = job->reply;
3086 struct lpfc_hba *phba = vport->phba;
3087 struct lpfc_bsg_event *evt;
3088 struct event_data *evdat;
3089 struct lpfc_sli *psli = &phba->sli;
3090 uint32_t size;
3091 uint32_t full_size;
3092 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3093 uint16_t rpi = 0;
3094 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3095 IOCB_t *cmd, *rsp = NULL;
3096 struct lpfc_sli_ct_request *ctreq;
3097 struct lpfc_dmabuf *txbmp;
3098 struct ulp_bde64 *txbpl = NULL;
3099 struct lpfc_dmabufext *txbuffer = NULL;
3100 struct list_head head;
3101 struct lpfc_dmabuf *curr;
3102 uint16_t txxri = 0, rxxri;
3103 uint32_t num_bde;
3104 uint8_t *ptr = NULL, *rx_databuf = NULL;
3105 int rc = 0;
3106 int time_left;
3107 int iocb_stat = IOCB_SUCCESS;
3108 unsigned long flags;
3109 void *dataout = NULL;
3110 uint32_t total_mem;
3111
3112 /* in case no data is returned return just the return code */
3113 bsg_reply->reply_payload_rcv_len = 0;
3114
3115 if (job->request_len <
3116 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3117 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3118 "2739 Received DIAG TEST request below minimum "
3119 "size\n");
3120 rc = -EINVAL;
3121 goto loopback_test_exit;
3122 }
3123
3124 if (job->request_payload.payload_len !=
3125 job->reply_payload.payload_len) {
3126 rc = -EINVAL;
3127 goto loopback_test_exit;
3128 }
3129
3130 if ((phba->link_state == LPFC_HBA_ERROR) ||
3131 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3132 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3133 rc = -EACCES;
3134 goto loopback_test_exit;
3135 }
3136
3137 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3138 rc = -EACCES;
3139 goto loopback_test_exit;
3140 }
3141
3142 size = job->request_payload.payload_len;
3143 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3144
3145 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3146 rc = -ERANGE;
3147 goto loopback_test_exit;
3148 }
3149
3150 if (full_size >= BUF_SZ_4K) {
3151 /*
3152 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3153 * then we allocate 64k and re-use that buffer over and over to
3154 * xfer the whole block. This is because Linux kernel has a
3155 * problem allocating more than 120k of kernel space memory. Saw
3156 * problem with GET_FCPTARGETMAPPING...
3157 */
3158 if (size <= (64 * 1024))
3159 total_mem = full_size;
3160 else
3161 total_mem = 64 * 1024;
3162 } else
3163 /* Allocate memory for ioctl data */
3164 total_mem = BUF_SZ_4K;
3165
3166 dataout = kmalloc(total_mem, GFP_KERNEL);
3167 if (dataout == NULL) {
3168 rc = -ENOMEM;
3169 goto loopback_test_exit;
3170 }
3171
3172 ptr = dataout;
3173 ptr += ELX_LOOPBACK_HEADER_SZ;
3174 sg_copy_to_buffer(job->request_payload.sg_list,
3175 job->request_payload.sg_cnt,
3176 ptr, size);
3177 rc = lpfcdiag_loop_self_reg(phba, &rpi);
3178 if (rc)
3179 goto loopback_test_exit;
3180
3181 if (phba->sli_rev < LPFC_SLI_REV4) {
3182 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3183 if (rc) {
3184 lpfcdiag_loop_self_unreg(phba, rpi);
3185 goto loopback_test_exit;
3186 }
3187
3188 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
3189 if (rc) {
3190 lpfcdiag_loop_self_unreg(phba, rpi);
3191 goto loopback_test_exit;
3192 }
3193 }
3194 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3195 SLI_CT_ELX_LOOPBACK);
3196 if (!evt) {
3197 lpfcdiag_loop_self_unreg(phba, rpi);
3198 rc = -ENOMEM;
3199 goto loopback_test_exit;
3200 }
3201
3202 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3203 list_add(&evt->node, &phba->ct_ev_waiters);
3204 lpfc_bsg_event_ref(evt);
3205 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3206
3207 cmdiocbq = lpfc_sli_get_iocbq(phba);
3208 if (phba->sli_rev < LPFC_SLI_REV4)
3209 rspiocbq = lpfc_sli_get_iocbq(phba);
3210 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3211
3212 if (txbmp) {
3213 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3214 if (txbmp->virt) {
3215 INIT_LIST_HEAD(&txbmp->list);
3216 txbpl = (struct ulp_bde64 *) txbmp->virt;
3217 txbuffer = diag_cmd_data_alloc(phba,
3218 txbpl, full_size, 0);
3219 }
3220 }
3221
3222 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3223 rc = -ENOMEM;
3224 goto err_loopback_test_exit;
3225 }
3226 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3227 rc = -ENOMEM;
3228 goto err_loopback_test_exit;
3229 }
3230
3231 cmd = &cmdiocbq->iocb;
3232 if (phba->sli_rev < LPFC_SLI_REV4)
3233 rsp = &rspiocbq->iocb;
3234
3235 INIT_LIST_HEAD(&head);
3236 list_add_tail(&head, &txbuffer->dma.list);
3237 list_for_each_entry(curr, &head, list) {
3238 segment_len = ((struct lpfc_dmabufext *)curr)->size;
3239 if (current_offset == 0) {
3240 ctreq = curr->virt;
3241 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3242 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3243 ctreq->RevisionId.bits.InId = 0;
3244 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3245 ctreq->FsSubType = 0;
3246 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3247 ctreq->CommandResponse.bits.Size = size;
3248 segment_offset = ELX_LOOPBACK_HEADER_SZ;
3249 } else
3250 segment_offset = 0;
3251
3252 BUG_ON(segment_offset >= segment_len);
3253 memcpy(curr->virt + segment_offset,
3254 ptr + current_offset,
3255 segment_len - segment_offset);
3256
3257 current_offset += segment_len - segment_offset;
3258 BUG_ON(current_offset > size);
3259 }
3260 list_del(&head);
3261
3262 /* Build the XMIT_SEQUENCE iocb */
3263 num_bde = (uint32_t)txbuffer->flag;
3264
3265 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3266 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3267 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3268 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3269
3270 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3271 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3272 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3273 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3274
3275 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3276 cmd->ulpBdeCount = 1;
3277 cmd->ulpLe = 1;
3278 cmd->ulpClass = CLASS3;
3279
3280 if (phba->sli_rev < LPFC_SLI_REV4) {
3281 cmd->ulpContext = txxri;
3282 } else {
3283 cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3284 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3285 cmdiocbq->context3 = txbmp;
3286 cmdiocbq->sli4_xritag = NO_XRI;
3287 cmd->unsli3.rcvsli3.ox_id = 0xffff;
3288 }
3289 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3290 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
3291 cmdiocbq->vport = phba->pport;
3292 cmdiocbq->iocb_cmpl = NULL;
3293 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3294 rspiocbq, (phba->fc_ratov * 2) +
3295 LPFC_DRVR_TIMEOUT);
3296
3297 if ((iocb_stat != IOCB_SUCCESS) ||
3298 ((phba->sli_rev < LPFC_SLI_REV4) &&
3299 (rsp->ulpStatus != IOSTAT_SUCCESS))) {
3300 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3301 "3126 Failed loopback test issue iocb: "
3302 "iocb_stat:x%x\n", iocb_stat);
3303 rc = -EIO;
3304 goto err_loopback_test_exit;
3305 }
3306
3307 evt->waiting = 1;
3308 time_left = wait_event_interruptible_timeout(
3309 evt->wq, !list_empty(&evt->events_to_see),
3310 msecs_to_jiffies(1000 *
3311 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3312 evt->waiting = 0;
3313 if (list_empty(&evt->events_to_see)) {
3314 rc = (time_left) ? -EINTR : -ETIMEDOUT;
3315 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3316 "3125 Not receiving unsolicited event, "
3317 "rc:x%x\n", rc);
3318 } else {
3319 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3320 list_move(evt->events_to_see.prev, &evt->events_to_get);
3321 evdat = list_entry(evt->events_to_get.prev,
3322 typeof(*evdat), node);
3323 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3324 rx_databuf = evdat->data;
3325 if (evdat->len != full_size) {
3326 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3327 "1603 Loopback test did not receive expected "
3328 "data length. actual length 0x%x expected "
3329 "length 0x%x\n",
3330 evdat->len, full_size);
3331 rc = -EIO;
3332 } else if (rx_databuf == NULL)
3333 rc = -EIO;
3334 else {
3335 rc = IOCB_SUCCESS;
3336 /* skip over elx loopback header */
3337 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3338 bsg_reply->reply_payload_rcv_len =
3339 sg_copy_from_buffer(job->reply_payload.sg_list,
3340 job->reply_payload.sg_cnt,
3341 rx_databuf, size);
3342 bsg_reply->reply_payload_rcv_len = size;
3343 }
3344 }
3345
3346 err_loopback_test_exit:
3347 lpfcdiag_loop_self_unreg(phba, rpi);
3348
3349 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3350 lpfc_bsg_event_unref(evt); /* release ref */
3351 lpfc_bsg_event_unref(evt); /* delete */
3352 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3353
3354 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3355 lpfc_sli_release_iocbq(phba, cmdiocbq);
3356
3357 if (rspiocbq != NULL)
3358 lpfc_sli_release_iocbq(phba, rspiocbq);
3359
3360 if (txbmp != NULL) {
3361 if (txbpl != NULL) {
3362 if (txbuffer != NULL)
3363 diag_cmd_data_free(phba, txbuffer);
3364 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3365 }
3366 kfree(txbmp);
3367 }
3368
3369 loopback_test_exit:
3370 kfree(dataout);
3371 /* make error code available to userspace */
3372 bsg_reply->result = rc;
3373 job->dd_data = NULL;
3374 /* complete the job back to userspace if no error */
3375 if (rc == IOCB_SUCCESS)
3376 bsg_job_done(job, bsg_reply->result,
3377 bsg_reply->reply_payload_rcv_len);
3378 return rc;
3379 }
3380
3381 /**
3382 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3383 * @job: GET_DFC_REV fc_bsg_job
3384 **/
3385 static int
lpfc_bsg_get_dfc_rev(struct bsg_job * job)3386 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3387 {
3388 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3389 struct fc_bsg_reply *bsg_reply = job->reply;
3390 struct lpfc_hba *phba = vport->phba;
3391 struct get_mgmt_rev_reply *event_reply;
3392 int rc = 0;
3393
3394 if (job->request_len <
3395 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3396 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3397 "2740 Received GET_DFC_REV request below "
3398 "minimum size\n");
3399 rc = -EINVAL;
3400 goto job_error;
3401 }
3402
3403 event_reply = (struct get_mgmt_rev_reply *)
3404 bsg_reply->reply_data.vendor_reply.vendor_rsp;
3405
3406 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3407 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3408 "2741 Received GET_DFC_REV reply below "
3409 "minimum size\n");
3410 rc = -EINVAL;
3411 goto job_error;
3412 }
3413
3414 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3415 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3416 job_error:
3417 bsg_reply->result = rc;
3418 if (rc == 0)
3419 bsg_job_done(job, bsg_reply->result,
3420 bsg_reply->reply_payload_rcv_len);
3421 return rc;
3422 }
3423
3424 /**
3425 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3426 * @phba: Pointer to HBA context object.
3427 * @pmboxq: Pointer to mailbox command.
3428 *
3429 * This is completion handler function for mailbox commands issued from
3430 * lpfc_bsg_issue_mbox function. This function is called by the
3431 * mailbox event handler function with no lock held. This function
3432 * will wake up thread waiting on the wait queue pointed by context1
3433 * of the mailbox.
3434 **/
3435 static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3436 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3437 {
3438 struct bsg_job_data *dd_data;
3439 struct fc_bsg_reply *bsg_reply;
3440 struct bsg_job *job;
3441 uint32_t size;
3442 unsigned long flags;
3443 uint8_t *pmb, *pmb_buf;
3444
3445 dd_data = pmboxq->ctx_ndlp;
3446
3447 /*
3448 * The outgoing buffer is readily referred from the dma buffer,
3449 * just need to get header part from mailboxq structure.
3450 */
3451 pmb = (uint8_t *)&pmboxq->u.mb;
3452 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3453 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3454
3455 /* Determine if job has been aborted */
3456
3457 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3458 job = dd_data->set_job;
3459 if (job) {
3460 /* Prevent timeout handling from trying to abort job */
3461 job->dd_data = NULL;
3462 }
3463 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3464
3465 /* Copy the mailbox data to the job if it is still active */
3466
3467 if (job) {
3468 bsg_reply = job->reply;
3469 size = job->reply_payload.payload_len;
3470 bsg_reply->reply_payload_rcv_len =
3471 sg_copy_from_buffer(job->reply_payload.sg_list,
3472 job->reply_payload.sg_cnt,
3473 pmb_buf, size);
3474 }
3475
3476 dd_data->set_job = NULL;
3477 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3478 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3479 kfree(dd_data);
3480
3481 /* Complete the job if the job is still active */
3482
3483 if (job) {
3484 bsg_reply->result = 0;
3485 bsg_job_done(job, bsg_reply->result,
3486 bsg_reply->reply_payload_rcv_len);
3487 }
3488 return;
3489 }
3490
3491 /**
3492 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3493 * @phba: Pointer to HBA context object.
3494 * @mb: Pointer to a mailbox object.
3495 * @vport: Pointer to a vport object.
3496 *
3497 * Some commands require the port to be offline, some may not be called from
3498 * the application.
3499 **/
lpfc_bsg_check_cmd_access(struct lpfc_hba * phba,MAILBOX_t * mb,struct lpfc_vport * vport)3500 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3501 MAILBOX_t *mb, struct lpfc_vport *vport)
3502 {
3503 /* return negative error values for bsg job */
3504 switch (mb->mbxCommand) {
3505 /* Offline only */
3506 case MBX_INIT_LINK:
3507 case MBX_DOWN_LINK:
3508 case MBX_CONFIG_LINK:
3509 case MBX_CONFIG_RING:
3510 case MBX_RESET_RING:
3511 case MBX_UNREG_LOGIN:
3512 case MBX_CLEAR_LA:
3513 case MBX_DUMP_CONTEXT:
3514 case MBX_RUN_DIAGS:
3515 case MBX_RESTART:
3516 case MBX_SET_MASK:
3517 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3518 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3519 "2743 Command 0x%x is illegal in on-line "
3520 "state\n",
3521 mb->mbxCommand);
3522 return -EPERM;
3523 }
3524 break;
3525 case MBX_WRITE_NV:
3526 case MBX_WRITE_VPARMS:
3527 case MBX_LOAD_SM:
3528 case MBX_READ_NV:
3529 case MBX_READ_CONFIG:
3530 case MBX_READ_RCONFIG:
3531 case MBX_READ_STATUS:
3532 case MBX_READ_XRI:
3533 case MBX_READ_REV:
3534 case MBX_READ_LNK_STAT:
3535 case MBX_DUMP_MEMORY:
3536 case MBX_DOWN_LOAD:
3537 case MBX_UPDATE_CFG:
3538 case MBX_KILL_BOARD:
3539 case MBX_READ_TOPOLOGY:
3540 case MBX_LOAD_AREA:
3541 case MBX_LOAD_EXP_ROM:
3542 case MBX_BEACON:
3543 case MBX_DEL_LD_ENTRY:
3544 case MBX_SET_DEBUG:
3545 case MBX_WRITE_WWN:
3546 case MBX_SLI4_CONFIG:
3547 case MBX_READ_EVENT_LOG:
3548 case MBX_READ_EVENT_LOG_STATUS:
3549 case MBX_WRITE_EVENT_LOG:
3550 case MBX_PORT_CAPABILITIES:
3551 case MBX_PORT_IOV_CONTROL:
3552 case MBX_RUN_BIU_DIAG64:
3553 break;
3554 case MBX_SET_VARIABLE:
3555 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3556 "1226 mbox: set_variable 0x%x, 0x%x\n",
3557 mb->un.varWords[0],
3558 mb->un.varWords[1]);
3559 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3560 && (mb->un.varWords[1] == 1)) {
3561 phba->wait_4_mlo_maint_flg = 1;
3562 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3563 spin_lock_irq(&phba->hbalock);
3564 phba->link_flag &= ~LS_LOOPBACK_MODE;
3565 spin_unlock_irq(&phba->hbalock);
3566 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3567 }
3568 break;
3569 case MBX_READ_SPARM64:
3570 case MBX_REG_LOGIN:
3571 case MBX_REG_LOGIN64:
3572 case MBX_CONFIG_PORT:
3573 case MBX_RUN_BIU_DIAG:
3574 default:
3575 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3576 "2742 Unknown Command 0x%x\n",
3577 mb->mbxCommand);
3578 return -EPERM;
3579 }
3580
3581 return 0; /* ok */
3582 }
3583
3584 /**
3585 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3586 * @phba: Pointer to HBA context object.
3587 *
3588 * This is routine clean up and reset BSG handling of multi-buffer mbox
3589 * command session.
3590 **/
3591 static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba * phba)3592 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3593 {
3594 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3595 return;
3596
3597 /* free all memory, including dma buffers */
3598 lpfc_bsg_dma_page_list_free(phba,
3599 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3600 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3601 /* multi-buffer write mailbox command pass-through complete */
3602 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3603 sizeof(struct lpfc_mbox_ext_buf_ctx));
3604 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3605
3606 return;
3607 }
3608
3609 /**
3610 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3611 * @phba: Pointer to HBA context object.
3612 * @pmboxq: Pointer to mailbox command.
3613 *
3614 * This is routine handles BSG job for mailbox commands completions with
3615 * multiple external buffers.
3616 **/
3617 static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3618 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3619 {
3620 struct bsg_job_data *dd_data;
3621 struct bsg_job *job;
3622 struct fc_bsg_reply *bsg_reply;
3623 uint8_t *pmb, *pmb_buf;
3624 unsigned long flags;
3625 uint32_t size;
3626 int rc = 0;
3627 struct lpfc_dmabuf *dmabuf;
3628 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3629 uint8_t *pmbx;
3630
3631 dd_data = pmboxq->ctx_buf;
3632
3633 /* Determine if job has been aborted */
3634 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3635 job = dd_data->set_job;
3636 if (job) {
3637 bsg_reply = job->reply;
3638 /* Prevent timeout handling from trying to abort job */
3639 job->dd_data = NULL;
3640 }
3641 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3642
3643 /*
3644 * The outgoing buffer is readily referred from the dma buffer,
3645 * just need to get header part from mailboxq structure.
3646 */
3647
3648 pmb = (uint8_t *)&pmboxq->u.mb;
3649 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3650 /* Copy the byte swapped response mailbox back to the user */
3651 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3652 /* if there is any non-embedded extended data copy that too */
3653 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3654 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3655 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3656 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3657 pmbx = (uint8_t *)dmabuf->virt;
3658 /* byte swap the extended data following the mailbox command */
3659 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3660 &pmbx[sizeof(MAILBOX_t)],
3661 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3662 }
3663
3664 /* Complete the job if the job is still active */
3665
3666 if (job) {
3667 size = job->reply_payload.payload_len;
3668 bsg_reply->reply_payload_rcv_len =
3669 sg_copy_from_buffer(job->reply_payload.sg_list,
3670 job->reply_payload.sg_cnt,
3671 pmb_buf, size);
3672
3673 /* result for successful */
3674 bsg_reply->result = 0;
3675
3676 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3677 "2937 SLI_CONFIG ext-buffer mailbox command "
3678 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3679 phba->mbox_ext_buf_ctx.nembType,
3680 phba->mbox_ext_buf_ctx.mboxType, size);
3681 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3682 phba->mbox_ext_buf_ctx.nembType,
3683 phba->mbox_ext_buf_ctx.mboxType,
3684 dma_ebuf, sta_pos_addr,
3685 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3686 } else {
3687 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3688 "2938 SLI_CONFIG ext-buffer mailbox "
3689 "command (x%x/x%x) failure, rc:x%x\n",
3690 phba->mbox_ext_buf_ctx.nembType,
3691 phba->mbox_ext_buf_ctx.mboxType, rc);
3692 }
3693
3694
3695 /* state change */
3696 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3697 kfree(dd_data);
3698 return job;
3699 }
3700
3701 /**
3702 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3703 * @phba: Pointer to HBA context object.
3704 * @pmboxq: Pointer to mailbox command.
3705 *
3706 * This is completion handler function for mailbox read commands with multiple
3707 * external buffers.
3708 **/
3709 static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3710 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3711 {
3712 struct bsg_job *job;
3713 struct fc_bsg_reply *bsg_reply;
3714
3715 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3716
3717 /* handle the BSG job with mailbox command */
3718 if (!job)
3719 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3720
3721 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3722 "2939 SLI_CONFIG ext-buffer rd mailbox command "
3723 "complete, ctxState:x%x, mbxStatus:x%x\n",
3724 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3725
3726 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3727 lpfc_bsg_mbox_ext_session_reset(phba);
3728
3729 /* free base driver mailbox structure memory */
3730 mempool_free(pmboxq, phba->mbox_mem_pool);
3731
3732 /* if the job is still active, call job done */
3733 if (job) {
3734 bsg_reply = job->reply;
3735 bsg_job_done(job, bsg_reply->result,
3736 bsg_reply->reply_payload_rcv_len);
3737 }
3738 return;
3739 }
3740
3741 /**
3742 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3743 * @phba: Pointer to HBA context object.
3744 * @pmboxq: Pointer to mailbox command.
3745 *
3746 * This is completion handler function for mailbox write commands with multiple
3747 * external buffers.
3748 **/
3749 static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba * phba,LPFC_MBOXQ_t * pmboxq)3750 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3751 {
3752 struct bsg_job *job;
3753 struct fc_bsg_reply *bsg_reply;
3754
3755 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3756
3757 /* handle the BSG job with the mailbox command */
3758 if (!job)
3759 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3760
3761 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3762 "2940 SLI_CONFIG ext-buffer wr mailbox command "
3763 "complete, ctxState:x%x, mbxStatus:x%x\n",
3764 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3765
3766 /* free all memory, including dma buffers */
3767 mempool_free(pmboxq, phba->mbox_mem_pool);
3768 lpfc_bsg_mbox_ext_session_reset(phba);
3769
3770 /* if the job is still active, call job done */
3771 if (job) {
3772 bsg_reply = job->reply;
3773 bsg_job_done(job, bsg_reply->result,
3774 bsg_reply->reply_payload_rcv_len);
3775 }
3776
3777 return;
3778 }
3779
3780 static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba * phba,enum nemb_type nemb_tp,uint32_t index,struct lpfc_dmabuf * mbx_dmabuf,struct lpfc_dmabuf * ext_dmabuf)3781 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3782 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3783 struct lpfc_dmabuf *ext_dmabuf)
3784 {
3785 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3786
3787 /* pointer to the start of mailbox command */
3788 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3789
3790 if (nemb_tp == nemb_mse) {
3791 if (index == 0) {
3792 sli_cfg_mbx->un.sli_config_emb0_subsys.
3793 mse[index].pa_hi =
3794 putPaddrHigh(mbx_dmabuf->phys +
3795 sizeof(MAILBOX_t));
3796 sli_cfg_mbx->un.sli_config_emb0_subsys.
3797 mse[index].pa_lo =
3798 putPaddrLow(mbx_dmabuf->phys +
3799 sizeof(MAILBOX_t));
3800 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3801 "2943 SLI_CONFIG(mse)[%d], "
3802 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3803 index,
3804 sli_cfg_mbx->un.sli_config_emb0_subsys.
3805 mse[index].buf_len,
3806 sli_cfg_mbx->un.sli_config_emb0_subsys.
3807 mse[index].pa_hi,
3808 sli_cfg_mbx->un.sli_config_emb0_subsys.
3809 mse[index].pa_lo);
3810 } else {
3811 sli_cfg_mbx->un.sli_config_emb0_subsys.
3812 mse[index].pa_hi =
3813 putPaddrHigh(ext_dmabuf->phys);
3814 sli_cfg_mbx->un.sli_config_emb0_subsys.
3815 mse[index].pa_lo =
3816 putPaddrLow(ext_dmabuf->phys);
3817 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3818 "2944 SLI_CONFIG(mse)[%d], "
3819 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3820 index,
3821 sli_cfg_mbx->un.sli_config_emb0_subsys.
3822 mse[index].buf_len,
3823 sli_cfg_mbx->un.sli_config_emb0_subsys.
3824 mse[index].pa_hi,
3825 sli_cfg_mbx->un.sli_config_emb0_subsys.
3826 mse[index].pa_lo);
3827 }
3828 } else {
3829 if (index == 0) {
3830 sli_cfg_mbx->un.sli_config_emb1_subsys.
3831 hbd[index].pa_hi =
3832 putPaddrHigh(mbx_dmabuf->phys +
3833 sizeof(MAILBOX_t));
3834 sli_cfg_mbx->un.sli_config_emb1_subsys.
3835 hbd[index].pa_lo =
3836 putPaddrLow(mbx_dmabuf->phys +
3837 sizeof(MAILBOX_t));
3838 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3839 "3007 SLI_CONFIG(hbd)[%d], "
3840 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3841 index,
3842 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3843 &sli_cfg_mbx->un.
3844 sli_config_emb1_subsys.hbd[index]),
3845 sli_cfg_mbx->un.sli_config_emb1_subsys.
3846 hbd[index].pa_hi,
3847 sli_cfg_mbx->un.sli_config_emb1_subsys.
3848 hbd[index].pa_lo);
3849
3850 } else {
3851 sli_cfg_mbx->un.sli_config_emb1_subsys.
3852 hbd[index].pa_hi =
3853 putPaddrHigh(ext_dmabuf->phys);
3854 sli_cfg_mbx->un.sli_config_emb1_subsys.
3855 hbd[index].pa_lo =
3856 putPaddrLow(ext_dmabuf->phys);
3857 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3858 "3008 SLI_CONFIG(hbd)[%d], "
3859 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3860 index,
3861 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3862 &sli_cfg_mbx->un.
3863 sli_config_emb1_subsys.hbd[index]),
3864 sli_cfg_mbx->un.sli_config_emb1_subsys.
3865 hbd[index].pa_hi,
3866 sli_cfg_mbx->un.sli_config_emb1_subsys.
3867 hbd[index].pa_lo);
3868 }
3869 }
3870 return;
3871 }
3872
3873 /**
3874 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3875 * @phba: Pointer to HBA context object.
3876 * @job: Pointer to the job object.
3877 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3878 * @dmabuf: Pointer to a DMA buffer descriptor.
3879 *
3880 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3881 * non-embedded external buffers.
3882 **/
3883 static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba * phba,struct bsg_job * job,enum nemb_type nemb_tp,struct lpfc_dmabuf * dmabuf)3884 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3885 enum nemb_type nemb_tp,
3886 struct lpfc_dmabuf *dmabuf)
3887 {
3888 struct fc_bsg_request *bsg_request = job->request;
3889 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3890 struct dfc_mbox_req *mbox_req;
3891 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3892 uint32_t ext_buf_cnt, ext_buf_index;
3893 struct lpfc_dmabuf *ext_dmabuf = NULL;
3894 struct bsg_job_data *dd_data = NULL;
3895 LPFC_MBOXQ_t *pmboxq = NULL;
3896 MAILBOX_t *pmb;
3897 uint8_t *pmbx;
3898 int rc, i;
3899
3900 mbox_req =
3901 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3902
3903 /* pointer to the start of mailbox command */
3904 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3905
3906 if (nemb_tp == nemb_mse) {
3907 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3908 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3909 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3910 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3911 "2945 Handled SLI_CONFIG(mse) rd, "
3912 "ext_buf_cnt(%d) out of range(%d)\n",
3913 ext_buf_cnt,
3914 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3915 rc = -ERANGE;
3916 goto job_error;
3917 }
3918 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3919 "2941 Handled SLI_CONFIG(mse) rd, "
3920 "ext_buf_cnt:%d\n", ext_buf_cnt);
3921 } else {
3922 /* sanity check on interface type for support */
3923 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3924 LPFC_SLI_INTF_IF_TYPE_2) {
3925 rc = -ENODEV;
3926 goto job_error;
3927 }
3928 /* nemb_tp == nemb_hbd */
3929 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3930 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3931 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3932 "2946 Handled SLI_CONFIG(hbd) rd, "
3933 "ext_buf_cnt(%d) out of range(%d)\n",
3934 ext_buf_cnt,
3935 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3936 rc = -ERANGE;
3937 goto job_error;
3938 }
3939 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3940 "2942 Handled SLI_CONFIG(hbd) rd, "
3941 "ext_buf_cnt:%d\n", ext_buf_cnt);
3942 }
3943
3944 /* before dma descriptor setup */
3945 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3946 sta_pre_addr, dmabuf, ext_buf_cnt);
3947
3948 /* reject non-embedded mailbox command with none external buffer */
3949 if (ext_buf_cnt == 0) {
3950 rc = -EPERM;
3951 goto job_error;
3952 } else if (ext_buf_cnt > 1) {
3953 /* additional external read buffers */
3954 for (i = 1; i < ext_buf_cnt; i++) {
3955 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3956 if (!ext_dmabuf) {
3957 rc = -ENOMEM;
3958 goto job_error;
3959 }
3960 list_add_tail(&ext_dmabuf->list,
3961 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3962 }
3963 }
3964
3965 /* bsg tracking structure */
3966 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3967 if (!dd_data) {
3968 rc = -ENOMEM;
3969 goto job_error;
3970 }
3971
3972 /* mailbox command structure for base driver */
3973 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3974 if (!pmboxq) {
3975 rc = -ENOMEM;
3976 goto job_error;
3977 }
3978 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3979
3980 /* for the first external buffer */
3981 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3982
3983 /* for the rest of external buffer descriptors if any */
3984 if (ext_buf_cnt > 1) {
3985 ext_buf_index = 1;
3986 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3987 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3988 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3989 ext_buf_index, dmabuf,
3990 curr_dmabuf);
3991 ext_buf_index++;
3992 }
3993 }
3994
3995 /* after dma descriptor setup */
3996 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3997 sta_pos_addr, dmabuf, ext_buf_cnt);
3998
3999 /* construct base driver mbox command */
4000 pmb = &pmboxq->u.mb;
4001 pmbx = (uint8_t *)dmabuf->virt;
4002 memcpy(pmb, pmbx, sizeof(*pmb));
4003 pmb->mbxOwner = OWN_HOST;
4004 pmboxq->vport = phba->pport;
4005
4006 /* multi-buffer handling context */
4007 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4008 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
4009 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4010 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4011 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4012 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4013
4014 /* callback for multi-buffer read mailbox command */
4015 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
4016
4017 /* context fields to callback function */
4018 pmboxq->ctx_buf = dd_data;
4019 dd_data->type = TYPE_MBOX;
4020 dd_data->set_job = job;
4021 dd_data->context_un.mbox.pmboxq = pmboxq;
4022 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4023 job->dd_data = dd_data;
4024
4025 /* state change */
4026 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4027
4028 /*
4029 * Non-embedded mailbox subcommand data gets byte swapped here because
4030 * the lower level driver code only does the first 64 mailbox words.
4031 */
4032 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
4033 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
4034 (nemb_tp == nemb_mse))
4035 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
4036 &pmbx[sizeof(MAILBOX_t)],
4037 sli_cfg_mbx->un.sli_config_emb0_subsys.
4038 mse[0].buf_len);
4039
4040 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4041 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4042 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4043 "2947 Issued SLI_CONFIG ext-buffer "
4044 "mailbox command, rc:x%x\n", rc);
4045 return SLI_CONFIG_HANDLED;
4046 }
4047 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4048 "2948 Failed to issue SLI_CONFIG ext-buffer "
4049 "mailbox command, rc:x%x\n", rc);
4050 rc = -EPIPE;
4051
4052 job_error:
4053 if (pmboxq)
4054 mempool_free(pmboxq, phba->mbox_mem_pool);
4055 lpfc_bsg_dma_page_list_free(phba,
4056 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4057 kfree(dd_data);
4058 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4059 return rc;
4060 }
4061
4062 /**
4063 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4064 * @phba: Pointer to HBA context object.
4065 * @job: Pointer to the job object.
4066 * @nemb_tp: Enumerate of non-embedded mailbox command type.
4067 * @dmabuf: Pointer to a DMA buffer descriptor.
4068 *
4069 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4070 * non-embedded external buffers.
4071 **/
4072 static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba * phba,struct bsg_job * job,enum nemb_type nemb_tp,struct lpfc_dmabuf * dmabuf)4073 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4074 enum nemb_type nemb_tp,
4075 struct lpfc_dmabuf *dmabuf)
4076 {
4077 struct fc_bsg_request *bsg_request = job->request;
4078 struct fc_bsg_reply *bsg_reply = job->reply;
4079 struct dfc_mbox_req *mbox_req;
4080 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4081 uint32_t ext_buf_cnt;
4082 struct bsg_job_data *dd_data = NULL;
4083 LPFC_MBOXQ_t *pmboxq = NULL;
4084 MAILBOX_t *pmb;
4085 uint8_t *mbx;
4086 int rc = SLI_CONFIG_NOT_HANDLED, i;
4087
4088 mbox_req =
4089 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4090
4091 /* pointer to the start of mailbox command */
4092 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4093
4094 if (nemb_tp == nemb_mse) {
4095 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4096 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4097 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4098 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4099 "2953 Failed SLI_CONFIG(mse) wr, "
4100 "ext_buf_cnt(%d) out of range(%d)\n",
4101 ext_buf_cnt,
4102 LPFC_MBX_SLI_CONFIG_MAX_MSE);
4103 return -ERANGE;
4104 }
4105 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4106 "2949 Handled SLI_CONFIG(mse) wr, "
4107 "ext_buf_cnt:%d\n", ext_buf_cnt);
4108 } else {
4109 /* sanity check on interface type for support */
4110 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4111 LPFC_SLI_INTF_IF_TYPE_2)
4112 return -ENODEV;
4113 /* nemb_tp == nemb_hbd */
4114 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4115 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4116 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4117 "2954 Failed SLI_CONFIG(hbd) wr, "
4118 "ext_buf_cnt(%d) out of range(%d)\n",
4119 ext_buf_cnt,
4120 LPFC_MBX_SLI_CONFIG_MAX_HBD);
4121 return -ERANGE;
4122 }
4123 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4124 "2950 Handled SLI_CONFIG(hbd) wr, "
4125 "ext_buf_cnt:%d\n", ext_buf_cnt);
4126 }
4127
4128 /* before dma buffer descriptor setup */
4129 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4130 sta_pre_addr, dmabuf, ext_buf_cnt);
4131
4132 if (ext_buf_cnt == 0)
4133 return -EPERM;
4134
4135 /* for the first external buffer */
4136 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4137
4138 /* after dma descriptor setup */
4139 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4140 sta_pos_addr, dmabuf, ext_buf_cnt);
4141
4142 /* log for looking forward */
4143 for (i = 1; i < ext_buf_cnt; i++) {
4144 if (nemb_tp == nemb_mse)
4145 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4146 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4147 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4148 mse[i].buf_len);
4149 else
4150 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4151 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4152 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4153 &sli_cfg_mbx->un.sli_config_emb1_subsys.
4154 hbd[i]));
4155 }
4156
4157 /* multi-buffer handling context */
4158 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4159 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4160 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4161 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4162 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4163 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4164
4165 if (ext_buf_cnt == 1) {
4166 /* bsg tracking structure */
4167 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4168 if (!dd_data) {
4169 rc = -ENOMEM;
4170 goto job_error;
4171 }
4172
4173 /* mailbox command structure for base driver */
4174 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4175 if (!pmboxq) {
4176 rc = -ENOMEM;
4177 goto job_error;
4178 }
4179 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4180 pmb = &pmboxq->u.mb;
4181 mbx = (uint8_t *)dmabuf->virt;
4182 memcpy(pmb, mbx, sizeof(*pmb));
4183 pmb->mbxOwner = OWN_HOST;
4184 pmboxq->vport = phba->pport;
4185
4186 /* callback for multi-buffer read mailbox command */
4187 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4188
4189 /* context fields to callback function */
4190 pmboxq->ctx_buf = dd_data;
4191 dd_data->type = TYPE_MBOX;
4192 dd_data->set_job = job;
4193 dd_data->context_un.mbox.pmboxq = pmboxq;
4194 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4195 job->dd_data = dd_data;
4196
4197 /* state change */
4198
4199 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4200 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4201 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4202 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4203 "2955 Issued SLI_CONFIG ext-buffer "
4204 "mailbox command, rc:x%x\n", rc);
4205 return SLI_CONFIG_HANDLED;
4206 }
4207 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4208 "2956 Failed to issue SLI_CONFIG ext-buffer "
4209 "mailbox command, rc:x%x\n", rc);
4210 rc = -EPIPE;
4211 goto job_error;
4212 }
4213
4214 /* wait for additional external buffers */
4215
4216 bsg_reply->result = 0;
4217 bsg_job_done(job, bsg_reply->result,
4218 bsg_reply->reply_payload_rcv_len);
4219 return SLI_CONFIG_HANDLED;
4220
4221 job_error:
4222 if (pmboxq)
4223 mempool_free(pmboxq, phba->mbox_mem_pool);
4224 kfree(dd_data);
4225
4226 return rc;
4227 }
4228
4229 /**
4230 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4231 * @phba: Pointer to HBA context object.
4232 * @job: Pointer to the job object.
4233 * @dmabuf: Pointer to a DMA buffer descriptor.
4234 *
4235 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4236 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4237 * with embedded subsystem 0x1 and opcodes with external HBDs.
4238 **/
4239 static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4240 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4241 struct lpfc_dmabuf *dmabuf)
4242 {
4243 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4244 uint32_t subsys;
4245 uint32_t opcode;
4246 int rc = SLI_CONFIG_NOT_HANDLED;
4247
4248 /* state change on new multi-buffer pass-through mailbox command */
4249 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4250
4251 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4252
4253 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4254 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4255 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4256 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4257 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4258 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4259 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4260 switch (opcode) {
4261 case FCOE_OPCODE_READ_FCF:
4262 case FCOE_OPCODE_GET_DPORT_RESULTS:
4263 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4264 "2957 Handled SLI_CONFIG "
4265 "subsys_fcoe, opcode:x%x\n",
4266 opcode);
4267 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4268 nemb_mse, dmabuf);
4269 break;
4270 case FCOE_OPCODE_ADD_FCF:
4271 case FCOE_OPCODE_SET_DPORT_MODE:
4272 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4273 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4274 "2958 Handled SLI_CONFIG "
4275 "subsys_fcoe, opcode:x%x\n",
4276 opcode);
4277 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4278 nemb_mse, dmabuf);
4279 break;
4280 default:
4281 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4282 "2959 Reject SLI_CONFIG "
4283 "subsys_fcoe, opcode:x%x\n",
4284 opcode);
4285 rc = -EPERM;
4286 break;
4287 }
4288 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4289 switch (opcode) {
4290 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4291 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4292 case COMN_OPCODE_GET_PROFILE_CONFIG:
4293 case COMN_OPCODE_SET_FEATURES:
4294 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4295 "3106 Handled SLI_CONFIG "
4296 "subsys_comn, opcode:x%x\n",
4297 opcode);
4298 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4299 nemb_mse, dmabuf);
4300 break;
4301 default:
4302 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4303 "3107 Reject SLI_CONFIG "
4304 "subsys_comn, opcode:x%x\n",
4305 opcode);
4306 rc = -EPERM;
4307 break;
4308 }
4309 } else {
4310 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4311 "2977 Reject SLI_CONFIG "
4312 "subsys:x%d, opcode:x%x\n",
4313 subsys, opcode);
4314 rc = -EPERM;
4315 }
4316 } else {
4317 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4318 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4319 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4320 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4321 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4322 switch (opcode) {
4323 case COMN_OPCODE_READ_OBJECT:
4324 case COMN_OPCODE_READ_OBJECT_LIST:
4325 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4326 "2960 Handled SLI_CONFIG "
4327 "subsys_comn, opcode:x%x\n",
4328 opcode);
4329 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4330 nemb_hbd, dmabuf);
4331 break;
4332 case COMN_OPCODE_WRITE_OBJECT:
4333 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4334 "2961 Handled SLI_CONFIG "
4335 "subsys_comn, opcode:x%x\n",
4336 opcode);
4337 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4338 nemb_hbd, dmabuf);
4339 break;
4340 default:
4341 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4342 "2962 Not handled SLI_CONFIG "
4343 "subsys_comn, opcode:x%x\n",
4344 opcode);
4345 rc = SLI_CONFIG_NOT_HANDLED;
4346 break;
4347 }
4348 } else {
4349 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4350 "2978 Not handled SLI_CONFIG "
4351 "subsys:x%d, opcode:x%x\n",
4352 subsys, opcode);
4353 rc = SLI_CONFIG_NOT_HANDLED;
4354 }
4355 }
4356
4357 /* state reset on not handled new multi-buffer mailbox command */
4358 if (rc != SLI_CONFIG_HANDLED)
4359 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4360
4361 return rc;
4362 }
4363
4364 /**
4365 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4366 * @phba: Pointer to HBA context object.
4367 *
4368 * This routine is for requesting to abort a pass-through mailbox command with
4369 * multiple external buffers due to error condition.
4370 **/
4371 static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba * phba)4372 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4373 {
4374 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4375 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4376 else
4377 lpfc_bsg_mbox_ext_session_reset(phba);
4378 return;
4379 }
4380
4381 /**
4382 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4383 * @phba: Pointer to HBA context object.
4384 * @job: Pointer to the job object.
4385 *
4386 * This routine extracts the next mailbox read external buffer back to
4387 * user space through BSG.
4388 **/
4389 static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba * phba,struct bsg_job * job)4390 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4391 {
4392 struct fc_bsg_reply *bsg_reply = job->reply;
4393 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4394 struct lpfc_dmabuf *dmabuf;
4395 uint8_t *pbuf;
4396 uint32_t size;
4397 uint32_t index;
4398
4399 index = phba->mbox_ext_buf_ctx.seqNum;
4400 phba->mbox_ext_buf_ctx.seqNum++;
4401
4402 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4403 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4404
4405 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4406 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4407 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4408 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4409 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4410 "buffer[%d], size:%d\n", index, size);
4411 } else {
4412 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4413 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4414 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4415 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4416 "buffer[%d], size:%d\n", index, size);
4417 }
4418 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4419 return -EPIPE;
4420 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4421 struct lpfc_dmabuf, list);
4422 list_del_init(&dmabuf->list);
4423
4424 /* after dma buffer descriptor setup */
4425 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4426 mbox_rd, dma_ebuf, sta_pos_addr,
4427 dmabuf, index);
4428
4429 pbuf = (uint8_t *)dmabuf->virt;
4430 bsg_reply->reply_payload_rcv_len =
4431 sg_copy_from_buffer(job->reply_payload.sg_list,
4432 job->reply_payload.sg_cnt,
4433 pbuf, size);
4434
4435 lpfc_bsg_dma_page_free(phba, dmabuf);
4436
4437 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4438 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4439 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4440 "command session done\n");
4441 lpfc_bsg_mbox_ext_session_reset(phba);
4442 }
4443
4444 bsg_reply->result = 0;
4445 bsg_job_done(job, bsg_reply->result,
4446 bsg_reply->reply_payload_rcv_len);
4447
4448 return SLI_CONFIG_HANDLED;
4449 }
4450
4451 /**
4452 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4453 * @phba: Pointer to HBA context object.
4454 * @job: Pointer to the job object.
4455 * @dmabuf: Pointer to a DMA buffer descriptor.
4456 *
4457 * This routine sets up the next mailbox read external buffer obtained
4458 * from user space through BSG.
4459 **/
4460 static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4461 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4462 struct lpfc_dmabuf *dmabuf)
4463 {
4464 struct fc_bsg_reply *bsg_reply = job->reply;
4465 struct bsg_job_data *dd_data = NULL;
4466 LPFC_MBOXQ_t *pmboxq = NULL;
4467 MAILBOX_t *pmb;
4468 enum nemb_type nemb_tp;
4469 uint8_t *pbuf;
4470 uint32_t size;
4471 uint32_t index;
4472 int rc;
4473
4474 index = phba->mbox_ext_buf_ctx.seqNum;
4475 phba->mbox_ext_buf_ctx.seqNum++;
4476 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4477
4478 pbuf = (uint8_t *)dmabuf->virt;
4479 size = job->request_payload.payload_len;
4480 sg_copy_to_buffer(job->request_payload.sg_list,
4481 job->request_payload.sg_cnt,
4482 pbuf, size);
4483
4484 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4485 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4486 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4487 "buffer[%d], size:%d\n",
4488 phba->mbox_ext_buf_ctx.seqNum, size);
4489
4490 } else {
4491 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4492 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4493 "buffer[%d], size:%d\n",
4494 phba->mbox_ext_buf_ctx.seqNum, size);
4495
4496 }
4497
4498 /* set up external buffer descriptor and add to external buffer list */
4499 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4500 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4501 dmabuf);
4502 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4503
4504 /* after write dma buffer */
4505 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4506 mbox_wr, dma_ebuf, sta_pos_addr,
4507 dmabuf, index);
4508
4509 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4510 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4511 "2968 SLI_CONFIG ext-buffer wr all %d "
4512 "ebuffers received\n",
4513 phba->mbox_ext_buf_ctx.numBuf);
4514
4515 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4516 if (!dd_data) {
4517 rc = -ENOMEM;
4518 goto job_error;
4519 }
4520
4521 /* mailbox command structure for base driver */
4522 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4523 if (!pmboxq) {
4524 rc = -ENOMEM;
4525 goto job_error;
4526 }
4527 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4528 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4529 pmb = &pmboxq->u.mb;
4530 memcpy(pmb, pbuf, sizeof(*pmb));
4531 pmb->mbxOwner = OWN_HOST;
4532 pmboxq->vport = phba->pport;
4533
4534 /* callback for multi-buffer write mailbox command */
4535 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4536
4537 /* context fields to callback function */
4538 pmboxq->ctx_buf = dd_data;
4539 dd_data->type = TYPE_MBOX;
4540 dd_data->set_job = job;
4541 dd_data->context_un.mbox.pmboxq = pmboxq;
4542 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4543 job->dd_data = dd_data;
4544
4545 /* state change */
4546 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4547
4548 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4549 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4550 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4551 "2969 Issued SLI_CONFIG ext-buffer "
4552 "mailbox command, rc:x%x\n", rc);
4553 return SLI_CONFIG_HANDLED;
4554 }
4555 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4556 "2970 Failed to issue SLI_CONFIG ext-buffer "
4557 "mailbox command, rc:x%x\n", rc);
4558 rc = -EPIPE;
4559 goto job_error;
4560 }
4561
4562 /* wait for additional external buffers */
4563 bsg_reply->result = 0;
4564 bsg_job_done(job, bsg_reply->result,
4565 bsg_reply->reply_payload_rcv_len);
4566 return SLI_CONFIG_HANDLED;
4567
4568 job_error:
4569 if (pmboxq)
4570 mempool_free(pmboxq, phba->mbox_mem_pool);
4571 lpfc_bsg_dma_page_free(phba, dmabuf);
4572 kfree(dd_data);
4573
4574 return rc;
4575 }
4576
4577 /**
4578 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4579 * @phba: Pointer to HBA context object.
4580 * @job: Pointer to the job object.
4581 * @dmabuf: Pointer to a DMA buffer descriptor.
4582 *
4583 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4584 * command with multiple non-embedded external buffers.
4585 **/
4586 static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4587 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4588 struct lpfc_dmabuf *dmabuf)
4589 {
4590 int rc;
4591
4592 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4593 "2971 SLI_CONFIG buffer (type:x%x)\n",
4594 phba->mbox_ext_buf_ctx.mboxType);
4595
4596 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4597 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4598 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4599 "2972 SLI_CONFIG rd buffer state "
4600 "mismatch:x%x\n",
4601 phba->mbox_ext_buf_ctx.state);
4602 lpfc_bsg_mbox_ext_abort(phba);
4603 return -EPIPE;
4604 }
4605 rc = lpfc_bsg_read_ebuf_get(phba, job);
4606 if (rc == SLI_CONFIG_HANDLED)
4607 lpfc_bsg_dma_page_free(phba, dmabuf);
4608 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4609 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4610 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4611 "2973 SLI_CONFIG wr buffer state "
4612 "mismatch:x%x\n",
4613 phba->mbox_ext_buf_ctx.state);
4614 lpfc_bsg_mbox_ext_abort(phba);
4615 return -EPIPE;
4616 }
4617 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4618 }
4619 return rc;
4620 }
4621
4622 /**
4623 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4624 * @phba: Pointer to HBA context object.
4625 * @job: Pointer to the job object.
4626 * @dmabuf: Pointer to a DMA buffer descriptor.
4627 *
4628 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4629 * (0x9B) mailbox commands and external buffers.
4630 **/
4631 static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_dmabuf * dmabuf)4632 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4633 struct lpfc_dmabuf *dmabuf)
4634 {
4635 struct fc_bsg_request *bsg_request = job->request;
4636 struct dfc_mbox_req *mbox_req;
4637 int rc = SLI_CONFIG_NOT_HANDLED;
4638
4639 mbox_req =
4640 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4641
4642 /* mbox command with/without single external buffer */
4643 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4644 return rc;
4645
4646 /* mbox command and first external buffer */
4647 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4648 if (mbox_req->extSeqNum == 1) {
4649 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4650 "2974 SLI_CONFIG mailbox: tag:%d, "
4651 "seq:%d\n", mbox_req->extMboxTag,
4652 mbox_req->extSeqNum);
4653 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4654 return rc;
4655 } else
4656 goto sli_cfg_ext_error;
4657 }
4658
4659 /*
4660 * handle additional external buffers
4661 */
4662
4663 /* check broken pipe conditions */
4664 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4665 goto sli_cfg_ext_error;
4666 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4667 goto sli_cfg_ext_error;
4668 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4669 goto sli_cfg_ext_error;
4670
4671 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4672 "2975 SLI_CONFIG mailbox external buffer: "
4673 "extSta:x%x, tag:%d, seq:%d\n",
4674 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4675 mbox_req->extSeqNum);
4676 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4677 return rc;
4678
4679 sli_cfg_ext_error:
4680 /* all other cases, broken pipe */
4681 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4682 "2976 SLI_CONFIG mailbox broken pipe: "
4683 "ctxSta:x%x, ctxNumBuf:%d "
4684 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4685 phba->mbox_ext_buf_ctx.state,
4686 phba->mbox_ext_buf_ctx.numBuf,
4687 phba->mbox_ext_buf_ctx.mbxTag,
4688 phba->mbox_ext_buf_ctx.seqNum,
4689 mbox_req->extMboxTag, mbox_req->extSeqNum);
4690
4691 lpfc_bsg_mbox_ext_session_reset(phba);
4692
4693 return -EPIPE;
4694 }
4695
4696 /**
4697 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4698 * @phba: Pointer to HBA context object.
4699 * @job: Pointer to the job object.
4700 * @vport: Pointer to a vport object.
4701 *
4702 * Allocate a tracking object, mailbox command memory, get a mailbox
4703 * from the mailbox pool, copy the caller mailbox command.
4704 *
4705 * If offline and the sli is active we need to poll for the command (port is
4706 * being reset) and complete the job, otherwise issue the mailbox command and
4707 * let our completion handler finish the command.
4708 **/
4709 static int
lpfc_bsg_issue_mbox(struct lpfc_hba * phba,struct bsg_job * job,struct lpfc_vport * vport)4710 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4711 struct lpfc_vport *vport)
4712 {
4713 struct fc_bsg_request *bsg_request = job->request;
4714 struct fc_bsg_reply *bsg_reply = job->reply;
4715 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4716 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4717 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4718 uint8_t *pmbx = NULL;
4719 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4720 struct lpfc_dmabuf *dmabuf = NULL;
4721 struct dfc_mbox_req *mbox_req;
4722 struct READ_EVENT_LOG_VAR *rdEventLog;
4723 uint32_t transmit_length, receive_length, mode;
4724 struct lpfc_mbx_sli4_config *sli4_config;
4725 struct lpfc_mbx_nembed_cmd *nembed_sge;
4726 struct ulp_bde64 *bde;
4727 uint8_t *ext = NULL;
4728 int rc = 0;
4729 uint8_t *from;
4730 uint32_t size;
4731
4732 /* in case no data is transferred */
4733 bsg_reply->reply_payload_rcv_len = 0;
4734
4735 /* sanity check to protect driver */
4736 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4737 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4738 rc = -ERANGE;
4739 goto job_done;
4740 }
4741
4742 /*
4743 * Don't allow mailbox commands to be sent when blocked or when in
4744 * the middle of discovery
4745 */
4746 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4747 rc = -EAGAIN;
4748 goto job_done;
4749 }
4750
4751 mbox_req =
4752 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4753
4754 /* check if requested extended data lengths are valid */
4755 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4756 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4757 rc = -ERANGE;
4758 goto job_done;
4759 }
4760
4761 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4762 if (!dmabuf || !dmabuf->virt) {
4763 rc = -ENOMEM;
4764 goto job_done;
4765 }
4766
4767 /* Get the mailbox command or external buffer from BSG */
4768 pmbx = (uint8_t *)dmabuf->virt;
4769 size = job->request_payload.payload_len;
4770 sg_copy_to_buffer(job->request_payload.sg_list,
4771 job->request_payload.sg_cnt, pmbx, size);
4772
4773 /* Handle possible SLI_CONFIG with non-embedded payloads */
4774 if (phba->sli_rev == LPFC_SLI_REV4) {
4775 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4776 if (rc == SLI_CONFIG_HANDLED)
4777 goto job_cont;
4778 if (rc)
4779 goto job_done;
4780 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4781 }
4782
4783 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4784 if (rc != 0)
4785 goto job_done; /* must be negative */
4786
4787 /* allocate our bsg tracking structure */
4788 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4789 if (!dd_data) {
4790 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4791 "2727 Failed allocation of dd_data\n");
4792 rc = -ENOMEM;
4793 goto job_done;
4794 }
4795
4796 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4797 if (!pmboxq) {
4798 rc = -ENOMEM;
4799 goto job_done;
4800 }
4801 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4802
4803 pmb = &pmboxq->u.mb;
4804 memcpy(pmb, pmbx, sizeof(*pmb));
4805 pmb->mbxOwner = OWN_HOST;
4806 pmboxq->vport = vport;
4807
4808 /* If HBA encountered an error attention, allow only DUMP
4809 * or RESTART mailbox commands until the HBA is restarted.
4810 */
4811 if (phba->pport->stopped &&
4812 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4813 pmb->mbxCommand != MBX_RESTART &&
4814 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4815 pmb->mbxCommand != MBX_WRITE_WWN)
4816 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4817 "2797 mbox: Issued mailbox cmd "
4818 "0x%x while in stopped state.\n",
4819 pmb->mbxCommand);
4820
4821 /* extended mailbox commands will need an extended buffer */
4822 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4823 from = pmbx;
4824 ext = from + sizeof(MAILBOX_t);
4825 pmboxq->ctx_buf = ext;
4826 pmboxq->in_ext_byte_len =
4827 mbox_req->inExtWLen * sizeof(uint32_t);
4828 pmboxq->out_ext_byte_len =
4829 mbox_req->outExtWLen * sizeof(uint32_t);
4830 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4831 }
4832
4833 /* biu diag will need a kernel buffer to transfer the data
4834 * allocate our own buffer and setup the mailbox command to
4835 * use ours
4836 */
4837 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4838 transmit_length = pmb->un.varWords[1];
4839 receive_length = pmb->un.varWords[4];
4840 /* transmit length cannot be greater than receive length or
4841 * mailbox extension size
4842 */
4843 if ((transmit_length > receive_length) ||
4844 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4845 rc = -ERANGE;
4846 goto job_done;
4847 }
4848 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4849 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4850 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4851 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4852
4853 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4854 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4855 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4856 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4857 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4858 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4859 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4860 rdEventLog = &pmb->un.varRdEventLog;
4861 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4862 mode = bf_get(lpfc_event_log, rdEventLog);
4863
4864 /* receive length cannot be greater than mailbox
4865 * extension size
4866 */
4867 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4868 rc = -ERANGE;
4869 goto job_done;
4870 }
4871
4872 /* mode zero uses a bde like biu diags command */
4873 if (mode == 0) {
4874 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4875 + sizeof(MAILBOX_t));
4876 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4877 + sizeof(MAILBOX_t));
4878 }
4879 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4880 /* Let type 4 (well known data) through because the data is
4881 * returned in varwords[4-8]
4882 * otherwise check the recieve length and fetch the buffer addr
4883 */
4884 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4885 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4886 /* rebuild the command for sli4 using our own buffers
4887 * like we do for biu diags
4888 */
4889 receive_length = pmb->un.varWords[2];
4890 /* receive length cannot be greater than mailbox
4891 * extension size
4892 */
4893 if (receive_length == 0) {
4894 rc = -ERANGE;
4895 goto job_done;
4896 }
4897 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4898 + sizeof(MAILBOX_t));
4899 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4900 + sizeof(MAILBOX_t));
4901 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4902 pmb->un.varUpdateCfg.co) {
4903 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4904
4905 /* bde size cannot be greater than mailbox ext size */
4906 if (bde->tus.f.bdeSize >
4907 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4908 rc = -ERANGE;
4909 goto job_done;
4910 }
4911 bde->addrHigh = putPaddrHigh(dmabuf->phys
4912 + sizeof(MAILBOX_t));
4913 bde->addrLow = putPaddrLow(dmabuf->phys
4914 + sizeof(MAILBOX_t));
4915 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4916 /* Handling non-embedded SLI_CONFIG mailbox command */
4917 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4918 if (!bf_get(lpfc_mbox_hdr_emb,
4919 &sli4_config->header.cfg_mhdr)) {
4920 /* rebuild the command for sli4 using our
4921 * own buffers like we do for biu diags
4922 */
4923 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4924 &pmb->un.varWords[0];
4925 receive_length = nembed_sge->sge[0].length;
4926
4927 /* receive length cannot be greater than
4928 * mailbox extension size
4929 */
4930 if ((receive_length == 0) ||
4931 (receive_length >
4932 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4933 rc = -ERANGE;
4934 goto job_done;
4935 }
4936
4937 nembed_sge->sge[0].pa_hi =
4938 putPaddrHigh(dmabuf->phys
4939 + sizeof(MAILBOX_t));
4940 nembed_sge->sge[0].pa_lo =
4941 putPaddrLow(dmabuf->phys
4942 + sizeof(MAILBOX_t));
4943 }
4944 }
4945 }
4946
4947 dd_data->context_un.mbox.dmabuffers = dmabuf;
4948
4949 /* setup wake call as IOCB callback */
4950 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4951
4952 /* setup context field to pass wait_queue pointer to wake function */
4953 pmboxq->ctx_ndlp = dd_data;
4954 dd_data->type = TYPE_MBOX;
4955 dd_data->set_job = job;
4956 dd_data->context_un.mbox.pmboxq = pmboxq;
4957 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4958 dd_data->context_un.mbox.ext = ext;
4959 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4960 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4961 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4962 job->dd_data = dd_data;
4963
4964 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4965 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4966 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4967 if (rc != MBX_SUCCESS) {
4968 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4969 goto job_done;
4970 }
4971
4972 /* job finished, copy the data */
4973 memcpy(pmbx, pmb, sizeof(*pmb));
4974 bsg_reply->reply_payload_rcv_len =
4975 sg_copy_from_buffer(job->reply_payload.sg_list,
4976 job->reply_payload.sg_cnt,
4977 pmbx, size);
4978 /* not waiting mbox already done */
4979 rc = 0;
4980 goto job_done;
4981 }
4982
4983 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4984 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4985 return 1; /* job started */
4986
4987 job_done:
4988 /* common exit for error or job completed inline */
4989 if (pmboxq)
4990 mempool_free(pmboxq, phba->mbox_mem_pool);
4991 lpfc_bsg_dma_page_free(phba, dmabuf);
4992 kfree(dd_data);
4993
4994 job_cont:
4995 return rc;
4996 }
4997
4998 /**
4999 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
5000 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
5001 **/
5002 static int
lpfc_bsg_mbox_cmd(struct bsg_job * job)5003 lpfc_bsg_mbox_cmd(struct bsg_job *job)
5004 {
5005 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5006 struct fc_bsg_request *bsg_request = job->request;
5007 struct fc_bsg_reply *bsg_reply = job->reply;
5008 struct lpfc_hba *phba = vport->phba;
5009 struct dfc_mbox_req *mbox_req;
5010 int rc = 0;
5011
5012 /* mix-and-match backward compatibility */
5013 bsg_reply->reply_payload_rcv_len = 0;
5014 if (job->request_len <
5015 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
5016 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
5017 "2737 Mix-and-match backward compatibility "
5018 "between MBOX_REQ old size:%d and "
5019 "new request size:%d\n",
5020 (int)(job->request_len -
5021 sizeof(struct fc_bsg_request)),
5022 (int)sizeof(struct dfc_mbox_req));
5023 mbox_req = (struct dfc_mbox_req *)
5024 bsg_request->rqst_data.h_vendor.vendor_cmd;
5025 mbox_req->extMboxTag = 0;
5026 mbox_req->extSeqNum = 0;
5027 }
5028
5029 rc = lpfc_bsg_issue_mbox(phba, job, vport);
5030
5031 if (rc == 0) {
5032 /* job done */
5033 bsg_reply->result = 0;
5034 job->dd_data = NULL;
5035 bsg_job_done(job, bsg_reply->result,
5036 bsg_reply->reply_payload_rcv_len);
5037 } else if (rc == 1)
5038 /* job submitted, will complete later*/
5039 rc = 0; /* return zero, no error */
5040 else {
5041 /* some error occurred */
5042 bsg_reply->result = rc;
5043 job->dd_data = NULL;
5044 }
5045
5046 return rc;
5047 }
5048
5049 /**
5050 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
5051 * @phba: Pointer to HBA context object.
5052 * @cmdiocbq: Pointer to command iocb.
5053 * @rspiocbq: Pointer to response iocb.
5054 *
5055 * This function is the completion handler for iocbs issued using
5056 * lpfc_menlo_cmd function. This function is called by the
5057 * ring event handler function without any lock held. This function
5058 * can be called from both worker thread context and interrupt
5059 * context. This function also can be called from another thread which
5060 * cleans up the SLI layer objects.
5061 * This function copies the contents of the response iocb to the
5062 * response iocb memory object provided by the caller of
5063 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
5064 * sleeps for the iocb completion.
5065 **/
5066 static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba * phba,struct lpfc_iocbq * cmdiocbq,struct lpfc_iocbq * rspiocbq)5067 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5068 struct lpfc_iocbq *cmdiocbq,
5069 struct lpfc_iocbq *rspiocbq)
5070 {
5071 struct bsg_job_data *dd_data;
5072 struct bsg_job *job;
5073 struct fc_bsg_reply *bsg_reply;
5074 IOCB_t *rsp;
5075 struct lpfc_dmabuf *bmp, *cmp, *rmp;
5076 struct lpfc_bsg_menlo *menlo;
5077 unsigned long flags;
5078 struct menlo_response *menlo_resp;
5079 unsigned int rsp_size;
5080 int rc = 0;
5081
5082 dd_data = cmdiocbq->context1;
5083 cmp = cmdiocbq->context2;
5084 bmp = cmdiocbq->context3;
5085 menlo = &dd_data->context_un.menlo;
5086 rmp = menlo->rmp;
5087 rsp = &rspiocbq->iocb;
5088
5089 /* Determine if job has been aborted */
5090 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5091 job = dd_data->set_job;
5092 if (job) {
5093 bsg_reply = job->reply;
5094 /* Prevent timeout handling from trying to abort job */
5095 job->dd_data = NULL;
5096 }
5097 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5098
5099 /* Copy the job data or set the failing status for the job */
5100
5101 if (job) {
5102 /* always return the xri, this would be used in the case
5103 * of a menlo download to allow the data to be sent as a
5104 * continuation of the exchange.
5105 */
5106
5107 menlo_resp = (struct menlo_response *)
5108 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5109 menlo_resp->xri = rsp->ulpContext;
5110 if (rsp->ulpStatus) {
5111 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5112 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5113 case IOERR_SEQUENCE_TIMEOUT:
5114 rc = -ETIMEDOUT;
5115 break;
5116 case IOERR_INVALID_RPI:
5117 rc = -EFAULT;
5118 break;
5119 default:
5120 rc = -EACCES;
5121 break;
5122 }
5123 } else {
5124 rc = -EACCES;
5125 }
5126 } else {
5127 rsp_size = rsp->un.genreq64.bdl.bdeSize;
5128 bsg_reply->reply_payload_rcv_len =
5129 lpfc_bsg_copy_data(rmp, &job->reply_payload,
5130 rsp_size, 0);
5131 }
5132
5133 }
5134
5135 lpfc_sli_release_iocbq(phba, cmdiocbq);
5136 lpfc_free_bsg_buffers(phba, cmp);
5137 lpfc_free_bsg_buffers(phba, rmp);
5138 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5139 kfree(bmp);
5140 kfree(dd_data);
5141
5142 /* Complete the job if active */
5143
5144 if (job) {
5145 bsg_reply->result = rc;
5146 bsg_job_done(job, bsg_reply->result,
5147 bsg_reply->reply_payload_rcv_len);
5148 }
5149
5150 return;
5151 }
5152
5153 /**
5154 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5155 * @job: fc_bsg_job to handle
5156 *
5157 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5158 * all the command completions will return the xri for the command.
5159 * For menlo data requests a gen request 64 CX is used to continue the exchange
5160 * supplied in the menlo request header xri field.
5161 **/
5162 static int
lpfc_menlo_cmd(struct bsg_job * job)5163 lpfc_menlo_cmd(struct bsg_job *job)
5164 {
5165 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5166 struct fc_bsg_request *bsg_request = job->request;
5167 struct fc_bsg_reply *bsg_reply = job->reply;
5168 struct lpfc_hba *phba = vport->phba;
5169 struct lpfc_iocbq *cmdiocbq;
5170 IOCB_t *cmd;
5171 int rc = 0;
5172 struct menlo_command *menlo_cmd;
5173 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5174 int request_nseg;
5175 int reply_nseg;
5176 struct bsg_job_data *dd_data;
5177 struct ulp_bde64 *bpl = NULL;
5178
5179 /* in case no data is returned return just the return code */
5180 bsg_reply->reply_payload_rcv_len = 0;
5181
5182 if (job->request_len <
5183 sizeof(struct fc_bsg_request) +
5184 sizeof(struct menlo_command)) {
5185 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5186 "2784 Received MENLO_CMD request below "
5187 "minimum size\n");
5188 rc = -ERANGE;
5189 goto no_dd_data;
5190 }
5191
5192 if (job->reply_len < sizeof(*bsg_reply) +
5193 sizeof(struct menlo_response)) {
5194 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5195 "2785 Received MENLO_CMD reply below "
5196 "minimum size\n");
5197 rc = -ERANGE;
5198 goto no_dd_data;
5199 }
5200
5201 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5202 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5203 "2786 Adapter does not support menlo "
5204 "commands\n");
5205 rc = -EPERM;
5206 goto no_dd_data;
5207 }
5208
5209 menlo_cmd = (struct menlo_command *)
5210 bsg_request->rqst_data.h_vendor.vendor_cmd;
5211
5212 /* allocate our bsg tracking structure */
5213 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5214 if (!dd_data) {
5215 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5216 "2787 Failed allocation of dd_data\n");
5217 rc = -ENOMEM;
5218 goto no_dd_data;
5219 }
5220
5221 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5222 if (!bmp) {
5223 rc = -ENOMEM;
5224 goto free_dd;
5225 }
5226
5227 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5228 if (!bmp->virt) {
5229 rc = -ENOMEM;
5230 goto free_bmp;
5231 }
5232
5233 INIT_LIST_HEAD(&bmp->list);
5234
5235 bpl = (struct ulp_bde64 *)bmp->virt;
5236 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5237 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5238 1, bpl, &request_nseg);
5239 if (!cmp) {
5240 rc = -ENOMEM;
5241 goto free_bmp;
5242 }
5243 lpfc_bsg_copy_data(cmp, &job->request_payload,
5244 job->request_payload.payload_len, 1);
5245
5246 bpl += request_nseg;
5247 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5248 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5249 bpl, &reply_nseg);
5250 if (!rmp) {
5251 rc = -ENOMEM;
5252 goto free_cmp;
5253 }
5254
5255 cmdiocbq = lpfc_sli_get_iocbq(phba);
5256 if (!cmdiocbq) {
5257 rc = -ENOMEM;
5258 goto free_rmp;
5259 }
5260
5261 cmd = &cmdiocbq->iocb;
5262 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5263 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5264 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5265 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5266 cmd->un.genreq64.bdl.bdeSize =
5267 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5268 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5269 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5270 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5271 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5272 cmd->ulpBdeCount = 1;
5273 cmd->ulpClass = CLASS3;
5274 cmd->ulpOwner = OWN_CHIP;
5275 cmd->ulpLe = 1; /* Limited Edition */
5276 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5277 cmdiocbq->vport = phba->pport;
5278 /* We want the firmware to timeout before we do */
5279 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5280 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5281 cmdiocbq->context1 = dd_data;
5282 cmdiocbq->context2 = cmp;
5283 cmdiocbq->context3 = bmp;
5284 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5285 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5286 cmd->ulpPU = MENLO_PU; /* 3 */
5287 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5288 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5289 } else {
5290 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5291 cmd->ulpPU = 1;
5292 cmd->un.ulpWord[4] = 0;
5293 cmd->ulpContext = menlo_cmd->xri;
5294 }
5295
5296 dd_data->type = TYPE_MENLO;
5297 dd_data->set_job = job;
5298 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5299 dd_data->context_un.menlo.rmp = rmp;
5300 job->dd_data = dd_data;
5301
5302 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5303 MENLO_TIMEOUT - 5);
5304 if (rc == IOCB_SUCCESS)
5305 return 0; /* done for now */
5306
5307 lpfc_sli_release_iocbq(phba, cmdiocbq);
5308
5309 free_rmp:
5310 lpfc_free_bsg_buffers(phba, rmp);
5311 free_cmp:
5312 lpfc_free_bsg_buffers(phba, cmp);
5313 free_bmp:
5314 if (bmp->virt)
5315 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5316 kfree(bmp);
5317 free_dd:
5318 kfree(dd_data);
5319 no_dd_data:
5320 /* make error code available to userspace */
5321 bsg_reply->result = rc;
5322 job->dd_data = NULL;
5323 return rc;
5324 }
5325
5326 static int
lpfc_forced_link_speed(struct bsg_job * job)5327 lpfc_forced_link_speed(struct bsg_job *job)
5328 {
5329 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5330 struct lpfc_vport *vport = shost_priv(shost);
5331 struct lpfc_hba *phba = vport->phba;
5332 struct fc_bsg_reply *bsg_reply = job->reply;
5333 struct forced_link_speed_support_reply *forced_reply;
5334 int rc = 0;
5335
5336 if (job->request_len <
5337 sizeof(struct fc_bsg_request) +
5338 sizeof(struct get_forced_link_speed_support)) {
5339 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5340 "0048 Received FORCED_LINK_SPEED request "
5341 "below minimum size\n");
5342 rc = -EINVAL;
5343 goto job_error;
5344 }
5345
5346 forced_reply = (struct forced_link_speed_support_reply *)
5347 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5348
5349 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5350 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5351 "0049 Received FORCED_LINK_SPEED reply below "
5352 "minimum size\n");
5353 rc = -EINVAL;
5354 goto job_error;
5355 }
5356
5357 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5358 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5359 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5360 job_error:
5361 bsg_reply->result = rc;
5362 if (rc == 0)
5363 bsg_job_done(job, bsg_reply->result,
5364 bsg_reply->reply_payload_rcv_len);
5365 return rc;
5366 }
5367
5368 /**
5369 * lpfc_check_fwlog_support: Check FW log support on the adapter
5370 * @phba: Pointer to HBA context object.
5371 *
5372 * Check if FW Logging support by the adapter
5373 **/
5374 int
lpfc_check_fwlog_support(struct lpfc_hba * phba)5375 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5376 {
5377 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5378
5379 ras_fwlog = &phba->ras_fwlog;
5380
5381 if (!ras_fwlog->ras_hwsupport)
5382 return -EACCES;
5383 else if (!ras_fwlog->ras_enabled)
5384 return -EPERM;
5385 else
5386 return 0;
5387 }
5388
5389 /**
5390 * lpfc_bsg_get_ras_config: Get RAS configuration settings
5391 * @job: fc_bsg_job to handle
5392 *
5393 * Get RAS configuration values set.
5394 **/
5395 static int
lpfc_bsg_get_ras_config(struct bsg_job * job)5396 lpfc_bsg_get_ras_config(struct bsg_job *job)
5397 {
5398 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5399 struct lpfc_vport *vport = shost_priv(shost);
5400 struct fc_bsg_reply *bsg_reply = job->reply;
5401 struct lpfc_hba *phba = vport->phba;
5402 struct lpfc_bsg_get_ras_config_reply *ras_reply;
5403 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5404 int rc = 0;
5405
5406 if (job->request_len <
5407 sizeof(struct fc_bsg_request) +
5408 sizeof(struct lpfc_bsg_ras_req)) {
5409 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5410 "6192 FW_LOG request received "
5411 "below minimum size\n");
5412 rc = -EINVAL;
5413 goto ras_job_error;
5414 }
5415
5416 /* Check FW log status */
5417 rc = lpfc_check_fwlog_support(phba);
5418 if (rc)
5419 goto ras_job_error;
5420
5421 ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5422 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5423
5424 /* Current logging state */
5425 spin_lock_irq(&phba->hbalock);
5426 if (ras_fwlog->state == ACTIVE)
5427 ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5428 else
5429 ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5430 spin_unlock_irq(&phba->hbalock);
5431
5432 ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5433 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5434
5435 ras_job_error:
5436 /* make error code available to userspace */
5437 bsg_reply->result = rc;
5438
5439 /* complete the job back to userspace */
5440 if (!rc)
5441 bsg_job_done(job, bsg_reply->result,
5442 bsg_reply->reply_payload_rcv_len);
5443 return rc;
5444 }
5445
5446 /**
5447 * lpfc_bsg_set_ras_config: Set FW logging parameters
5448 * @job: fc_bsg_job to handle
5449 *
5450 * Set log-level parameters for FW-logging in host memory
5451 **/
5452 static int
lpfc_bsg_set_ras_config(struct bsg_job * job)5453 lpfc_bsg_set_ras_config(struct bsg_job *job)
5454 {
5455 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5456 struct lpfc_vport *vport = shost_priv(shost);
5457 struct lpfc_hba *phba = vport->phba;
5458 struct lpfc_bsg_set_ras_config_req *ras_req;
5459 struct fc_bsg_request *bsg_request = job->request;
5460 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5461 struct fc_bsg_reply *bsg_reply = job->reply;
5462 uint8_t action = 0, log_level = 0;
5463 int rc = 0, action_status = 0;
5464
5465 if (job->request_len <
5466 sizeof(struct fc_bsg_request) +
5467 sizeof(struct lpfc_bsg_set_ras_config_req)) {
5468 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5469 "6182 Received RAS_LOG request "
5470 "below minimum size\n");
5471 rc = -EINVAL;
5472 goto ras_job_error;
5473 }
5474
5475 /* Check FW log status */
5476 rc = lpfc_check_fwlog_support(phba);
5477 if (rc)
5478 goto ras_job_error;
5479
5480 ras_req = (struct lpfc_bsg_set_ras_config_req *)
5481 bsg_request->rqst_data.h_vendor.vendor_cmd;
5482 action = ras_req->action;
5483 log_level = ras_req->log_level;
5484
5485 if (action == LPFC_RASACTION_STOP_LOGGING) {
5486 /* Check if already disabled */
5487 spin_lock_irq(&phba->hbalock);
5488 if (ras_fwlog->state != ACTIVE) {
5489 spin_unlock_irq(&phba->hbalock);
5490 rc = -ESRCH;
5491 goto ras_job_error;
5492 }
5493 spin_unlock_irq(&phba->hbalock);
5494
5495 /* Disable logging */
5496 lpfc_ras_stop_fwlog(phba);
5497 } else {
5498 /*action = LPFC_RASACTION_START_LOGGING*/
5499
5500 /* Even though FW-logging is active re-initialize
5501 * FW-logging with new log-level. Return status
5502 * "Logging already Running" to caller.
5503 **/
5504 spin_lock_irq(&phba->hbalock);
5505 if (ras_fwlog->state != INACTIVE)
5506 action_status = -EINPROGRESS;
5507 spin_unlock_irq(&phba->hbalock);
5508
5509 /* Enable logging */
5510 rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5511 LPFC_RAS_ENABLE_LOGGING);
5512 if (rc) {
5513 rc = -EINVAL;
5514 goto ras_job_error;
5515 }
5516
5517 /* Check if FW-logging is re-initialized */
5518 if (action_status == -EINPROGRESS)
5519 rc = action_status;
5520 }
5521 ras_job_error:
5522 /* make error code available to userspace */
5523 bsg_reply->result = rc;
5524
5525 /* complete the job back to userspace */
5526 if (!rc)
5527 bsg_job_done(job, bsg_reply->result,
5528 bsg_reply->reply_payload_rcv_len);
5529
5530 return rc;
5531 }
5532
5533 /**
5534 * lpfc_bsg_get_ras_lwpd: Get log write position data
5535 * @job: fc_bsg_job to handle
5536 *
5537 * Get Offset/Wrap count of the log message written
5538 * in host memory
5539 **/
5540 static int
lpfc_bsg_get_ras_lwpd(struct bsg_job * job)5541 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5542 {
5543 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5544 struct lpfc_vport *vport = shost_priv(shost);
5545 struct lpfc_bsg_get_ras_lwpd *ras_reply;
5546 struct lpfc_hba *phba = vport->phba;
5547 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5548 struct fc_bsg_reply *bsg_reply = job->reply;
5549 u32 *lwpd_ptr = NULL;
5550 int rc = 0;
5551
5552 rc = lpfc_check_fwlog_support(phba);
5553 if (rc)
5554 goto ras_job_error;
5555
5556 if (job->request_len <
5557 sizeof(struct fc_bsg_request) +
5558 sizeof(struct lpfc_bsg_ras_req)) {
5559 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5560 "6183 Received RAS_LOG request "
5561 "below minimum size\n");
5562 rc = -EINVAL;
5563 goto ras_job_error;
5564 }
5565
5566 ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5567 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5568
5569 if (!ras_fwlog->lwpd.virt) {
5570 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5571 "6193 Restart FW Logging\n");
5572 rc = -EINVAL;
5573 goto ras_job_error;
5574 }
5575
5576 /* Get lwpd offset */
5577 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5578 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5579
5580 /* Get wrap count */
5581 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5582
5583 ras_job_error:
5584 /* make error code available to userspace */
5585 bsg_reply->result = rc;
5586
5587 /* complete the job back to userspace */
5588 if (!rc)
5589 bsg_job_done(job, bsg_reply->result,
5590 bsg_reply->reply_payload_rcv_len);
5591
5592 return rc;
5593 }
5594
5595 /**
5596 * lpfc_bsg_get_ras_fwlog: Read FW log
5597 * @job: fc_bsg_job to handle
5598 *
5599 * Copy the FW log into the passed buffer.
5600 **/
5601 static int
lpfc_bsg_get_ras_fwlog(struct bsg_job * job)5602 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5603 {
5604 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5605 struct lpfc_vport *vport = shost_priv(shost);
5606 struct lpfc_hba *phba = vport->phba;
5607 struct fc_bsg_request *bsg_request = job->request;
5608 struct fc_bsg_reply *bsg_reply = job->reply;
5609 struct lpfc_bsg_get_fwlog_req *ras_req;
5610 u32 rd_offset, rd_index, offset;
5611 void *src, *fwlog_buff;
5612 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5613 struct lpfc_dmabuf *dmabuf, *next;
5614 int rc = 0;
5615
5616 ras_fwlog = &phba->ras_fwlog;
5617
5618 rc = lpfc_check_fwlog_support(phba);
5619 if (rc)
5620 goto ras_job_error;
5621
5622 /* Logging to be stopped before reading */
5623 spin_lock_irq(&phba->hbalock);
5624 if (ras_fwlog->state == ACTIVE) {
5625 spin_unlock_irq(&phba->hbalock);
5626 rc = -EINPROGRESS;
5627 goto ras_job_error;
5628 }
5629 spin_unlock_irq(&phba->hbalock);
5630
5631 if (job->request_len <
5632 sizeof(struct fc_bsg_request) +
5633 sizeof(struct lpfc_bsg_get_fwlog_req)) {
5634 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5635 "6184 Received RAS_LOG request "
5636 "below minimum size\n");
5637 rc = -EINVAL;
5638 goto ras_job_error;
5639 }
5640
5641 ras_req = (struct lpfc_bsg_get_fwlog_req *)
5642 bsg_request->rqst_data.h_vendor.vendor_cmd;
5643 rd_offset = ras_req->read_offset;
5644
5645 /* Allocate memory to read fw log*/
5646 fwlog_buff = vmalloc(ras_req->read_size);
5647 if (!fwlog_buff) {
5648 rc = -ENOMEM;
5649 goto ras_job_error;
5650 }
5651
5652 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5653 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5654
5655 list_for_each_entry_safe(dmabuf, next,
5656 &ras_fwlog->fwlog_buff_list, list) {
5657
5658 if (dmabuf->buffer_tag < rd_index)
5659 continue;
5660
5661 src = dmabuf->virt + offset;
5662 memcpy(fwlog_buff, src, ras_req->read_size);
5663 break;
5664 }
5665
5666 bsg_reply->reply_payload_rcv_len =
5667 sg_copy_from_buffer(job->reply_payload.sg_list,
5668 job->reply_payload.sg_cnt,
5669 fwlog_buff, ras_req->read_size);
5670
5671 vfree(fwlog_buff);
5672
5673 ras_job_error:
5674 bsg_reply->result = rc;
5675 if (!rc)
5676 bsg_job_done(job, bsg_reply->result,
5677 bsg_reply->reply_payload_rcv_len);
5678
5679 return rc;
5680 }
5681
5682 static int
lpfc_get_trunk_info(struct bsg_job * job)5683 lpfc_get_trunk_info(struct bsg_job *job)
5684 {
5685 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5686 struct lpfc_hba *phba = vport->phba;
5687 struct fc_bsg_reply *bsg_reply = job->reply;
5688 struct lpfc_trunk_info *event_reply;
5689 int rc = 0;
5690
5691 if (job->request_len <
5692 sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5693 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5694 "2744 Received GET TRUNK _INFO request below "
5695 "minimum size\n");
5696 rc = -EINVAL;
5697 goto job_error;
5698 }
5699
5700 event_reply = (struct lpfc_trunk_info *)
5701 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5702
5703 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5704 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5705 "2728 Received GET TRUNK _INFO reply below "
5706 "minimum size\n");
5707 rc = -EINVAL;
5708 goto job_error;
5709 }
5710 if (event_reply == NULL) {
5711 rc = -EINVAL;
5712 goto job_error;
5713 }
5714
5715 bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5716 (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5717
5718 bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5719 (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5720
5721 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5722 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5723
5724 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5725 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5726
5727 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5728 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5729
5730 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5731 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5732
5733 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5734 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5735
5736 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5737 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5738
5739 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5740 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5741
5742 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5743 event_reply->logical_speed =
5744 phba->sli4_hba.link_state.logical_speed / 1000;
5745 job_error:
5746 bsg_reply->result = rc;
5747 if (!rc)
5748 bsg_job_done(job, bsg_reply->result,
5749 bsg_reply->reply_payload_rcv_len);
5750 return rc;
5751
5752 }
5753
5754 static int
lpfc_get_cgnbuf_info(struct bsg_job * job)5755 lpfc_get_cgnbuf_info(struct bsg_job *job)
5756 {
5757 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5758 struct lpfc_hba *phba = vport->phba;
5759 struct fc_bsg_request *bsg_request = job->request;
5760 struct fc_bsg_reply *bsg_reply = job->reply;
5761 struct get_cgnbuf_info_req *cgnbuf_req;
5762 struct lpfc_cgn_info *cp;
5763 uint8_t *cgn_buff;
5764 int size, cinfosz;
5765 int rc = 0;
5766
5767 if (job->request_len < sizeof(struct fc_bsg_request) +
5768 sizeof(struct get_cgnbuf_info_req)) {
5769 rc = -ENOMEM;
5770 goto job_exit;
5771 }
5772
5773 if (!phba->sli4_hba.pc_sli4_params.cmf) {
5774 rc = -ENOENT;
5775 goto job_exit;
5776 }
5777
5778 if (!phba->cgn_i || !phba->cgn_i->virt) {
5779 rc = -ENOENT;
5780 goto job_exit;
5781 }
5782
5783 cp = phba->cgn_i->virt;
5784 if (cp->cgn_info_version < LPFC_CGN_INFO_V3) {
5785 rc = -EPERM;
5786 goto job_exit;
5787 }
5788
5789 cgnbuf_req = (struct get_cgnbuf_info_req *)
5790 bsg_request->rqst_data.h_vendor.vendor_cmd;
5791
5792 /* For reset or size == 0 */
5793 bsg_reply->reply_payload_rcv_len = 0;
5794
5795 if (cgnbuf_req->reset == LPFC_BSG_CGN_RESET_STAT) {
5796 lpfc_init_congestion_stat(phba);
5797 goto job_exit;
5798 }
5799
5800 /* We don't want to include the CRC at the end */
5801 cinfosz = sizeof(struct lpfc_cgn_info) - sizeof(uint32_t);
5802
5803 size = cgnbuf_req->read_size;
5804 if (!size)
5805 goto job_exit;
5806
5807 if (size < cinfosz) {
5808 /* Just copy back what we can */
5809 cinfosz = size;
5810 rc = -E2BIG;
5811 }
5812
5813 /* Allocate memory to read congestion info */
5814 cgn_buff = vmalloc(cinfosz);
5815 if (!cgn_buff) {
5816 rc = -ENOMEM;
5817 goto job_exit;
5818 }
5819
5820 memcpy(cgn_buff, cp, cinfosz);
5821
5822 bsg_reply->reply_payload_rcv_len =
5823 sg_copy_from_buffer(job->reply_payload.sg_list,
5824 job->reply_payload.sg_cnt,
5825 cgn_buff, cinfosz);
5826
5827 vfree(cgn_buff);
5828
5829 job_exit:
5830 bsg_reply->result = rc;
5831 if (!rc)
5832 bsg_job_done(job, bsg_reply->result,
5833 bsg_reply->reply_payload_rcv_len);
5834 else
5835 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5836 "2724 GET CGNBUF error: %d\n", rc);
5837 return rc;
5838 }
5839
5840 /**
5841 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5842 * @job: fc_bsg_job to handle
5843 **/
5844 static int
lpfc_bsg_hst_vendor(struct bsg_job * job)5845 lpfc_bsg_hst_vendor(struct bsg_job *job)
5846 {
5847 struct fc_bsg_request *bsg_request = job->request;
5848 struct fc_bsg_reply *bsg_reply = job->reply;
5849 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
5850 int rc;
5851
5852 switch (command) {
5853 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5854 rc = lpfc_bsg_hba_set_event(job);
5855 break;
5856 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5857 rc = lpfc_bsg_hba_get_event(job);
5858 break;
5859 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5860 rc = lpfc_bsg_send_mgmt_rsp(job);
5861 break;
5862 case LPFC_BSG_VENDOR_DIAG_MODE:
5863 rc = lpfc_bsg_diag_loopback_mode(job);
5864 break;
5865 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5866 rc = lpfc_sli4_bsg_diag_mode_end(job);
5867 break;
5868 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5869 rc = lpfc_bsg_diag_loopback_run(job);
5870 break;
5871 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5872 rc = lpfc_sli4_bsg_link_diag_test(job);
5873 break;
5874 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5875 rc = lpfc_bsg_get_dfc_rev(job);
5876 break;
5877 case LPFC_BSG_VENDOR_MBOX:
5878 rc = lpfc_bsg_mbox_cmd(job);
5879 break;
5880 case LPFC_BSG_VENDOR_MENLO_CMD:
5881 case LPFC_BSG_VENDOR_MENLO_DATA:
5882 rc = lpfc_menlo_cmd(job);
5883 break;
5884 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5885 rc = lpfc_forced_link_speed(job);
5886 break;
5887 case LPFC_BSG_VENDOR_RAS_GET_LWPD:
5888 rc = lpfc_bsg_get_ras_lwpd(job);
5889 break;
5890 case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
5891 rc = lpfc_bsg_get_ras_fwlog(job);
5892 break;
5893 case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
5894 rc = lpfc_bsg_get_ras_config(job);
5895 break;
5896 case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
5897 rc = lpfc_bsg_set_ras_config(job);
5898 break;
5899 case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
5900 rc = lpfc_get_trunk_info(job);
5901 break;
5902 case LPFC_BSG_VENDOR_GET_CGNBUF_INFO:
5903 rc = lpfc_get_cgnbuf_info(job);
5904 break;
5905 default:
5906 rc = -EINVAL;
5907 bsg_reply->reply_payload_rcv_len = 0;
5908 /* make error code available to userspace */
5909 bsg_reply->result = rc;
5910 break;
5911 }
5912
5913 return rc;
5914 }
5915
5916 /**
5917 * lpfc_bsg_request - handle a bsg request from the FC transport
5918 * @job: bsg_job to handle
5919 **/
5920 int
lpfc_bsg_request(struct bsg_job * job)5921 lpfc_bsg_request(struct bsg_job *job)
5922 {
5923 struct fc_bsg_request *bsg_request = job->request;
5924 struct fc_bsg_reply *bsg_reply = job->reply;
5925 uint32_t msgcode;
5926 int rc;
5927
5928 msgcode = bsg_request->msgcode;
5929 switch (msgcode) {
5930 case FC_BSG_HST_VENDOR:
5931 rc = lpfc_bsg_hst_vendor(job);
5932 break;
5933 case FC_BSG_RPT_ELS:
5934 rc = lpfc_bsg_rport_els(job);
5935 break;
5936 case FC_BSG_RPT_CT:
5937 rc = lpfc_bsg_send_mgmt_cmd(job);
5938 break;
5939 default:
5940 rc = -EINVAL;
5941 bsg_reply->reply_payload_rcv_len = 0;
5942 /* make error code available to userspace */
5943 bsg_reply->result = rc;
5944 break;
5945 }
5946
5947 return rc;
5948 }
5949
5950 /**
5951 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5952 * @job: bsg_job that has timed out
5953 *
5954 * This function just aborts the job's IOCB. The aborted IOCB will return to
5955 * the waiting function which will handle passing the error back to userspace
5956 **/
5957 int
lpfc_bsg_timeout(struct bsg_job * job)5958 lpfc_bsg_timeout(struct bsg_job *job)
5959 {
5960 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5961 struct lpfc_hba *phba = vport->phba;
5962 struct lpfc_iocbq *cmdiocb;
5963 struct lpfc_sli_ring *pring;
5964 struct bsg_job_data *dd_data;
5965 unsigned long flags;
5966 int rc = 0;
5967 LIST_HEAD(completions);
5968 struct lpfc_iocbq *check_iocb, *next_iocb;
5969
5970 pring = lpfc_phba_elsring(phba);
5971 if (unlikely(!pring))
5972 return -EIO;
5973
5974 /* if job's driver data is NULL, the command completed or is in the
5975 * the process of completing. In this case, return status to request
5976 * so the timeout is retried. This avoids double completion issues
5977 * and the request will be pulled off the timer queue when the
5978 * command's completion handler executes. Otherwise, prevent the
5979 * command's completion handler from executing the job done callback
5980 * and continue processing to abort the outstanding the command.
5981 */
5982
5983 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5984 dd_data = (struct bsg_job_data *)job->dd_data;
5985 if (dd_data) {
5986 dd_data->set_job = NULL;
5987 job->dd_data = NULL;
5988 } else {
5989 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5990 return -EAGAIN;
5991 }
5992
5993 switch (dd_data->type) {
5994 case TYPE_IOCB:
5995 /* Check to see if IOCB was issued to the port or not. If not,
5996 * remove it from the txq queue and call cancel iocbs.
5997 * Otherwise, call abort iotag
5998 */
5999 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
6000 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6001
6002 spin_lock_irqsave(&phba->hbalock, flags);
6003 /* make sure the I/O abort window is still open */
6004 if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
6005 spin_unlock_irqrestore(&phba->hbalock, flags);
6006 return -EAGAIN;
6007 }
6008 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
6009 list) {
6010 if (check_iocb == cmdiocb) {
6011 list_move_tail(&check_iocb->list, &completions);
6012 break;
6013 }
6014 }
6015 if (list_empty(&completions))
6016 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
6017 spin_unlock_irqrestore(&phba->hbalock, flags);
6018 if (!list_empty(&completions)) {
6019 lpfc_sli_cancel_iocbs(phba, &completions,
6020 IOSTAT_LOCAL_REJECT,
6021 IOERR_SLI_ABORTED);
6022 }
6023 break;
6024
6025 case TYPE_EVT:
6026 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6027 break;
6028
6029 case TYPE_MBOX:
6030 /* Update the ext buf ctx state if needed */
6031
6032 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
6033 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
6034 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6035 break;
6036 case TYPE_MENLO:
6037 /* Check to see if IOCB was issued to the port or not. If not,
6038 * remove it from the txq queue and call cancel iocbs.
6039 * Otherwise, call abort iotag.
6040 */
6041 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
6042 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6043
6044 spin_lock_irqsave(&phba->hbalock, flags);
6045 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
6046 list) {
6047 if (check_iocb == cmdiocb) {
6048 list_move_tail(&check_iocb->list, &completions);
6049 break;
6050 }
6051 }
6052 if (list_empty(&completions))
6053 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
6054 spin_unlock_irqrestore(&phba->hbalock, flags);
6055 if (!list_empty(&completions)) {
6056 lpfc_sli_cancel_iocbs(phba, &completions,
6057 IOSTAT_LOCAL_REJECT,
6058 IOERR_SLI_ABORTED);
6059 }
6060 break;
6061 default:
6062 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
6063 break;
6064 }
6065
6066 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
6067 * otherwise an error message will be displayed on the console
6068 * so always return success (zero)
6069 */
6070 return rc;
6071 }
6072