1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <linux/blk-cgroup.h>
32 #include <net/checksum.h>
33
34 #include <scsi/scsi.h>
35 #include <scsi/scsi_device.h>
36 #include <scsi/scsi_eh.h>
37 #include <scsi/scsi_host.h>
38 #include <scsi/scsi_tcq.h>
39 #include <scsi/scsi_transport_fc.h>
40
41 #include "lpfc_version.h"
42 #include "lpfc_hw4.h"
43 #include "lpfc_hw.h"
44 #include "lpfc_sli.h"
45 #include "lpfc_sli4.h"
46 #include "lpfc_nl.h"
47 #include "lpfc_disc.h"
48 #include "lpfc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_logmsg.h"
51 #include "lpfc_crtn.h"
52 #include "lpfc_vport.h"
53
54 #define LPFC_RESET_WAIT 2
55 #define LPFC_ABORT_WAIT 2
56
57 static char *dif_op_str[] = {
58 "PROT_NORMAL",
59 "PROT_READ_INSERT",
60 "PROT_WRITE_STRIP",
61 "PROT_READ_STRIP",
62 "PROT_WRITE_INSERT",
63 "PROT_READ_PASS",
64 "PROT_WRITE_PASS",
65 };
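/*
 * Note: the strings above follow the order of the SCSI_PROT_* protection
 * operation values (SCSI_PROT_NORMAL == 0 through SCSI_PROT_WRITE_PASS == 6),
 * which allows the array to be indexed directly by scsi_get_prot_op() when
 * logging.
 */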
66
67 struct scsi_dif_tuple {
68 __be16 guard_tag; /* Checksum */
69 __be16 app_tag; /* Opaque storage */
70 __be32 ref_tag; /* Target LBA or indirect LBA */
71 };
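/*
 * Layout note: this is the standard 8-byte T10 DIF tuple that trails each
 * protection interval of data. The guard tag is a CRC-16 (or IP checksum)
 * of the data block, the app tag is opaque to the transport, and for Type 1
 * protection the ref tag carries the lower 32 bits of the target LBA.
 */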
72
73 static struct lpfc_rport_data *
74 lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
75 {
76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
77
78 if (vport->phba->cfg_fof)
79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
80 else
81 return (struct lpfc_rport_data *)sdev->hostdata;
82 }
83
84 static void
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
86 static void
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
88 static int
89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
90
91 /**
92 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
93 * @phba: Pointer to HBA object.
94 * @lpfc_cmd: lpfc scsi command object pointer.
95 *
96 * This function is called from the lpfc_prep_task_mgmt_cmd function to
97 * set the last bit in the response sge entry.
98 **/
99 static void
100 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
101 struct lpfc_io_buf *lpfc_cmd)
102 {
103 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
104 if (sgl) {
105 sgl += 1;
106 sgl->word2 = le32_to_cpu(sgl->word2);
107 bf_set(lpfc_sli4_sge_last, sgl, 1);
108 sgl->word2 = cpu_to_le32(sgl->word2);
109 }
110 }
111
112 #define LPFC_INVALID_REFTAG ((u32)-1)
113
114 /**
115 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
116 * @phba: The Hba for which this call is being executed.
117 *
118 * This routine is called when there is a resource error in the driver or
119 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, posting at
120 * most one event each second, and wakes up the worker thread of @phba to
121 * process the WORKER_RAMP_DOWN_QUEUE event.
122 *
123 * This routine should be called with no lock held.
124 **/
125 void
126 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
127 {
128 unsigned long flags;
129 uint32_t evt_posted;
130 unsigned long expires;
131
132 spin_lock_irqsave(&phba->hbalock, flags);
133 atomic_inc(&phba->num_rsrc_err);
134 phba->last_rsrc_error_time = jiffies;
135
136 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
137 if (time_after(expires, jiffies)) {
138 spin_unlock_irqrestore(&phba->hbalock, flags);
139 return;
140 }
141
142 phba->last_ramp_down_time = jiffies;
143
144 spin_unlock_irqrestore(&phba->hbalock, flags);
145
146 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
147 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
148 if (!evt_posted)
149 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
150 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
151
152 if (!evt_posted)
153 lpfc_worker_wake_up(phba);
154 return;
155 }
156
157 /**
158 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
159 * @phba: The Hba for which this call is being executed.
160 *
161 * This routine is called by the worker thread to process the
162 * WORKER_RAMP_DOWN_QUEUE event. It reduces the queue depth for all scsi
163 * devices on each vport associated with @phba.
164 **/
165 void
166 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
167 {
168 struct lpfc_vport **vports;
169 struct Scsi_Host *shost;
170 struct scsi_device *sdev;
171 unsigned long new_queue_depth;
172 unsigned long num_rsrc_err, num_cmd_success;
173 int i;
174
175 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
176 num_cmd_success = atomic_read(&phba->num_cmd_success);
177
178 /*
179 * The error and success command counters are global per
180 * driver instance. If another handler has already
181 * operated on this error event, just exit.
182 */
183 if (num_rsrc_err == 0)
184 return;
185
186 vports = lpfc_create_vport_work_array(phba);
187 if (vports != NULL)
188 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
189 shost = lpfc_shost_from_vport(vports[i]);
190 shost_for_each_device(sdev, shost) {
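/*
 * Scale the queue depth down by the fraction of commands
 * that hit a resource error. Worked example (illustrative
 * numbers only): queue_depth = 32, num_rsrc_err = 8,
 * num_cmd_success = 24 gives 32 * 8 / 32 = 8, so the depth
 * is lowered to 32 - 8 = 24.
 */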
191 new_queue_depth =
192 sdev->queue_depth * num_rsrc_err /
193 (num_rsrc_err + num_cmd_success);
194 if (!new_queue_depth)
195 new_queue_depth = sdev->queue_depth - 1;
196 else
197 new_queue_depth = sdev->queue_depth -
198 new_queue_depth;
199 scsi_change_queue_depth(sdev, new_queue_depth);
200 }
201 }
202 lpfc_destroy_vport_work_array(phba, vports);
203 atomic_set(&phba->num_rsrc_err, 0);
204 atomic_set(&phba->num_cmd_success, 0);
205 }
206
207 /**
208 * lpfc_scsi_dev_block - set all scsi hosts to block state
209 * @phba: Pointer to HBA context object.
210 *
211 * This function walks the vport list and sets each SCSI host to the block
212 * state by invoking the fc_remote_port_delete() routine. It is invoked by
213 * EEH when the device's PCI slot has been permanently disabled.
214 **/
215 void
216 lpfc_scsi_dev_block(struct lpfc_hba *phba)
217 {
218 struct lpfc_vport **vports;
219 struct Scsi_Host *shost;
220 struct scsi_device *sdev;
221 struct fc_rport *rport;
222 int i;
223
224 vports = lpfc_create_vport_work_array(phba);
225 if (vports != NULL)
226 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
227 shost = lpfc_shost_from_vport(vports[i]);
228 shost_for_each_device(sdev, shost) {
229 rport = starget_to_rport(scsi_target(sdev));
230 fc_remote_port_delete(rport);
231 }
232 }
233 lpfc_destroy_vport_work_array(phba, vports);
234 }
235
236 /**
237 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
238 * @vport: The virtual port for which this call being executed.
239 * @num_to_alloc: The requested number of buffers to allocate.
240 *
241 * This routine allocates a scsi buffer for a device with the SLI-3 interface
242 * spec. The scsi buffer contains all the necessary information needed to
243 * initiate a SCSI I/O. The non-DMAable buffer region contains information to
244 * build the IOCB. The DMAable region contains memory for the FCP CMND, FCP
245 * RSP, and the initial BPL. In addition to allocating memory, the FCP CMND
246 * and FCP RSP BDEs are set up in the BPL and the BPL BDE is set up in the IOCB.
247 *
248 * Return codes:
249 * int - number of scsi buffers that were allocated.
250 * 0 = failure, less than num_to_alloc is a partial failure.
251 **/
252 static int
253 lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
254 {
255 struct lpfc_hba *phba = vport->phba;
256 struct lpfc_io_buf *psb;
257 struct ulp_bde64 *bpl;
258 IOCB_t *iocb;
259 dma_addr_t pdma_phys_fcp_cmd;
260 dma_addr_t pdma_phys_fcp_rsp;
261 dma_addr_t pdma_phys_sgl;
262 uint16_t iotag;
263 int bcnt, bpl_size;
264
265 bpl_size = phba->cfg_sg_dma_buf_size -
266 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
267
268 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
269 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
270 num_to_alloc, phba->cfg_sg_dma_buf_size,
271 (int)sizeof(struct fcp_cmnd),
272 (int)sizeof(struct fcp_rsp), bpl_size);
273
274 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
275 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
276 if (!psb)
277 break;
278
279 /*
280 * Get memory from the pci pool to map the virt space to pci
281 * bus space for an I/O. The DMA buffer includes space for the
282 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
283 * necessary to support the sg_tablesize.
284 */
285 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
286 GFP_KERNEL, &psb->dma_handle);
287 if (!psb->data) {
288 kfree(psb);
289 break;
290 }
291
292
293 /* Allocate iotag for psb->cur_iocbq. */
294 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
295 if (iotag == 0) {
296 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
297 psb->data, psb->dma_handle);
298 kfree(psb);
299 break;
300 }
301 psb->cur_iocbq.cmd_flag |= LPFC_IO_FCP;
302
303 psb->fcp_cmnd = psb->data;
304 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
305 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
306 sizeof(struct fcp_rsp);
307
308 /* Initialize local short-hand pointers. */
309 bpl = (struct ulp_bde64 *)psb->dma_sgl;
310 pdma_phys_fcp_cmd = psb->dma_handle;
311 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
312 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
313 sizeof(struct fcp_rsp);
314
315 /*
316 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
317 * are sg list bdes. Initialize the first two and leave the
318 * rest for queuecommand.
319 */
320 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
321 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
322 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
323 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
324 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
325
326 /* Setup the physical region for the FCP RSP */
327 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
328 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
329 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
330 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
331 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
332
333 /*
334 * Since the IOCB for the FCP I/O is built into this
335 * lpfc_scsi_buf, initialize it with all known data now.
336 */
337 iocb = &psb->cur_iocbq.iocb;
338 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
339 if ((phba->sli_rev == 3) &&
340 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
341 /* fill in immediate fcp command BDE */
342 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
343 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
344 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
345 unsli3.fcp_ext.icd);
346 iocb->un.fcpi64.bdl.addrHigh = 0;
347 iocb->ulpBdeCount = 0;
348 iocb->ulpLe = 0;
349 /* fill in response BDE */
350 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
351 BUFF_TYPE_BDE_64;
352 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
353 sizeof(struct fcp_rsp);
354 iocb->unsli3.fcp_ext.rbde.addrLow =
355 putPaddrLow(pdma_phys_fcp_rsp);
356 iocb->unsli3.fcp_ext.rbde.addrHigh =
357 putPaddrHigh(pdma_phys_fcp_rsp);
358 } else {
359 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
360 iocb->un.fcpi64.bdl.bdeSize =
361 (2 * sizeof(struct ulp_bde64));
362 iocb->un.fcpi64.bdl.addrLow =
363 putPaddrLow(pdma_phys_sgl);
364 iocb->un.fcpi64.bdl.addrHigh =
365 putPaddrHigh(pdma_phys_sgl);
366 iocb->ulpBdeCount = 1;
367 iocb->ulpLe = 1;
368 }
369 iocb->ulpClass = CLASS3;
370 psb->status = IOSTAT_SUCCESS;
371 /* Put it back into the SCSI buffer list */
372 psb->cur_iocbq.io_buf = psb;
373 spin_lock_init(&psb->buf_lock);
374 lpfc_release_scsi_buf_s3(phba, psb);
375
376 }
377
378 return bcnt;
379 }
380
381 /**
382 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
383 * @vport: pointer to lpfc vport data structure.
384 *
385 * This routine is invoked by the vport cleanup for deletions and the cleanup
386 * for an ndlp on removal.
387 **/
388 void
389 lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
390 {
391 struct lpfc_hba *phba = vport->phba;
392 struct lpfc_io_buf *psb, *next_psb;
393 struct lpfc_sli4_hdw_queue *qp;
394 unsigned long iflag = 0;
395 int idx;
396
397 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
398 return;
399
400 spin_lock_irqsave(&phba->hbalock, iflag);
401 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
402 qp = &phba->sli4_hba.hdwq[idx];
403
404 spin_lock(&qp->abts_io_buf_list_lock);
405 list_for_each_entry_safe(psb, next_psb,
406 &qp->lpfc_abts_io_buf_list, list) {
407 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME)
408 continue;
409
410 if (psb->rdata && psb->rdata->pnode &&
411 psb->rdata->pnode->vport == vport)
412 psb->rdata = NULL;
413 }
414 spin_unlock(&qp->abts_io_buf_list_lock);
415 }
416 spin_unlock_irqrestore(&phba->hbalock, iflag);
417 }
418
419 /**
420 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
421 * @phba: pointer to lpfc hba data structure.
422 * @axri: pointer to the fcp xri abort wcqe structure.
423 * @idx: index into hdwq
424 *
425 * This routine is invoked by the worker thread to process a SLI4 fast-path
426 * FCP or NVME aborted xri.
427 **/
428 void
429 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
430 struct sli4_wcqe_xri_aborted *axri, int idx)
431 {
432 u16 xri = 0;
433 u16 rxid = 0;
434 struct lpfc_io_buf *psb, *next_psb;
435 struct lpfc_sli4_hdw_queue *qp;
436 unsigned long iflag = 0;
437 struct lpfc_iocbq *iocbq;
438 int i;
439 struct lpfc_nodelist *ndlp;
440 int rrq_empty = 0;
441 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
442 struct scsi_cmnd *cmd;
443 int offline = 0;
444
445 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
446 return;
447 offline = pci_channel_offline(phba->pcidev);
448 if (!offline) {
449 xri = bf_get(lpfc_wcqe_xa_xri, axri);
450 rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
451 }
452 qp = &phba->sli4_hba.hdwq[idx];
453 spin_lock_irqsave(&phba->hbalock, iflag);
454 spin_lock(&qp->abts_io_buf_list_lock);
455 list_for_each_entry_safe(psb, next_psb,
456 &qp->lpfc_abts_io_buf_list, list) {
457 if (offline)
458 xri = psb->cur_iocbq.sli4_xritag;
459 if (psb->cur_iocbq.sli4_xritag == xri) {
460 list_del_init(&psb->list);
461 psb->flags &= ~LPFC_SBUF_XBUSY;
462 psb->status = IOSTAT_SUCCESS;
463 if (psb->cur_iocbq.cmd_flag & LPFC_IO_NVME) {
464 qp->abts_nvme_io_bufs--;
465 spin_unlock(&qp->abts_io_buf_list_lock);
466 spin_unlock_irqrestore(&phba->hbalock, iflag);
467 if (!offline) {
468 lpfc_sli4_nvme_xri_aborted(phba, axri,
469 psb);
470 return;
471 }
472 lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
473 spin_lock_irqsave(&phba->hbalock, iflag);
474 spin_lock(&qp->abts_io_buf_list_lock);
475 continue;
476 }
477 qp->abts_scsi_io_bufs--;
478 spin_unlock(&qp->abts_io_buf_list_lock);
479
480 if (psb->rdata && psb->rdata->pnode)
481 ndlp = psb->rdata->pnode;
482 else
483 ndlp = NULL;
484
485 rrq_empty = list_empty(&phba->active_rrq_list);
486 spin_unlock_irqrestore(&phba->hbalock, iflag);
487 if (ndlp && !offline) {
488 lpfc_set_rrq_active(phba, ndlp,
489 psb->cur_iocbq.sli4_lxritag, rxid, 1);
490 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
491 }
492
493 if (phba->cfg_fcp_wait_abts_rsp || offline) {
494 spin_lock_irqsave(&psb->buf_lock, iflag);
495 cmd = psb->pCmd;
496 psb->pCmd = NULL;
497 spin_unlock_irqrestore(&psb->buf_lock, iflag);
498
499 /* The sdev is not guaranteed to be valid post
500 * scsi_done upcall.
501 */
502 if (cmd)
503 scsi_done(cmd);
504
505 /*
506 * We expect there is an abort thread waiting
507 * for command completion; wake up the thread.
508 */
509 spin_lock_irqsave(&psb->buf_lock, iflag);
510 psb->cur_iocbq.cmd_flag &=
511 ~LPFC_DRIVER_ABORTED;
512 if (psb->waitq)
513 wake_up(psb->waitq);
514 spin_unlock_irqrestore(&psb->buf_lock, iflag);
515 }
516
517 lpfc_release_scsi_buf_s4(phba, psb);
518 if (rrq_empty)
519 lpfc_worker_wake_up(phba);
520 if (!offline)
521 return;
522 spin_lock_irqsave(&phba->hbalock, iflag);
523 spin_lock(&qp->abts_io_buf_list_lock);
524 continue;
525 }
526 }
527 spin_unlock(&qp->abts_io_buf_list_lock);
528 if (!offline) {
529 for (i = 1; i <= phba->sli.last_iotag; i++) {
530 iocbq = phba->sli.iocbq_lookup[i];
531
532 if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
533 (iocbq->cmd_flag & LPFC_IO_LIBDFC))
534 continue;
535 if (iocbq->sli4_xritag != xri)
536 continue;
537 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
538 psb->flags &= ~LPFC_SBUF_XBUSY;
539 spin_unlock_irqrestore(&phba->hbalock, iflag);
540 if (!list_empty(&pring->txq))
541 lpfc_worker_wake_up(phba);
542 return;
543 }
544 }
545 spin_unlock_irqrestore(&phba->hbalock, iflag);
546 }
547
548 /**
549 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
550 * @phba: The HBA for which this call is being executed.
551 * @ndlp: pointer to a node-list data structure.
552 * @cmnd: Pointer to scsi_cmnd data structure.
553 *
554 * This routine removes a scsi buffer from the head of the @phba
555 * lpfc_scsi_buf_list list and returns it to the caller.
556 *
557 * Return codes:
558 * NULL - Error
559 * Pointer to lpfc_scsi_buf - Success
560 **/
561 static struct lpfc_io_buf *
562 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
563 struct scsi_cmnd *cmnd)
564 {
565 struct lpfc_io_buf *lpfc_cmd = NULL;
566 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
567 unsigned long iflag = 0;
568
569 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
570 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
571 list);
572 if (!lpfc_cmd) {
573 spin_lock(&phba->scsi_buf_list_put_lock);
574 list_splice(&phba->lpfc_scsi_buf_list_put,
575 &phba->lpfc_scsi_buf_list_get);
576 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
577 list_remove_head(scsi_buf_list_get, lpfc_cmd,
578 struct lpfc_io_buf, list);
579 spin_unlock(&phba->scsi_buf_list_put_lock);
580 }
581 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
582
583 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
584 atomic_inc(&ndlp->cmd_pending);
585 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
586 }
587 return lpfc_cmd;
588 }
589 /**
590 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
591 * @phba: The HBA for which this call is being executed.
592 * @ndlp: pointer to a node-list data structure.
593 * @cmnd: Pointer to scsi_cmnd data structure.
594 *
595 * This routine removes a scsi buffer from the head of the @hdwq io_buf_list
596 * and returns it to the caller.
597 *
598 * Return codes:
599 * NULL - Error
600 * Pointer to lpfc_scsi_buf - Success
601 **/
602 static struct lpfc_io_buf *
603 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
604 struct scsi_cmnd *cmnd)
605 {
606 struct lpfc_io_buf *lpfc_cmd;
607 struct lpfc_sli4_hdw_queue *qp;
608 struct sli4_sge *sgl;
609 dma_addr_t pdma_phys_fcp_rsp;
610 dma_addr_t pdma_phys_fcp_cmd;
611 uint32_t cpu, idx;
612 int tag;
613 struct fcp_cmd_rsp_buf *tmp = NULL;
614
615 cpu = raw_smp_processor_id();
616 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
617 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
618 idx = blk_mq_unique_tag_to_hwq(tag);
619 } else {
620 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
621 }
622
623 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
624 !phba->cfg_xri_rebalancing);
625 if (!lpfc_cmd) {
626 qp = &phba->sli4_hba.hdwq[idx];
627 qp->empty_io_bufs++;
628 return NULL;
629 }
630
631 /* Setup key fields in buffer that may have been changed
632 * if other protocols used this buffer.
633 */
634 lpfc_cmd->cur_iocbq.cmd_flag = LPFC_IO_FCP;
635 lpfc_cmd->prot_seg_cnt = 0;
636 lpfc_cmd->seg_cnt = 0;
637 lpfc_cmd->timeout = 0;
638 lpfc_cmd->flags = 0;
639 lpfc_cmd->start_time = jiffies;
640 lpfc_cmd->waitq = NULL;
641 lpfc_cmd->cpu = cpu;
642 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
643 lpfc_cmd->prot_data_type = 0;
644 #endif
645 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
646 if (!tmp) {
647 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
648 return NULL;
649 }
650
651 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
652 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
653
654 /*
655 * The first two SGEs are the FCP_CMD and FCP_RSP.
656 * The balance are sg list bdes. Initialize the
657 * first two and leave the rest for queuecommand.
658 */
659 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
660 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
661 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
662 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
663 sgl->word2 = le32_to_cpu(sgl->word2);
664 bf_set(lpfc_sli4_sge_last, sgl, 0);
665 sgl->word2 = cpu_to_le32(sgl->word2);
666 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
667 sgl++;
668
669 /* Setup the physical region for the FCP RSP */
670 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
671 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
672 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
673 sgl->word2 = le32_to_cpu(sgl->word2);
674 bf_set(lpfc_sli4_sge_last, sgl, 1);
675 sgl->word2 = cpu_to_le32(sgl->word2);
676 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
677
678 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
679 atomic_inc(&ndlp->cmd_pending);
680 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
681 }
682 return lpfc_cmd;
683 }
684 /**
685 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
686 * @phba: The HBA for which this call is being executed.
687 * @ndlp: pointer to a node-list data structure.
688 * @cmnd: Pointer to scsi_cmnd data structure.
689 *
690 * This routine removes a scsi buffer from the head of the @phba
691 * lpfc_scsi_buf_list list and returns it to the caller.
692 *
693 * Return codes:
694 * NULL - Error
695 * Pointer to lpfc_scsi_buf - Success
696 **/
697 static struct lpfc_io_buf*
698 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
699 struct scsi_cmnd *cmnd)
700 {
701 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
702 }
703
704 /**
705 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
706 * @phba: The Hba for which this call is being executed.
707 * @psb: The scsi buffer which is being released.
708 *
709 * This routine releases the @psb scsi buffer by adding it to the tail of the
710 * @phba lpfc_scsi_buf_list list.
711 **/
712 static void
713 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
714 {
715 unsigned long iflag = 0;
716
717 psb->seg_cnt = 0;
718 psb->prot_seg_cnt = 0;
719
720 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
721 psb->pCmd = NULL;
722 psb->cur_iocbq.cmd_flag = LPFC_IO_FCP;
723 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
724 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
725 }
726
727 /**
728 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list.
729 * @phba: The Hba for which this call is being executed.
730 * @psb: The scsi buffer which is being released.
731 *
732 * This routine releases the @psb scsi buffer by adding it to the tail of the
733 * @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer
734 * and cannot be reused for at least RA_TOV amount of time if the I/O was
735 * aborted.
736 **/
737 static void
738 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
739 {
740 struct lpfc_sli4_hdw_queue *qp;
741 unsigned long iflag = 0;
742
743 psb->seg_cnt = 0;
744 psb->prot_seg_cnt = 0;
745
746 qp = psb->hdwq;
747 if (psb->flags & LPFC_SBUF_XBUSY) {
748 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
749 if (!phba->cfg_fcp_wait_abts_rsp)
750 psb->pCmd = NULL;
751 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
752 qp->abts_scsi_io_bufs++;
753 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
754 } else {
755 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
756 }
757 }
758
759 /**
760 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list.
761 * @phba: The Hba for which this call is being executed.
762 * @psb: The scsi buffer which is being released.
763 *
764 * This routine releases the @psb scsi buffer by adding it to the tail of the
765 * @phba lpfc_scsi_buf_list list.
766 **/
767 static void
768 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
769 {
770 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
771 atomic_dec(&psb->ndlp->cmd_pending);
772
773 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
774 phba->lpfc_release_scsi_buf(phba, psb);
775 }
776
777 /**
778 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
779 * @data: A pointer to the immediate command data portion of the IOCB.
780 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
781 *
782 * The routine copies the entire FCP command from @fcp_cmnd to @data while
783 * byte swapping the data to big endian format for transmission on the wire.
784 **/
785 static void
786 lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
787 {
788 int i, j;
789
790 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
791 i += sizeof(uint32_t), j++) {
792 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
793 }
794 }
795
796 /**
797 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
798 * @phba: The Hba for which this call is being executed.
799 * @lpfc_cmd: The scsi buffer which is going to be mapped.
800 *
801 * This routine does the pci dma mapping for the scatter-gather list of the
802 * scsi cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. It
803 * scans through the sg elements and formats the bdes. This routine also
804 * initializes all IOCB fields which depend on the scsi command request buffer.
805 *
806 * Return codes:
807 * 1 - Error
808 * 0 - Success
809 **/
810 static int
811 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
812 {
813 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
814 struct scatterlist *sgel = NULL;
815 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
816 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
817 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
818 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
819 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
820 dma_addr_t physaddr;
821 uint32_t num_bde = 0;
822 int nseg, datadir = scsi_cmnd->sc_data_direction;
823
824 /*
825 * There are three possibilities here - use scatter-gather segment, use
826 * the single mapping, or neither. Start the lpfc command prep by
827 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
828 * data bde entry.
829 */
830 bpl += 2;
831 if (scsi_sg_count(scsi_cmnd)) {
832 /*
833 * The driver stores the segment count returned from dma_map_sg
834 * because this is a count of dma-mappings used to map the use_sg
835 * pages. They are not guaranteed to be the same for those
836 * architectures that implement an IOMMU.
837 */
838
839 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
840 scsi_sg_count(scsi_cmnd), datadir);
841 if (unlikely(!nseg))
842 return 1;
843
844 lpfc_cmd->seg_cnt = nseg;
845 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
846 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
847 "9064 BLKGRD: %s: Too many sg segments"
848 " from dma_map_sg. Config %d, seg_cnt"
849 " %d\n", __func__, phba->cfg_sg_seg_cnt,
850 lpfc_cmd->seg_cnt);
851 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
852 lpfc_cmd->seg_cnt = 0;
853 scsi_dma_unmap(scsi_cmnd);
854 return 2;
855 }
856
857 /*
858 * The driver established a maximum scatter-gather segment count
859 * during probe that limits the number of sg elements in any
860 * single scsi command. Just run through the seg_cnt and format
861 * the bde's.
862 * When using SLI-3 the driver will try to fit all the BDEs into
863 * the IOCB. If it can't then the BDEs get added to a BPL as it
864 * does for SLI-2 mode.
865 */
866 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
867 physaddr = sg_dma_address(sgel);
868 if (phba->sli_rev == 3 &&
869 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
870 !(iocbq->cmd_flag & DSS_SECURITY_OP) &&
871 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
872 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
873 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
874 data_bde->addrLow = putPaddrLow(physaddr);
875 data_bde->addrHigh = putPaddrHigh(physaddr);
876 data_bde++;
877 } else {
878 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
879 bpl->tus.f.bdeSize = sg_dma_len(sgel);
880 bpl->tus.w = le32_to_cpu(bpl->tus.w);
881 bpl->addrLow =
882 le32_to_cpu(putPaddrLow(physaddr));
883 bpl->addrHigh =
884 le32_to_cpu(putPaddrHigh(physaddr));
885 bpl++;
886 }
887 }
888 }
889
890 /*
891 * Finish initializing those IOCB fields that are dependent on the
892 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
893 * explicitly reinitialized and for SLI-3 the extended bde count is
894 * explicitly reinitialized since all iocb memory resources are reused.
895 */
896 if (phba->sli_rev == 3 &&
897 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
898 !(iocbq->cmd_flag & DSS_SECURITY_OP)) {
899 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
900 /*
901 * The extended IOCB format can only fit 3 BDE or a BPL.
902 * This I/O has more than 3 BDE so the 1st data bde will
903 * be a BPL that is filled in here.
904 */
905 physaddr = lpfc_cmd->dma_handle;
906 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
907 data_bde->tus.f.bdeSize = (num_bde *
908 sizeof(struct ulp_bde64));
909 physaddr += (sizeof(struct fcp_cmnd) +
910 sizeof(struct fcp_rsp) +
911 (2 * sizeof(struct ulp_bde64)));
912 data_bde->addrHigh = putPaddrHigh(physaddr);
913 data_bde->addrLow = putPaddrLow(physaddr);
914 /* ebde count includes the response bde and data bpl */
915 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
916 } else {
917 /* ebde count includes the response bde and data bdes */
918 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
919 }
920 } else {
921 iocb_cmd->un.fcpi64.bdl.bdeSize =
922 ((num_bde + 2) * sizeof(struct ulp_bde64));
923 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
924 }
925 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
926
927 /*
928 * Due to difference in data length between DIF/non-DIF paths,
929 * we need to set word 4 of IOCB here
930 */
931 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
932 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
933 return 0;
934 }
935
936 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
937
938 /* Return BG_ERR_INIT if error injection is detected by Initiator */
939 #define BG_ERR_INIT 0x1
940 /* Return BG_ERR_TGT if error injection is detected by Target */
941 #define BG_ERR_TGT 0x2
942 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
943 #define BG_ERR_SWAP 0x10
944 /*
945 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
946 * error injection
947 */
948 #define BG_ERR_CHECK 0x20
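/*
 * The BG_ERR_* values are bit flags and may be OR'ed together; for example,
 * the write-path injections below return (BG_ERR_TGT | BG_ERR_CHECK) so the
 * corrupted tag is sent on the wire with checking relaxed, letting the
 * target detect the error.
 */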
949
950 /**
951 * lpfc_bg_err_inject - Determine if we should inject an error
952 * @phba: The Hba for which this call is being executed.
953 * @sc: The SCSI command to examine
954 * @reftag: (out) BlockGuard reference tag for transmitted data
955 * @apptag: (out) BlockGuard application tag for transmitted data
956 * @new_guard: (in) Value to replace CRC with if needed
957 *
958 * Returns BG_ERR_* bit mask or 0 if request ignored
959 **/
960 static int
961 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
962 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
963 {
964 struct scatterlist *sgpe; /* s/g prot entry */
965 struct lpfc_io_buf *lpfc_cmd = NULL;
966 struct scsi_dif_tuple *src = NULL;
967 struct lpfc_nodelist *ndlp;
968 struct lpfc_rport_data *rdata;
969 uint32_t op = scsi_get_prot_op(sc);
970 uint32_t blksize;
971 uint32_t numblks;
972 u32 lba;
973 int rc = 0;
974 int blockoff = 0;
975
976 if (op == SCSI_PROT_NORMAL)
977 return 0;
978
979 sgpe = scsi_prot_sglist(sc);
980 lba = scsi_prot_ref_tag(sc);
981 if (lba == LPFC_INVALID_REFTAG)
982 return 0;
983
984 /* First check if we need to match the LBA */
985 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
986 blksize = scsi_prot_interval(sc);
987 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
988
989 /* Make sure we have the right LBA if one is specified */
990 if (phba->lpfc_injerr_lba < (u64)lba ||
991 (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
992 return 0;
993 if (sgpe) {
994 blockoff = phba->lpfc_injerr_lba - (u64)lba;
995 numblks = sg_dma_len(sgpe) /
996 sizeof(struct scsi_dif_tuple);
997 if (numblks < blockoff)
998 blockoff = numblks;
999 }
1000 }
1001
1002 /* Next check if we need to match the remote NPortID or WWPN */
1003 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1004 if (rdata && rdata->pnode) {
1005 ndlp = rdata->pnode;
1006
1007 /* Make sure we have the right NPortID if one is specified */
1008 if (phba->lpfc_injerr_nportid &&
1009 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1010 return 0;
1011
1012 /*
1013 * Make sure we have the right WWPN if one is specified.
1014 * wwn[0] should be a non-zero NAA in a good WWPN.
1015 */
1016 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1017 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1018 sizeof(struct lpfc_name)) != 0))
1019 return 0;
1020 }
1021
1022 /* Setup a ptr to the protection data if the SCSI host provides it */
1023 if (sgpe) {
1024 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1025 src += blockoff;
1026 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1027 }
1028
1029 /* Should we change the Reference Tag */
1030 if (reftag) {
1031 if (phba->lpfc_injerr_wref_cnt) {
1032 switch (op) {
1033 case SCSI_PROT_WRITE_PASS:
1034 if (src) {
1035 /*
1036 * For WRITE_PASS, force the error
1037 * to be sent on the wire. It should
1038 * be detected by the Target.
1039 * If blockoff != 0 error will be
1040 * inserted in middle of the IO.
1041 */
1042
1043 lpfc_printf_log(phba, KERN_ERR,
1044 LOG_TRACE_EVENT,
1045 "9076 BLKGRD: Injecting reftag error: "
1046 "write lba x%lx + x%x oldrefTag x%x\n",
1047 (unsigned long)lba, blockoff,
1048 be32_to_cpu(src->ref_tag));
1049
1050 /*
1051 * Save the old ref_tag so we can
1052 * restore it on completion.
1053 */
1054 if (lpfc_cmd) {
1055 lpfc_cmd->prot_data_type =
1056 LPFC_INJERR_REFTAG;
1057 lpfc_cmd->prot_data_segment =
1058 src;
1059 lpfc_cmd->prot_data =
1060 src->ref_tag;
1061 }
1062 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1063 phba->lpfc_injerr_wref_cnt--;
1064 if (phba->lpfc_injerr_wref_cnt == 0) {
1065 phba->lpfc_injerr_nportid = 0;
1066 phba->lpfc_injerr_lba =
1067 LPFC_INJERR_LBA_OFF;
1068 memset(&phba->lpfc_injerr_wwpn,
1069 0, sizeof(struct lpfc_name));
1070 }
1071 rc = BG_ERR_TGT | BG_ERR_CHECK;
1072
1073 break;
1074 }
1075 fallthrough;
1076 case SCSI_PROT_WRITE_INSERT:
1077 /*
1078 * For WRITE_INSERT, force the error
1079 * to be sent on the wire. It should be
1080 * detected by the Target.
1081 */
1082 /* DEADBEEF will be the reftag on the wire */
1083 *reftag = 0xDEADBEEF;
1084 phba->lpfc_injerr_wref_cnt--;
1085 if (phba->lpfc_injerr_wref_cnt == 0) {
1086 phba->lpfc_injerr_nportid = 0;
1087 phba->lpfc_injerr_lba =
1088 LPFC_INJERR_LBA_OFF;
1089 memset(&phba->lpfc_injerr_wwpn,
1090 0, sizeof(struct lpfc_name));
1091 }
1092 rc = BG_ERR_TGT | BG_ERR_CHECK;
1093
1094 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1095 "9078 BLKGRD: Injecting reftag error: "
1096 "write lba x%lx\n", (unsigned long)lba);
1097 break;
1098 case SCSI_PROT_WRITE_STRIP:
1099 /*
1100 * For WRITE_STRIP and WRITE_PASS,
1101 * force the error on data
1102 * being copied from SLI-Host to SLI-Port.
1103 */
1104 *reftag = 0xDEADBEEF;
1105 phba->lpfc_injerr_wref_cnt--;
1106 if (phba->lpfc_injerr_wref_cnt == 0) {
1107 phba->lpfc_injerr_nportid = 0;
1108 phba->lpfc_injerr_lba =
1109 LPFC_INJERR_LBA_OFF;
1110 memset(&phba->lpfc_injerr_wwpn,
1111 0, sizeof(struct lpfc_name));
1112 }
1113 rc = BG_ERR_INIT;
1114
1115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1116 "9077 BLKGRD: Injecting reftag error: "
1117 "write lba x%lx\n", (unsigned long)lba);
1118 break;
1119 }
1120 }
1121 if (phba->lpfc_injerr_rref_cnt) {
1122 switch (op) {
1123 case SCSI_PROT_READ_INSERT:
1124 case SCSI_PROT_READ_STRIP:
1125 case SCSI_PROT_READ_PASS:
1126 /*
1127 * For READ_STRIP and READ_PASS, force the
1128 * error on data being read off the wire. It
1129 * should force an IO error to the driver.
1130 */
1131 *reftag = 0xDEADBEEF;
1132 phba->lpfc_injerr_rref_cnt--;
1133 if (phba->lpfc_injerr_rref_cnt == 0) {
1134 phba->lpfc_injerr_nportid = 0;
1135 phba->lpfc_injerr_lba =
1136 LPFC_INJERR_LBA_OFF;
1137 memset(&phba->lpfc_injerr_wwpn,
1138 0, sizeof(struct lpfc_name));
1139 }
1140 rc = BG_ERR_INIT;
1141
1142 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1143 "9079 BLKGRD: Injecting reftag error: "
1144 "read lba x%lx\n", (unsigned long)lba);
1145 break;
1146 }
1147 }
1148 }
1149
1150 /* Should we change the Application Tag */
1151 if (apptag) {
1152 if (phba->lpfc_injerr_wapp_cnt) {
1153 switch (op) {
1154 case SCSI_PROT_WRITE_PASS:
1155 if (src) {
1156 /*
1157 * For WRITE_PASS, force the error
1158 * to be sent on the wire. It should
1159 * be detected by the Target.
1160 * If blockoff != 0 error will be
1161 * inserted in middle of the IO.
1162 */
1163
1164 lpfc_printf_log(phba, KERN_ERR,
1165 LOG_TRACE_EVENT,
1166 "9080 BLKGRD: Injecting apptag error: "
1167 "write lba x%lx + x%x oldappTag x%x\n",
1168 (unsigned long)lba, blockoff,
1169 be16_to_cpu(src->app_tag));
1170
1171 /*
1172 * Save the old app_tag so we can
1173 * restore it on completion.
1174 */
1175 if (lpfc_cmd) {
1176 lpfc_cmd->prot_data_type =
1177 LPFC_INJERR_APPTAG;
1178 lpfc_cmd->prot_data_segment =
1179 src;
1180 lpfc_cmd->prot_data =
1181 src->app_tag;
1182 }
1183 src->app_tag = cpu_to_be16(0xDEAD);
1184 phba->lpfc_injerr_wapp_cnt--;
1185 if (phba->lpfc_injerr_wapp_cnt == 0) {
1186 phba->lpfc_injerr_nportid = 0;
1187 phba->lpfc_injerr_lba =
1188 LPFC_INJERR_LBA_OFF;
1189 memset(&phba->lpfc_injerr_wwpn,
1190 0, sizeof(struct lpfc_name));
1191 }
1192 rc = BG_ERR_TGT | BG_ERR_CHECK;
1193 break;
1194 }
1195 fallthrough;
1196 case SCSI_PROT_WRITE_INSERT:
1197 /*
1198 * For WRITE_INSERT, force the
1199 * error to be sent on the wire. It should be
1200 * detected by the Target.
1201 */
1202 /* DEAD will be the apptag on the wire */
1203 *apptag = 0xDEAD;
1204 phba->lpfc_injerr_wapp_cnt--;
1205 if (phba->lpfc_injerr_wapp_cnt == 0) {
1206 phba->lpfc_injerr_nportid = 0;
1207 phba->lpfc_injerr_lba =
1208 LPFC_INJERR_LBA_OFF;
1209 memset(&phba->lpfc_injerr_wwpn,
1210 0, sizeof(struct lpfc_name));
1211 }
1212 rc = BG_ERR_TGT | BG_ERR_CHECK;
1213
1214 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1215 "0813 BLKGRD: Injecting apptag error: "
1216 "write lba x%lx\n", (unsigned long)lba);
1217 break;
1218 case SCSI_PROT_WRITE_STRIP:
1219 /*
1220 * For WRITE_STRIP and WRITE_PASS,
1221 * force the error on data
1222 * being copied from SLI-Host to SLI-Port.
1223 */
1224 *apptag = 0xDEAD;
1225 phba->lpfc_injerr_wapp_cnt--;
1226 if (phba->lpfc_injerr_wapp_cnt == 0) {
1227 phba->lpfc_injerr_nportid = 0;
1228 phba->lpfc_injerr_lba =
1229 LPFC_INJERR_LBA_OFF;
1230 memset(&phba->lpfc_injerr_wwpn,
1231 0, sizeof(struct lpfc_name));
1232 }
1233 rc = BG_ERR_INIT;
1234
1235 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1236 "0812 BLKGRD: Injecting apptag error: "
1237 "write lba x%lx\n", (unsigned long)lba);
1238 break;
1239 }
1240 }
1241 if (phba->lpfc_injerr_rapp_cnt) {
1242 switch (op) {
1243 case SCSI_PROT_READ_INSERT:
1244 case SCSI_PROT_READ_STRIP:
1245 case SCSI_PROT_READ_PASS:
1246 /*
1247 * For READ_STRIP and READ_PASS, force the
1248 * error on data being read off the wire. It
1249 * should force an IO error to the driver.
1250 */
1251 *apptag = 0xDEAD;
1252 phba->lpfc_injerr_rapp_cnt--;
1253 if (phba->lpfc_injerr_rapp_cnt == 0) {
1254 phba->lpfc_injerr_nportid = 0;
1255 phba->lpfc_injerr_lba =
1256 LPFC_INJERR_LBA_OFF;
1257 memset(&phba->lpfc_injerr_wwpn,
1258 0, sizeof(struct lpfc_name));
1259 }
1260 rc = BG_ERR_INIT;
1261
1262 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1263 "0814 BLKGRD: Injecting apptag error: "
1264 "read lba x%lx\n", (unsigned long)lba);
1265 break;
1266 }
1267 }
1268 }
1269
1270
1271 /* Should we change the Guard Tag */
1272 if (new_guard) {
1273 if (phba->lpfc_injerr_wgrd_cnt) {
1274 switch (op) {
1275 case SCSI_PROT_WRITE_PASS:
1276 rc = BG_ERR_CHECK;
1277 fallthrough;
1278
1279 case SCSI_PROT_WRITE_INSERT:
1280 /*
1281 * For WRITE_INSERT, force the
1282 * error to be sent on the wire. It should be
1283 * detected by the Target.
1284 */
1285 phba->lpfc_injerr_wgrd_cnt--;
1286 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1287 phba->lpfc_injerr_nportid = 0;
1288 phba->lpfc_injerr_lba =
1289 LPFC_INJERR_LBA_OFF;
1290 memset(&phba->lpfc_injerr_wwpn,
1291 0, sizeof(struct lpfc_name));
1292 }
1293
1294 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1295 /* Signals the caller to swap CRC->CSUM */
1296
1297 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1298 "0817 BLKGRD: Injecting guard error: "
1299 "write lba x%lx\n", (unsigned long)lba);
1300 break;
1301 case SCSI_PROT_WRITE_STRIP:
1302 /*
1303 * For WRITE_STRIP and WRITE_PASS,
1304 * force the error on data
1305 * being copied from SLI-Host to SLI-Port.
1306 */
1307 phba->lpfc_injerr_wgrd_cnt--;
1308 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1309 phba->lpfc_injerr_nportid = 0;
1310 phba->lpfc_injerr_lba =
1311 LPFC_INJERR_LBA_OFF;
1312 memset(&phba->lpfc_injerr_wwpn,
1313 0, sizeof(struct lpfc_name));
1314 }
1315
1316 rc = BG_ERR_INIT | BG_ERR_SWAP;
1317 /* Signals the caller to swap CRC->CSUM */
1318
1319 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1320 "0816 BLKGRD: Injecting guard error: "
1321 "write lba x%lx\n", (unsigned long)lba);
1322 break;
1323 }
1324 }
1325 if (phba->lpfc_injerr_rgrd_cnt) {
1326 switch (op) {
1327 case SCSI_PROT_READ_INSERT:
1328 case SCSI_PROT_READ_STRIP:
1329 case SCSI_PROT_READ_PASS:
1330 /*
1331 * For READ_STRIP and READ_PASS, force the
1332 * error on data being read off the wire. It
1333 * should force an IO error to the driver.
1334 */
1335 phba->lpfc_injerr_rgrd_cnt--;
1336 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1337 phba->lpfc_injerr_nportid = 0;
1338 phba->lpfc_injerr_lba =
1339 LPFC_INJERR_LBA_OFF;
1340 memset(&phba->lpfc_injerr_wwpn,
1341 0, sizeof(struct lpfc_name));
1342 }
1343
1344 rc = BG_ERR_INIT | BG_ERR_SWAP;
1345 /* Signals the caller to swap CRC->CSUM */
1346
1347 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1348 "0818 BLKGRD: Injecting guard error: "
1349 "read lba x%lx\n", (unsigned long)lba);
1350 }
1351 }
1352 }
1353
1354 return rc;
1355 }
1356 #endif
1357
1358 /**
1359 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1360 * the specified SCSI command.
1361 * @phba: The Hba for which this call is being executed.
1362 * @sc: The SCSI command to examine
1363 * @txop: (out) BlockGuard operation for transmitted data
1364 * @rxop: (out) BlockGuard operation for received data
1365 *
1366 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1367 *
1368 **/
1369 static int
1370 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1371 uint8_t *txop, uint8_t *rxop)
1372 {
1373 uint8_t ret = 0;
1374
1375 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1376 switch (scsi_get_prot_op(sc)) {
1377 case SCSI_PROT_READ_INSERT:
1378 case SCSI_PROT_WRITE_STRIP:
1379 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1380 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1381 break;
1382
1383 case SCSI_PROT_READ_STRIP:
1384 case SCSI_PROT_WRITE_INSERT:
1385 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1386 *txop = BG_OP_IN_NODIF_OUT_CRC;
1387 break;
1388
1389 case SCSI_PROT_READ_PASS:
1390 case SCSI_PROT_WRITE_PASS:
1391 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1392 *txop = BG_OP_IN_CSUM_OUT_CRC;
1393 break;
1394
1395 case SCSI_PROT_NORMAL:
1396 default:
1397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1398 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1399 scsi_get_prot_op(sc));
1400 ret = 1;
1401 break;
1402
1403 }
1404 } else {
1405 switch (scsi_get_prot_op(sc)) {
1406 case SCSI_PROT_READ_STRIP:
1407 case SCSI_PROT_WRITE_INSERT:
1408 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1409 *txop = BG_OP_IN_NODIF_OUT_CRC;
1410 break;
1411
1412 case SCSI_PROT_READ_PASS:
1413 case SCSI_PROT_WRITE_PASS:
1414 *rxop = BG_OP_IN_CRC_OUT_CRC;
1415 *txop = BG_OP_IN_CRC_OUT_CRC;
1416 break;
1417
1418 case SCSI_PROT_READ_INSERT:
1419 case SCSI_PROT_WRITE_STRIP:
1420 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1421 *txop = BG_OP_IN_CRC_OUT_NODIF;
1422 break;
1423
1424 case SCSI_PROT_NORMAL:
1425 default:
1426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1427 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1428 scsi_get_prot_op(sc));
1429 ret = 1;
1430 break;
1431 }
1432 }
1433
1434 return ret;
1435 }
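/*
 * Summary of the mapping above for the default (CRC guard) case:
 *   READ_STRIP / WRITE_INSERT: rx BG_OP_IN_CRC_OUT_NODIF, tx BG_OP_IN_NODIF_OUT_CRC
 *   READ_PASS / WRITE_PASS:    rx/tx BG_OP_IN_CRC_OUT_CRC
 *   READ_INSERT / WRITE_STRIP: rx BG_OP_IN_NODIF_OUT_CRC, tx BG_OP_IN_CRC_OUT_NODIF
 * The SCSI_PROT_IP_CHECKSUM case substitutes CSUM on the host-facing side.
 */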
1436
1437 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1438 /**
1439 * lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
1440 * the specified SCSI command in order to force a guard tag error.
1441 * @phba: The Hba for which this call is being executed.
1442 * @sc: The SCSI command to examine
1443 * @txop: (out) BlockGuard operation for transmitted data
1444 * @rxop: (out) BlockGuard operation for received data
1445 *
1446 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1447 *
1448 **/
1449 static int
1450 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1451 uint8_t *txop, uint8_t *rxop)
1452 {
1453
1454 if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
1455 switch (scsi_get_prot_op(sc)) {
1456 case SCSI_PROT_READ_INSERT:
1457 case SCSI_PROT_WRITE_STRIP:
1458 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1459 *txop = BG_OP_IN_CRC_OUT_NODIF;
1460 break;
1461
1462 case SCSI_PROT_READ_STRIP:
1463 case SCSI_PROT_WRITE_INSERT:
1464 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1465 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1466 break;
1467
1468 case SCSI_PROT_READ_PASS:
1469 case SCSI_PROT_WRITE_PASS:
1470 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1471 *txop = BG_OP_IN_CRC_OUT_CSUM;
1472 break;
1473
1474 case SCSI_PROT_NORMAL:
1475 default:
1476 break;
1477
1478 }
1479 } else {
1480 switch (scsi_get_prot_op(sc)) {
1481 case SCSI_PROT_READ_STRIP:
1482 case SCSI_PROT_WRITE_INSERT:
1483 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1484 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1485 break;
1486
1487 case SCSI_PROT_READ_PASS:
1488 case SCSI_PROT_WRITE_PASS:
1489 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1490 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1491 break;
1492
1493 case SCSI_PROT_READ_INSERT:
1494 case SCSI_PROT_WRITE_STRIP:
1495 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1496 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1497 break;
1498
1499 case SCSI_PROT_NORMAL:
1500 default:
1501 break;
1502 }
1503 }
1504
1505 return 0;
1506 }
1507 #endif
1508
1509 /**
1510 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1511 * @phba: The Hba for which this call is being executed.
1512 * @sc: pointer to scsi command we're working on
1513 * @bpl: pointer to buffer list for protection groups
1514 * @datasegcnt: number of segments of data that have been dma mapped
1515 *
1516 * This function sets up BPL buffer list for protection groups of
1517 * type LPFC_PG_TYPE_NO_DIF
1518 *
1519 * This is usually used when the HBA is instructed to generate
1520 * DIFs and insert them into the data stream (or strip DIFs from
1521 * the incoming data stream).
1522 *
1523 * The buffer list consists of just one protection group described
1524 * below:
1525 * +-------------------------+
1526 * start of prot group --> | PDE_5 |
1527 * +-------------------------+
1528 * | PDE_6 |
1529 * +-------------------------+
1530 * | Data BDE |
1531 * +-------------------------+
1532 * |more Data BDE's ... (opt)|
1533 * +-------------------------+
1534 *
1535 *
1536 * Note: Data s/g buffers have been dma mapped
1537 *
1538 * Returns the number of BDEs added to the BPL.
1539 **/
1540 static int
1541 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1542 struct ulp_bde64 *bpl, int datasegcnt)
1543 {
1544 struct scatterlist *sgde = NULL; /* s/g data entry */
1545 struct lpfc_pde5 *pde5 = NULL;
1546 struct lpfc_pde6 *pde6 = NULL;
1547 dma_addr_t physaddr;
1548 int i = 0, num_bde = 0, status;
1549 int datadir = sc->sc_data_direction;
1550 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1551 uint32_t rc;
1552 #endif
1553 uint32_t checking = 1;
1554 uint32_t reftag;
1555 uint8_t txop, rxop;
1556
1557 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1558 if (status)
1559 goto out;
1560
1561 /* extract some info from the scsi command for pde*/
1562 reftag = scsi_prot_ref_tag(sc);
1563 if (reftag == LPFC_INVALID_REFTAG)
1564 goto out;
1565
1566 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1567 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1568 if (rc) {
1569 if (rc & BG_ERR_SWAP)
1570 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1571 if (rc & BG_ERR_CHECK)
1572 checking = 0;
1573 }
1574 #endif
1575
1576 /* setup PDE5 with what we have */
1577 pde5 = (struct lpfc_pde5 *) bpl;
1578 memset(pde5, 0, sizeof(struct lpfc_pde5));
1579 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1580
1581 /* Endianness conversion if necessary for PDE5 */
1582 pde5->word0 = cpu_to_le32(pde5->word0);
1583 pde5->reftag = cpu_to_le32(reftag);
1584
1585 /* advance bpl and increment bde count */
1586 num_bde++;
1587 bpl++;
1588 pde6 = (struct lpfc_pde6 *) bpl;
1589
1590 /* setup PDE6 with the rest of the info */
1591 memset(pde6, 0, sizeof(struct lpfc_pde6));
1592 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1593 bf_set(pde6_optx, pde6, txop);
1594 bf_set(pde6_oprx, pde6, rxop);
1595
1596 /*
1597 * We only need to check the data on READs; for WRITEs,
1598 * protection data is automatically generated, not checked.
1599 */
1600 if (datadir == DMA_FROM_DEVICE) {
1601 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1602 bf_set(pde6_ce, pde6, checking);
1603 else
1604 bf_set(pde6_ce, pde6, 0);
1605
1606 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1607 bf_set(pde6_re, pde6, checking);
1608 else
1609 bf_set(pde6_re, pde6, 0);
1610 }
1611 bf_set(pde6_ai, pde6, 1);
1612 bf_set(pde6_ae, pde6, 0);
1613 bf_set(pde6_apptagval, pde6, 0);
1614
1615 /* Endianness conversion if necessary for PDE6 */
1616 pde6->word0 = cpu_to_le32(pde6->word0);
1617 pde6->word1 = cpu_to_le32(pde6->word1);
1618 pde6->word2 = cpu_to_le32(pde6->word2);
1619
1620 /* advance bpl and increment bde count */
1621 num_bde++;
1622 bpl++;
1623
1624 /* assumption: caller has already run dma_map_sg on command data */
1625 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1626 physaddr = sg_dma_address(sgde);
1627 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1628 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1629 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1630 if (datadir == DMA_TO_DEVICE)
1631 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1632 else
1633 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1634 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1635 bpl++;
1636 num_bde++;
1637 }
1638
1639 out:
1640 return num_bde;
1641 }
1642
1643 /**
1644 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1645 * @phba: The Hba for which this call is being executed.
1646 * @sc: pointer to scsi command we're working on
1647 * @bpl: pointer to buffer list for protection groups
1648 * @datacnt: number of segments of data that have been dma mapped
1649 * @protcnt: number of segment of protection data that have been dma mapped
1650 *
1651 * This function sets up BPL buffer list for protection groups of
1652 * type LPFC_PG_TYPE_DIF
1653 *
1654 * This is usually used when DIFs are in their own buffers,
1655 * separate from the data. The HBA can then be instructed
1656 * to place the DIFs in the outgoing stream. For read operations,
1657 * the HBA can extract the DIFs and place them in DIF buffers.
1658 *
1659 * The buffer list for this type consists of one or more of the
1660 * protection groups described below:
1661 * +-------------------------+
1662 * start of first prot group --> | PDE_5 |
1663 * +-------------------------+
1664 * | PDE_6 |
1665 * +-------------------------+
1666 * | PDE_7 (Prot BDE) |
1667 * +-------------------------+
1668 * | Data BDE |
1669 * +-------------------------+
1670 * |more Data BDE's ... (opt)|
1671 * +-------------------------+
1672 * start of new prot group --> | PDE_5 |
1673 * +-------------------------+
1674 * | ... |
1675 * +-------------------------+
1676 *
1677 * Note: It is assumed that both data and protection s/g buffers have been
1678 * mapped for DMA
1679 *
1680 * Returns the number of BDEs added to the BPL.
1681 **/
1682 static int
1683 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1684 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1685 {
1686 struct scatterlist *sgde = NULL; /* s/g data entry */
1687 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1688 struct lpfc_pde5 *pde5 = NULL;
1689 struct lpfc_pde6 *pde6 = NULL;
1690 struct lpfc_pde7 *pde7 = NULL;
1691 dma_addr_t dataphysaddr, protphysaddr;
1692 unsigned short curr_data = 0, curr_prot = 0;
1693 unsigned int split_offset;
1694 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1695 unsigned int protgrp_blks, protgrp_bytes;
1696 unsigned int remainder, subtotal;
1697 int status;
1698 int datadir = sc->sc_data_direction;
1699 unsigned char pgdone = 0, alldone = 0;
1700 unsigned blksize;
1701 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1702 uint32_t rc;
1703 #endif
1704 uint32_t checking = 1;
1705 uint32_t reftag;
1706 uint8_t txop, rxop;
1707 int num_bde = 0;
1708
1709 sgpe = scsi_prot_sglist(sc);
1710 sgde = scsi_sglist(sc);
1711
1712 if (!sgpe || !sgde) {
1713 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1714 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1715 sgpe, sgde);
1716 return 0;
1717 }
1718
1719 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1720 if (status)
1721 goto out;
1722
1723 /* extract some info from the scsi command */
1724 blksize = scsi_prot_interval(sc);
1725 reftag = scsi_prot_ref_tag(sc);
1726 if (reftag == LPFC_INVALID_REFTAG)
1727 goto out;
1728
1729 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1730 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1731 if (rc) {
1732 if (rc & BG_ERR_SWAP)
1733 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1734 if (rc & BG_ERR_CHECK)
1735 checking = 0;
1736 }
1737 #endif
1738
1739 split_offset = 0;
1740 do {
1741 /* Check to see if we ran out of space */
1742 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1743 return num_bde + 3;
1744
1745 /* setup PDE5 with what we have */
1746 pde5 = (struct lpfc_pde5 *) bpl;
1747 memset(pde5, 0, sizeof(struct lpfc_pde5));
1748 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1749
1750 /* Endianness conversion if necessary for PDE5 */
1751 pde5->word0 = cpu_to_le32(pde5->word0);
1752 pde5->reftag = cpu_to_le32(reftag);
1753
1754 /* advance bpl and increment bde count */
1755 num_bde++;
1756 bpl++;
1757 pde6 = (struct lpfc_pde6 *) bpl;
1758
1759 /* setup PDE6 with the rest of the info */
1760 memset(pde6, 0, sizeof(struct lpfc_pde6));
1761 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1762 bf_set(pde6_optx, pde6, txop);
1763 bf_set(pde6_oprx, pde6, rxop);
1764
1765 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1766 bf_set(pde6_ce, pde6, checking);
1767 else
1768 bf_set(pde6_ce, pde6, 0);
1769
1770 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1771 bf_set(pde6_re, pde6, checking);
1772 else
1773 bf_set(pde6_re, pde6, 0);
1774
1775 bf_set(pde6_ai, pde6, 1);
1776 bf_set(pde6_ae, pde6, 0);
1777 bf_set(pde6_apptagval, pde6, 0);
1778
1779 /* Endianness conversion if necessary for PDE6 */
1780 pde6->word0 = cpu_to_le32(pde6->word0);
1781 pde6->word1 = cpu_to_le32(pde6->word1);
1782 pde6->word2 = cpu_to_le32(pde6->word2);
1783
1784 /* advance bpl and increment bde count */
1785 num_bde++;
1786 bpl++;
1787
1788 /* setup the first BDE that points to protection buffer */
1789 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1790 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1791
1792 /* must be integer multiple of the DIF block length */
1793 BUG_ON(protgroup_len % 8);
1794
1795 pde7 = (struct lpfc_pde7 *) bpl;
1796 memset(pde7, 0, sizeof(struct lpfc_pde7));
1797 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1798
1799 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1800 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1801
1802 protgrp_blks = protgroup_len / 8;
1803 protgrp_bytes = protgrp_blks * blksize;
1804
1805 /* check if this pde is crossing the 4K boundary; if so split */
1806 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1807 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1808 protgroup_offset += protgroup_remainder;
1809 protgrp_blks = protgroup_remainder / 8;
1810 protgrp_bytes = protgrp_blks * blksize;
1811 } else {
1812 protgroup_offset = 0;
1813 curr_prot++;
1814 }
1815
1816 num_bde++;
1817
1818 /* setup BDE's for data blocks associated with DIF data */
1819 pgdone = 0;
1820 subtotal = 0; /* total bytes processed for current prot grp */
1821 while (!pgdone) {
1822 /* Check to see if we ran out of space */
1823 if (num_bde >= phba->cfg_total_seg_cnt)
1824 return num_bde + 1;
1825
1826 if (!sgde) {
1827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1828 "9065 BLKGRD:%s Invalid data segment\n",
1829 __func__);
1830 return 0;
1831 }
1832 bpl++;
1833 dataphysaddr = sg_dma_address(sgde) + split_offset;
1834 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1835 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1836
1837 remainder = sg_dma_len(sgde) - split_offset;
1838
1839 if ((subtotal + remainder) <= protgrp_bytes) {
1840 /* we can use this whole buffer */
1841 bpl->tus.f.bdeSize = remainder;
1842 split_offset = 0;
1843
1844 if ((subtotal + remainder) == protgrp_bytes)
1845 pgdone = 1;
1846 } else {
1847 /* must split this buffer with next prot grp */
1848 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1849 split_offset += bpl->tus.f.bdeSize;
1850 }
1851
1852 subtotal += bpl->tus.f.bdeSize;
1853
1854 if (datadir == DMA_TO_DEVICE)
1855 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1856 else
1857 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1858 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1859
1860 num_bde++;
1861 curr_data++;
1862
1863 if (split_offset)
1864 break;
1865
1866 /* Move to the next s/g segment if possible */
1867 sgde = sg_next(sgde);
1868
1869 }
1870
1871 if (protgroup_offset) {
1872 /* update the reference tag */
1873 reftag += protgrp_blks;
1874 bpl++;
1875 continue;
1876 }
1877
1878 /* are we done ? */
1879 if (curr_prot == protcnt) {
1880 alldone = 1;
1881 } else if (curr_prot < protcnt) {
1882 /* advance to next prot buffer */
1883 sgpe = sg_next(sgpe);
1884 bpl++;
1885
1886 /* update the reference tag */
1887 reftag += protgrp_blks;
1888 } else {
1889 /* if we're here, we have a bug */
1890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1891 "9054 BLKGRD: bug in %s\n", __func__);
1892 }
1893
1894 } while (!alldone);
1895 out:
1896
1897 return num_bde;
1898 }
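/*
 * Illustrative sketch (not part of the driver): the data-splitting
 * bookkeeping used above.  A run of data s/g segments is carved into
 * chunks so that each protection group receives exactly protgrp_bytes
 * of data, splitting a segment (split_offset) whenever it straddles a
 * group boundary.  The helper name and the plain length array standing
 * in for a scatterlist are hypothetical.
 */
static void example_split_into_prot_groups(const unsigned int *seg_len,
					   int seg_cnt,
					   unsigned int protgrp_bytes)
{
	unsigned int split_offset = 0, subtotal = 0, remainder, chunk;
	int i = 0;

	while (i < seg_cnt) {
		remainder = seg_len[i] - split_offset;
		if (subtotal + remainder <= protgrp_bytes) {
			/* the rest of this segment fits in the group */
			chunk = remainder;
			split_offset = 0;
		} else {
			/* segment straddles the boundary: use only part */
			chunk = protgrp_bytes - subtotal;
			split_offset += chunk;
		}
		subtotal += chunk;
		/* ... a data BDE of 'chunk' bytes would be emitted here ... */
		if (subtotal == protgrp_bytes)
			subtotal = 0;	/* next chunk opens a new group */
		if (!split_offset)
			i++;		/* advance once the segment is consumed */
	}
}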
1899
1900 /**
1901 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1902 * @phba: The Hba for which this call is being executed.
1903 * @sc: pointer to scsi command we're working on
1904 * @sgl: pointer to buffer list for protection groups
1905 * @datasegcnt: number of segments of data that have been dma mapped
1906 * @lpfc_cmd: lpfc scsi command object pointer.
1907 *
1908 * This function sets up SGL buffer list for protection groups of
1909 * type LPFC_PG_TYPE_NO_DIF
1910 *
1911 * This is usually used when the HBA is instructed to generate
1912 * DIFs and insert them into the data stream (or strip DIF from
1913 * the incoming data stream).
1914 *
1915 * The buffer list consists of just one protection group described
1916 * below:
1917 * +-------------------------+
1918 * start of prot group --> | DI_SEED |
1919 * +-------------------------+
1920 * | Data SGE |
1921 * +-------------------------+
1922 * |more Data SGE's ... (opt)|
1923 * +-------------------------+
1924 *
1925 *
1926 * Note: Data s/g buffers have been dma mapped
1927 *
1928 * Returns the number of SGEs added to the SGL.
1929 **/
1930 static int
1931 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1932 struct sli4_sge *sgl, int datasegcnt,
1933 struct lpfc_io_buf *lpfc_cmd)
1934 {
1935 struct scatterlist *sgde = NULL; /* s/g data entry */
1936 struct sli4_sge_diseed *diseed = NULL;
1937 dma_addr_t physaddr;
1938 int i = 0, num_sge = 0, status;
1939 uint32_t reftag;
1940 uint8_t txop, rxop;
1941 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1942 uint32_t rc;
1943 #endif
1944 uint32_t checking = 1;
1945 uint32_t dma_len;
1946 uint32_t dma_offset = 0;
1947 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1948 int j;
1949 bool lsp_just_set = false;
1950
1951 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1952 if (status)
1953 goto out;
1954
1955 /* extract some info from the scsi command for pde*/
1956 reftag = scsi_prot_ref_tag(sc);
1957 if (reftag == LPFC_INVALID_REFTAG)
1958 goto out;
1959
1960 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1961 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1962 if (rc) {
1963 if (rc & BG_ERR_SWAP)
1964 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1965 if (rc & BG_ERR_CHECK)
1966 checking = 0;
1967 }
1968 #endif
1969
1970 /* setup DISEED with what we have */
1971 diseed = (struct sli4_sge_diseed *) sgl;
1972 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1973 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1974
1975 /* Endianness conversion if necessary */
1976 diseed->ref_tag = cpu_to_le32(reftag);
1977 diseed->ref_tag_tran = diseed->ref_tag;
1978
1979 /*
1980 * We only need to check the data on READs, for WRITEs
1981 * protection data is automatically generated, not checked.
1982 */
1983 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1984 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
1985 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
1986 else
1987 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
1988
1989 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
1990 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
1991 else
1992 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
1993 }
1994
1995 /* setup DISEED with the rest of the info */
1996 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
1997 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
1998
1999 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2000 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2001
2002 /* Endianness conversion if necessary for DISEED */
2003 diseed->word2 = cpu_to_le32(diseed->word2);
2004 diseed->word3 = cpu_to_le32(diseed->word3);
2005
2006 /* advance bpl and increment sge count */
2007 num_sge++;
2008 sgl++;
2009
2010 /* assumption: caller has already run dma_map_sg on command data */
2011 sgde = scsi_sglist(sc);
2012 j = 3;
2013 for (i = 0; i < datasegcnt; i++) {
2014 /* clear it */
2015 sgl->word2 = 0;
2016
2017 /* do we need to expand the segment */
2018 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2019 ((datasegcnt - 1) != i)) {
2020 /* set LSP type */
2021 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2022
2023 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2024
2025 if (unlikely(!sgl_xtra)) {
2026 lpfc_cmd->seg_cnt = 0;
2027 return 0;
2028 }
2029 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2030 sgl_xtra->dma_phys_sgl));
2031 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2032 sgl_xtra->dma_phys_sgl));
2033
2034 } else {
2035 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2036 }
2037
2038 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2039 if ((datasegcnt - 1) == i)
2040 bf_set(lpfc_sli4_sge_last, sgl, 1);
2041 physaddr = sg_dma_address(sgde);
2042 dma_len = sg_dma_len(sgde);
2043 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2044 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2045
2046 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2047 sgl->word2 = cpu_to_le32(sgl->word2);
2048 sgl->sge_len = cpu_to_le32(dma_len);
2049
2050 dma_offset += dma_len;
2051 sgde = sg_next(sgde);
2052
2053 sgl++;
2054 num_sge++;
2055 lsp_just_set = false;
2056
2057 } else {
2058 sgl->word2 = cpu_to_le32(sgl->word2);
2059 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2060
2061 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2062 i = i - 1;
2063
2064 lsp_just_set = true;
2065 }
2066
2067 j++;
2068
2069 }
2070
2071 out:
2072 return num_sge;
2073 }
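/*
 * Illustrative sketch (not part of the driver): the "do we need to
 * expand the segment" test used above.  The SGL lives in fixed-size
 * pages of border_sge_num entries; when the current slot is the last
 * one in a page, it is turned into an LSP (list segment pointer) SGE
 * that chains to a freshly allocated page instead of holding data.
 * The helper name is hypothetical and the page geometry is an
 * assumption based on how the loop above uses border_sge_num.
 */
static bool example_slot_needs_lsp(int slot, int border_sge_num)
{
	/* slot is 0-based; the final slot of each page carries the LSP */
	return ((slot + 1) % border_sge_num) == 0;
}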
2074
2075 /**
2076 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2077 * @phba: The Hba for which this call is being executed.
2078 * @sc: pointer to scsi command we're working on
2079 * @sgl: pointer to buffer list for protection groups
2080 * @datacnt: number of segments of data that have been dma mapped
2081 * @protcnt: number of segment of protection data that have been dma mapped
2082 * @lpfc_cmd: lpfc scsi command object pointer.
2083 *
2084 * This function sets up SGL buffer list for protection groups of
2085 * type LPFC_PG_TYPE_DIF
2086 *
2087 * This is usually used when DIFs are in their own buffers,
2088 * separate from the data. The HBA can then be instructed
2089 * to place the DIFs in the outgoing stream. For read operations,
2090 * the HBA can extract the DIFs and place them in DIF buffers.
2091 *
2092 * The buffer list for this type consists of one or more of the
2093 * protection groups described below:
2094 * +-------------------------+
2095 * start of first prot group --> | DISEED |
2096 * +-------------------------+
2097 * | DIF (Prot SGE) |
2098 * +-------------------------+
2099 * | Data SGE |
2100 * +-------------------------+
2101 * |more Data SGE's ... (opt)|
2102 * +-------------------------+
2103 * start of new prot group --> | DISEED |
2104 * +-------------------------+
2105 * | ... |
2106 * +-------------------------+
2107 *
2108 * Note: It is assumed that both data and protection s/g buffers have been
2109 * mapped for DMA
2110 *
2111 * Returns the number of SGEs added to the SGL.
2112 **/
2113 static int
2114 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2115 struct sli4_sge *sgl, int datacnt, int protcnt,
2116 struct lpfc_io_buf *lpfc_cmd)
2117 {
2118 struct scatterlist *sgde = NULL; /* s/g data entry */
2119 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2120 struct sli4_sge_diseed *diseed = NULL;
2121 dma_addr_t dataphysaddr, protphysaddr;
2122 unsigned short curr_data = 0, curr_prot = 0;
2123 unsigned int split_offset;
2124 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2125 unsigned int protgrp_blks, protgrp_bytes;
2126 unsigned int remainder, subtotal;
2127 int status;
2128 unsigned char pgdone = 0, alldone = 0;
2129 unsigned blksize;
2130 uint32_t reftag;
2131 uint8_t txop, rxop;
2132 uint32_t dma_len;
2133 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2134 uint32_t rc;
2135 #endif
2136 uint32_t checking = 1;
2137 uint32_t dma_offset = 0;
2138 int num_sge = 0, j = 2;
2139 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2140
2141 sgpe = scsi_prot_sglist(sc);
2142 sgde = scsi_sglist(sc);
2143
2144 if (!sgpe || !sgde) {
2145 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2146 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2147 sgpe, sgde);
2148 return 0;
2149 }
2150
2151 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2152 if (status)
2153 goto out;
2154
2155 /* extract some info from the scsi command */
2156 blksize = scsi_prot_interval(sc);
2157 reftag = scsi_prot_ref_tag(sc);
2158 if (reftag == LPFC_INVALID_REFTAG)
2159 goto out;
2160
2161 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2162 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2163 if (rc) {
2164 if (rc & BG_ERR_SWAP)
2165 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2166 if (rc & BG_ERR_CHECK)
2167 checking = 0;
2168 }
2169 #endif
2170
2171 split_offset = 0;
2172 do {
2173 /* Check to see if we ran out of space */
2174 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2175 !(phba->cfg_xpsgl))
2176 return num_sge + 3;
2177
2178 /* DISEED and DIF have to be together */
2179 if (!((j + 1) % phba->border_sge_num) ||
2180 !((j + 2) % phba->border_sge_num) ||
2181 !((j + 3) % phba->border_sge_num)) {
2182 sgl->word2 = 0;
2183
2184 /* set LSP type */
2185 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2186
2187 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2188
2189 if (unlikely(!sgl_xtra)) {
2190 goto out;
2191 } else {
2192 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2193 sgl_xtra->dma_phys_sgl));
2194 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2195 sgl_xtra->dma_phys_sgl));
2196 }
2197
2198 sgl->word2 = cpu_to_le32(sgl->word2);
2199 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2200
2201 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2202 j = 0;
2203 }
2204
2205 /* setup DISEED with what we have */
2206 diseed = (struct sli4_sge_diseed *) sgl;
2207 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2208 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2209
2210 /* Endianness conversion if necessary */
2211 diseed->ref_tag = cpu_to_le32(reftag);
2212 diseed->ref_tag_tran = diseed->ref_tag;
2213
2214 if (sc->prot_flags & SCSI_PROT_GUARD_CHECK) {
2215 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2216 } else {
2217 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2218 /*
2219 * When in this mode, the hardware will replace
2220 * the guard tag from the host with a
2221 * newly generated good CRC for the wire.
2222 * Switch to raw mode here to avoid this
2223 * behavior. What the host sends gets put on the wire.
2224 */
2225 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2226 txop = BG_OP_RAW_MODE;
2227 rxop = BG_OP_RAW_MODE;
2228 }
2229 }
2230
2231
2232 if (sc->prot_flags & SCSI_PROT_REF_CHECK)
2233 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2234 else
2235 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2236
2237 /* setup DISEED with the rest of the info */
2238 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2239 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2240
2241 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2242 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2243
2244 /* Endianness conversion if necessary for DISEED */
2245 diseed->word2 = cpu_to_le32(diseed->word2);
2246 diseed->word3 = cpu_to_le32(diseed->word3);
2247
2248 /* advance sgl and increment bde count */
2249 num_sge++;
2250
2251 sgl++;
2252 j++;
2253
2254 /* setup the first BDE that points to protection buffer */
2255 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2256 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2257
2258 /* must be integer multiple of the DIF block length */
2259 BUG_ON(protgroup_len % 8);
2260
2261 /* Now setup DIF SGE */
2262 sgl->word2 = 0;
2263 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2264 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2265 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2266 sgl->word2 = cpu_to_le32(sgl->word2);
2267 sgl->sge_len = 0;
2268
2269 protgrp_blks = protgroup_len / 8;
2270 protgrp_bytes = protgrp_blks * blksize;
2271
2272 /* check if DIF SGE is crossing the 4K boundary; if so split */
2273 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2274 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2275 protgroup_offset += protgroup_remainder;
2276 protgrp_blks = protgroup_remainder / 8;
2277 protgrp_bytes = protgrp_blks * blksize;
2278 } else {
2279 protgroup_offset = 0;
2280 curr_prot++;
2281 }
2282
2283 num_sge++;
2284
2285 /* setup SGE's for data blocks associated with DIF data */
2286 pgdone = 0;
2287 subtotal = 0; /* total bytes processed for current prot grp */
2288
2289 sgl++;
2290 j++;
2291
2292 while (!pgdone) {
2293 /* Check to see if we ran out of space */
2294 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2295 !phba->cfg_xpsgl)
2296 return num_sge + 1;
2297
2298 if (!sgde) {
2299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2300 "9086 BLKGRD:%s Invalid data segment\n",
2301 __func__);
2302 return 0;
2303 }
2304
2305 if (!((j + 1) % phba->border_sge_num)) {
2306 sgl->word2 = 0;
2307
2308 /* set LSP type */
2309 bf_set(lpfc_sli4_sge_type, sgl,
2310 LPFC_SGE_TYPE_LSP);
2311
2312 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2313 lpfc_cmd);
2314
2315 if (unlikely(!sgl_xtra)) {
2316 goto out;
2317 } else {
2318 sgl->addr_lo = cpu_to_le32(
2319 putPaddrLow(sgl_xtra->dma_phys_sgl));
2320 sgl->addr_hi = cpu_to_le32(
2321 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2322 }
2323
2324 sgl->word2 = cpu_to_le32(sgl->word2);
2325 sgl->sge_len = cpu_to_le32(
2326 phba->cfg_sg_dma_buf_size);
2327
2328 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2329 } else {
2330 dataphysaddr = sg_dma_address(sgde) +
2331 split_offset;
2332
2333 remainder = sg_dma_len(sgde) - split_offset;
2334
2335 if ((subtotal + remainder) <= protgrp_bytes) {
2336 /* we can use this whole buffer */
2337 dma_len = remainder;
2338 split_offset = 0;
2339
2340 if ((subtotal + remainder) ==
2341 protgrp_bytes)
2342 pgdone = 1;
2343 } else {
2344 /* must split this buffer with next
2345 * prot grp
2346 */
2347 dma_len = protgrp_bytes - subtotal;
2348 split_offset += dma_len;
2349 }
2350
2351 subtotal += dma_len;
2352
2353 sgl->word2 = 0;
2354 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2355 dataphysaddr));
2356 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2357 dataphysaddr));
2358 bf_set(lpfc_sli4_sge_last, sgl, 0);
2359 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2360 bf_set(lpfc_sli4_sge_type, sgl,
2361 LPFC_SGE_TYPE_DATA);
2362
2363 sgl->sge_len = cpu_to_le32(dma_len);
2364 dma_offset += dma_len;
2365
2366 num_sge++;
2367 curr_data++;
2368
2369 if (split_offset) {
2370 sgl++;
2371 j++;
2372 break;
2373 }
2374
2375 /* Move to the next s/g segment if possible */
2376 sgde = sg_next(sgde);
2377
2378 sgl++;
2379 }
2380
2381 j++;
2382 }
2383
2384 if (protgroup_offset) {
2385 /* update the reference tag */
2386 reftag += protgrp_blks;
2387 continue;
2388 }
2389
2390 /* are we done ? */
2391 if (curr_prot == protcnt) {
2392 /* mark the last SGL */
2393 sgl--;
2394 bf_set(lpfc_sli4_sge_last, sgl, 1);
2395 alldone = 1;
2396 } else if (curr_prot < protcnt) {
2397 /* advance to next prot buffer */
2398 sgpe = sg_next(sgpe);
2399
2400 /* update the reference tag */
2401 reftag += protgrp_blks;
2402 } else {
2403 /* if we're here, we have a bug */
2404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2405 "9085 BLKGRD: bug in %s\n", __func__);
2406 }
2407
2408 } while (!alldone);
2409
2410 out:
2411
2412 return num_sge;
2413 }
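/*
 * Illustrative sketch (not part of the driver): the 4K boundary test
 * applied to each protection (DIF) SGE above.  Only the protection
 * bytes that fit below the next 4KB boundary are described now; the
 * remainder is carried into the next pass via protgroup_offset, which
 * also keeps the reference tag advancing by whole blocks.  The helper
 * name is hypothetical.
 */
static unsigned int example_prot_bytes_before_4k(unsigned int addr_low,
						 unsigned int protgroup_len)
{
	unsigned int room = 0x1000 - (addr_low & 0xfff);

	return (room < protgroup_len) ? room : protgroup_len;
}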
2414
2415 /**
2416 * lpfc_prot_group_type - Get protection group type of a SCSI command
2417 * @phba: The Hba for which this call is being executed.
2418 * @sc: pointer to scsi command we're working on
2419 *
2420 * Given a SCSI command that supports DIF, determine composition of protection
2421 * groups involved in setting up buffer lists
2422 *
2423 * Returns: Protection group type (with or without DIF)
2424 *
2425 **/
2426 static int
2427 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2428 {
2429 int ret = LPFC_PG_TYPE_INVALID;
2430 unsigned char op = scsi_get_prot_op(sc);
2431
2432 switch (op) {
2433 case SCSI_PROT_READ_STRIP:
2434 case SCSI_PROT_WRITE_INSERT:
2435 ret = LPFC_PG_TYPE_NO_DIF;
2436 break;
2437 case SCSI_PROT_READ_INSERT:
2438 case SCSI_PROT_WRITE_STRIP:
2439 case SCSI_PROT_READ_PASS:
2440 case SCSI_PROT_WRITE_PASS:
2441 ret = LPFC_PG_TYPE_DIF_BUF;
2442 break;
2443 default:
2444 if (phba)
2445 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2446 "9021 Unsupported protection op:%d\n",
2447 op);
2448 break;
2449 }
2450 return ret;
2451 }
2452
2453 /**
2454 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2455 * @phba: The Hba for which this call is being executed.
2456 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2457 *
2458 * Adjust the data length to account for how much data
2459 * is actually on the wire.
2460 *
2461 * returns the adjusted data length
2462 **/
2463 static int
2464 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2465 struct lpfc_io_buf *lpfc_cmd)
2466 {
2467 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2468 int fcpdl;
2469
2470 fcpdl = scsi_bufflen(sc);
2471
2472 /* Check if there is protection data on the wire */
2473 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2474 /* Read check for protection data */
2475 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2476 return fcpdl;
2477
2478 } else {
2479 /* Write check for protection data */
2480 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2481 return fcpdl;
2482 }
2483
2484 /*
2485 * If we are in DIF Type 1 mode every data block has an 8 byte
2486 * DIF (trailer) attached to it. Must adjust the FCP data length
2487 * to account for the protection data.
2488 */
2489 fcpdl += (fcpdl / scsi_prot_interval(sc)) * 8;
2490
2491 return fcpdl;
2492 }
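/*
 * Illustrative sketch (not part of the driver): the same length
 * adjustment as a standalone computation.  For a 32KB transfer with a
 * 512-byte protection interval, 32768 / 512 = 64 blocks each gain an
 * 8-byte DIF tuple on the wire, so 32768 + 64 * 8 = 33280 bytes are
 * reported as the FCP data length.  Names are hypothetical.
 */
static unsigned int example_dif_wire_len(unsigned int bufflen,
					 unsigned int prot_interval)
{
	/* one 8-byte tuple per protection interval worth of data */
	return bufflen + (bufflen / prot_interval) * 8;
}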
2493
2494 /**
2495 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2496 * @phba: The Hba for which this call is being executed.
2497 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2498 *
2499 * This is the protection/DIF aware version of
2500 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2501 * two functions eventually, but for now, it's here.
2502 * RETURNS 0 - SUCCESS,
2503 * 1 - Failed DMA map, retry.
2504 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2505 **/
2506 static int
2507 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2508 struct lpfc_io_buf *lpfc_cmd)
2509 {
2510 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2511 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2512 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2513 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2514 uint32_t num_bde = 0;
2515 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2516 int prot_group_type = 0;
2517 int fcpdl;
2518 int ret = 1;
2519 struct lpfc_vport *vport = phba->pport;
2520
2521 /*
2522 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2523 * fcp_rsp regions to the first data bde entry
2524 */
2525 bpl += 2;
2526 if (scsi_sg_count(scsi_cmnd)) {
2527 /*
2528 * The driver stores the segment count returned from dma_map_sg
2529 * because this is a count of dma-mappings used to map the use_sg
2530 * pages. They are not guaranteed to be the same for those
2531 * architectures that implement an IOMMU.
2532 */
2533 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2534 scsi_sglist(scsi_cmnd),
2535 scsi_sg_count(scsi_cmnd), datadir);
2536 if (unlikely(!datasegcnt))
2537 return 1;
2538
2539 lpfc_cmd->seg_cnt = datasegcnt;
2540
2541 /* First check if data segment count from SCSI Layer is good */
2542 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2543 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2544 ret = 2;
2545 goto err;
2546 }
2547
2548 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2549
2550 switch (prot_group_type) {
2551 case LPFC_PG_TYPE_NO_DIF:
2552
2553 /* Here we need to add a PDE5 and PDE6 to the count */
2554 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2555 ret = 2;
2556 goto err;
2557 }
2558
2559 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2560 datasegcnt);
2561 /* we should have 2 or more entries in buffer list */
2562 if (num_bde < 2) {
2563 ret = 2;
2564 goto err;
2565 }
2566 break;
2567
2568 case LPFC_PG_TYPE_DIF_BUF:
2569 /*
2570 * This type indicates that protection buffers are
2571 * passed to the driver, so that needs to be prepared
2572 * for DMA
2573 */
2574 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2575 scsi_prot_sglist(scsi_cmnd),
2576 scsi_prot_sg_count(scsi_cmnd), datadir);
2577 if (unlikely(!protsegcnt)) {
2578 scsi_dma_unmap(scsi_cmnd);
2579 return 1;
2580 }
2581
2582 lpfc_cmd->prot_seg_cnt = protsegcnt;
2583
2584 /*
2585 * There is a minimum of 4 BPLs used for every
2586 * protection data segment.
2587 */
2588 if ((lpfc_cmd->prot_seg_cnt * 4) >
2589 (phba->cfg_total_seg_cnt - 2)) {
2590 ret = 2;
2591 goto err;
2592 }
2593
2594 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2595 datasegcnt, protsegcnt);
2596 /* we should have 3 or more entries in buffer list */
2597 if ((num_bde < 3) ||
2598 (num_bde > phba->cfg_total_seg_cnt)) {
2599 ret = 2;
2600 goto err;
2601 }
2602 break;
2603
2604 case LPFC_PG_TYPE_INVALID:
2605 default:
2606 scsi_dma_unmap(scsi_cmnd);
2607 lpfc_cmd->seg_cnt = 0;
2608
2609 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2610 "9022 Unexpected protection group %i\n",
2611 prot_group_type);
2612 return 2;
2613 }
2614 }
2615
2616 /*
2617 * Finish initializing those IOCB fields that are dependent on the
2618 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2619 * reinitialized since all iocb memory resources are used many times
2620 * for transmit, receive, and continuation bpl's.
2621 */
2622 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2623 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2624 iocb_cmd->ulpBdeCount = 1;
2625 iocb_cmd->ulpLe = 1;
2626
2627 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2628 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2629
2630 /*
2631 * Due to difference in data length between DIF/non-DIF paths,
2632 * we need to set word 4 of IOCB here
2633 */
2634 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2635
2636 /*
2637 * For First burst, we may need to adjust the initial transfer
2638 * length for DIF
2639 */
2640 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2641 (fcpdl < vport->cfg_first_burst_size))
2642 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2643
2644 return 0;
2645 err:
2646 if (lpfc_cmd->seg_cnt)
2647 scsi_dma_unmap(scsi_cmnd);
2648 if (lpfc_cmd->prot_seg_cnt)
2649 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2650 scsi_prot_sg_count(scsi_cmnd),
2651 scsi_cmnd->sc_data_direction);
2652
2653 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2654 "9023 Cannot setup S/G List for HBA"
2655 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2656 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2657 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2658 prot_group_type, num_bde);
2659
2660 lpfc_cmd->seg_cnt = 0;
2661 lpfc_cmd->prot_seg_cnt = 0;
2662 return ret;
2663 }
2664
2665 /*
2666 * This function calculates the T10 DIF guard tag
2667 * on the specified data using a CRC algorithm
2668 * via crc_t10dif.
2669 */
2670 static uint16_t
2671 lpfc_bg_crc(uint8_t *data, int count)
2672 {
2673 uint16_t crc = 0;
2674 uint16_t x;
2675
2676 crc = crc_t10dif(data, count);
2677 x = cpu_to_be16(crc);
2678 return x;
2679 }
2680
2681 /*
2682 * This function calculates the T10 DIF guard tag
2683 * on the specified data using a checksum algorithm
2684 * via ip_compute_csum.
2685 */
2686 static uint16_t
2687 lpfc_bg_csum(uint8_t *data, int count)
2688 {
2689 uint16_t ret;
2690
2691 ret = ip_compute_csum(data, count);
2692 return ret;
2693 }
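/*
 * Illustrative sketch (not part of the driver): checking the guard tag
 * of a single protection tuple against its data block, the way
 * lpfc_calc_bg_err() does below.  The choice between CRC and IP
 * checksum follows the command's prot_flags; the helper name and its
 * bool parameter are hypothetical.
 */
static bool example_guard_tag_ok(uint8_t *data, unsigned int blksize,
				 struct scsi_dif_tuple *t, bool use_ip_csum)
{
	uint16_t sum;

	if (use_ip_csum)
		sum = lpfc_bg_csum(data, blksize);
	else
		sum = lpfc_bg_crc(data, blksize);

	/* compared directly against the tuple, as in lpfc_calc_bg_err() */
	return sum == t->guard_tag;
}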
2694
2695 /*
2696 * This function examines the protection data to try to determine
2697 * what type of T10-DIF error occurred.
2698 */
2699 static void
2700 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2701 {
2702 struct scatterlist *sgpe; /* s/g prot entry */
2703 struct scatterlist *sgde; /* s/g data entry */
2704 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2705 struct scsi_dif_tuple *src = NULL;
2706 uint8_t *data_src = NULL;
2707 uint16_t guard_tag;
2708 uint16_t start_app_tag, app_tag;
2709 uint32_t start_ref_tag, ref_tag;
2710 int prot, protsegcnt;
2711 int err_type, len, data_len;
2712 int chk_ref, chk_app, chk_guard;
2713 uint16_t sum;
2714 unsigned blksize;
2715
2716 err_type = BGS_GUARD_ERR_MASK;
2717 sum = 0;
2718 guard_tag = 0;
2719
2720 /* First check to see if there is protection data to examine */
2721 prot = scsi_get_prot_op(cmd);
2722 if ((prot == SCSI_PROT_READ_STRIP) ||
2723 (prot == SCSI_PROT_WRITE_INSERT) ||
2724 (prot == SCSI_PROT_NORMAL))
2725 goto out;
2726
2727 /* Currently the driver just supports ref_tag and guard_tag checking */
2728 chk_ref = 1;
2729 chk_app = 0;
2730 chk_guard = 0;
2731
2732 /* Setup a ptr to the protection data provided by the SCSI host */
2733 sgpe = scsi_prot_sglist(cmd);
2734 protsegcnt = lpfc_cmd->prot_seg_cnt;
2735
2736 if (sgpe && protsegcnt) {
2737
2738 /*
2739 * We will only try to verify guard tag if the segment
2740 * data length is a multiple of the blksize.
2741 */
2742 sgde = scsi_sglist(cmd);
2743 blksize = scsi_prot_interval(cmd);
2744 data_src = (uint8_t *)sg_virt(sgde);
2745 data_len = sgde->length;
2746 if ((data_len & (blksize - 1)) == 0)
2747 chk_guard = 1;
2748
2749 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2750 start_ref_tag = scsi_prot_ref_tag(cmd);
2751 if (start_ref_tag == LPFC_INVALID_REFTAG)
2752 goto out;
2753 start_app_tag = src->app_tag;
2754 len = sgpe->length;
2755 while (src && protsegcnt) {
2756 while (len) {
2757
2758 /*
2759 * First check to see if a protection data
2760 * check is valid
2761 */
2762 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2763 (src->app_tag == T10_PI_APP_ESCAPE)) {
2764 start_ref_tag++;
2765 goto skipit;
2766 }
2767
2768 /* First Guard Tag checking */
2769 if (chk_guard) {
2770 guard_tag = src->guard_tag;
2771 if (cmd->prot_flags
2772 & SCSI_PROT_IP_CHECKSUM)
2773 sum = lpfc_bg_csum(data_src,
2774 blksize);
2775 else
2776 sum = lpfc_bg_crc(data_src,
2777 blksize);
2778 if ((guard_tag != sum)) {
2779 err_type = BGS_GUARD_ERR_MASK;
2780 goto out;
2781 }
2782 }
2783
2784 /* Reference Tag checking */
2785 ref_tag = be32_to_cpu(src->ref_tag);
2786 if (chk_ref && (ref_tag != start_ref_tag)) {
2787 err_type = BGS_REFTAG_ERR_MASK;
2788 goto out;
2789 }
2790 start_ref_tag++;
2791
2792 /* App Tag checking */
2793 app_tag = src->app_tag;
2794 if (chk_app && (app_tag != start_app_tag)) {
2795 err_type = BGS_APPTAG_ERR_MASK;
2796 goto out;
2797 }
2798 skipit:
2799 len -= sizeof(struct scsi_dif_tuple);
2800 if (len < 0)
2801 len = 0;
2802 src++;
2803
2804 data_src += blksize;
2805 data_len -= blksize;
2806
2807 /*
2808 * Are we at the end of the Data segment?
2809 * The data segment is only used for Guard
2810 * tag checking.
2811 */
2812 if (chk_guard && (data_len == 0)) {
2813 chk_guard = 0;
2814 sgde = sg_next(sgde);
2815 if (!sgde)
2816 goto out;
2817
2818 data_src = (uint8_t *)sg_virt(sgde);
2819 data_len = sgde->length;
2820 if ((data_len & (blksize - 1)) == 0)
2821 chk_guard = 1;
2822 }
2823 }
2824
2825 /* Go to the next protection data segment */
2826 sgpe = sg_next(sgpe);
2827 if (sgpe) {
2828 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2829 len = sgpe->length;
2830 } else {
2831 src = NULL;
2832 }
2833 protsegcnt--;
2834 }
2835 }
2836 out:
2837 if (err_type == BGS_GUARD_ERR_MASK) {
2838 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2839 set_host_byte(cmd, DID_ABORT);
2840 phba->bg_guard_err_cnt++;
2841 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2842 "9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
2843 scsi_prot_ref_tag(cmd),
2844 sum, guard_tag);
2845
2846 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2847 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2848 set_host_byte(cmd, DID_ABORT);
2849
2850 phba->bg_reftag_err_cnt++;
2851 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2852 "9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
2853 scsi_prot_ref_tag(cmd),
2854 ref_tag, start_ref_tag);
2855
2856 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2857 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2858 set_host_byte(cmd, DID_ABORT);
2859
2860 phba->bg_apptag_err_cnt++;
2861 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2862 "9041 BLKGRD: reftag %x app_tag err %x != %x\n",
2863 scsi_prot_ref_tag(cmd),
2864 app_tag, start_app_tag);
2865 }
2866 }
2867
2868 /*
2869 * This function checks for BlockGuard errors detected by
2870 * the HBA. In case of errors, the ASC/ASCQ fields in the
2871 * sense buffer will be set accordingly, paired with
2872 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2873 * detected corruption.
2874 *
2875 * Returns:
2876 * 0 - No error found
2877 * 1 - BlockGuard error found
2878 * -1 - Internal error (bad profile, ...etc)
2879 */
2880 static int
2881 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2882 struct lpfc_iocbq *pIocbOut)
2883 {
2884 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2885 struct sli3_bg_fields *bgf;
2886 int ret = 0;
2887 struct lpfc_wcqe_complete *wcqe;
2888 u32 status;
2889 u32 bghm = 0;
2890 u32 bgstat = 0;
2891 u64 failing_sector = 0;
2892
2893 if (phba->sli_rev == LPFC_SLI_REV4) {
2894 wcqe = &pIocbOut->wcqe_cmpl;
2895 status = bf_get(lpfc_wcqe_c_status, wcqe);
2896
2897 if (status == CQE_STATUS_DI_ERROR) {
2898 /* Guard Check failed */
2899 if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))
2900 bgstat |= BGS_GUARD_ERR_MASK;
2901
2902 /* AppTag Check failed */
2903 if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))
2904 bgstat |= BGS_APPTAG_ERR_MASK;
2905
2906 /* RefTag Check failed */
2907 if (bf_get(lpfc_wcqe_c_bg_re, wcqe))
2908 bgstat |= BGS_REFTAG_ERR_MASK;
2909
2910 /* Check to see if there was any good data before the
2911 * error
2912 */
2913 if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
2914 bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
2915 bghm = wcqe->total_data_placed;
2916 }
2917
2918 /*
2919 * Set ALL the error bits to indicate we don't know what
2920 * type of error it is.
2921 */
2922 if (!bgstat)
2923 bgstat |= (BGS_REFTAG_ERR_MASK |
2924 BGS_APPTAG_ERR_MASK |
2925 BGS_GUARD_ERR_MASK);
2926 }
2927
2928 } else {
2929 bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2930 bghm = bgf->bghm;
2931 bgstat = bgf->bgstat;
2932 }
2933
2934 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2935 cmd->result = DID_ERROR << 16;
2936 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2937 "9072 BLKGRD: Invalid BG Profile in cmd "
2938 "0x%x reftag 0x%x blk cnt 0x%x "
2939 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2940 scsi_prot_ref_tag(cmd),
2941 scsi_logical_block_count(cmd), bgstat, bghm);
2942 ret = (-1);
2943 goto out;
2944 }
2945
2946 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2947 cmd->result = DID_ERROR << 16;
2948 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2949 "9073 BLKGRD: Invalid BG PDIF Block in cmd "
2950 "0x%x reftag 0x%x blk cnt 0x%x "
2951 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2952 scsi_prot_ref_tag(cmd),
2953 scsi_logical_block_count(cmd), bgstat, bghm);
2954 ret = (-1);
2955 goto out;
2956 }
2957
2958 if (lpfc_bgs_get_guard_err(bgstat)) {
2959 ret = 1;
2960 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
2961 set_host_byte(cmd, DID_ABORT);
2962 phba->bg_guard_err_cnt++;
2963 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2964 "9055 BLKGRD: Guard Tag error in cmd "
2965 "0x%x reftag 0x%x blk cnt 0x%x "
2966 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2967 scsi_prot_ref_tag(cmd),
2968 scsi_logical_block_count(cmd), bgstat, bghm);
2969 }
2970
2971 if (lpfc_bgs_get_reftag_err(bgstat)) {
2972 ret = 1;
2973 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
2974 set_host_byte(cmd, DID_ABORT);
2975 phba->bg_reftag_err_cnt++;
2976 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2977 "9056 BLKGRD: Ref Tag error in cmd "
2978 "0x%x reftag 0x%x blk cnt 0x%x "
2979 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2980 scsi_prot_ref_tag(cmd),
2981 scsi_logical_block_count(cmd), bgstat, bghm);
2982 }
2983
2984 if (lpfc_bgs_get_apptag_err(bgstat)) {
2985 ret = 1;
2986 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
2987 set_host_byte(cmd, DID_ABORT);
2988 phba->bg_apptag_err_cnt++;
2989 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2990 "9061 BLKGRD: App Tag error in cmd "
2991 "0x%x reftag 0x%x blk cnt 0x%x "
2992 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2993 scsi_prot_ref_tag(cmd),
2994 scsi_logical_block_count(cmd), bgstat, bghm);
2995 }
2996
2997 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2998 /*
2999 * setup sense data descriptor 0 per SPC-4 as an information
3000 * field, and put the failing LBA in it.
3001 * This code assumes there was also a guard/app/ref tag error
3002 * indication.
3003 */
3004 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
3005 cmd->sense_buffer[8] = 0; /* Information descriptor type */
3006 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
3007 cmd->sense_buffer[10] = 0x80; /* Validity bit */
3008
3009 /* bghm is an "on the wire" FC frame based count */
3010 switch (scsi_get_prot_op(cmd)) {
3011 case SCSI_PROT_READ_INSERT:
3012 case SCSI_PROT_WRITE_STRIP:
3013 bghm /= cmd->device->sector_size;
3014 break;
3015 case SCSI_PROT_READ_STRIP:
3016 case SCSI_PROT_WRITE_INSERT:
3017 case SCSI_PROT_READ_PASS:
3018 case SCSI_PROT_WRITE_PASS:
3019 bghm /= (cmd->device->sector_size +
3020 sizeof(struct scsi_dif_tuple));
3021 break;
3022 }
3023
3024 failing_sector = scsi_get_lba(cmd);
3025 failing_sector += bghm;
3026
3027 /* Descriptor Information */
3028 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3029 }
3030
3031 if (!ret) {
3032 /* No error was reported - problem in FW? */
3033 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3034 "9057 BLKGRD: Unknown error in cmd "
3035 "0x%x reftag 0x%x blk cnt 0x%x "
3036 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3037 scsi_prot_ref_tag(cmd),
3038 scsi_logical_block_count(cmd), bgstat, bghm);
3039
3040 /* Calculate what type of error it was */
3041 lpfc_calc_bg_err(phba, lpfc_cmd);
3042 }
3043 out:
3044 return ret;
3045 }
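/*
 * Illustrative sketch (not part of the driver): converting the "good
 * data before the error" byte count (bghm) into the failing LBA that
 * is placed in the sense data above.  bghm counts wire bytes, so each
 * block is either a bare sector or sector + 8-byte DIF tuple depending
 * on whether protection data travels on the wire for this operation.
 * Names are hypothetical.
 */
static u64 example_failing_lba(u64 start_lba, u32 bghm_bytes,
			       u32 sector_size, bool dif_on_wire)
{
	u32 wire_block = sector_size;

	if (dif_on_wire)
		wire_block += sizeof(struct scsi_dif_tuple);

	return start_lba + (bghm_bytes / wire_block);
}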
3046
3047 /**
3048 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3049 * @phba: The Hba for which this call is being executed.
3050 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3051 *
3052 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3053 * field of @lpfc_cmd for device with SLI-4 interface spec.
3054 *
3055 * Return codes:
3056 * 2 - Error - Do not retry
3057 * 1 - Error - Retry
3058 * 0 - Success
3059 **/
3060 static int
3061 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3062 {
3063 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3064 struct scatterlist *sgel = NULL;
3065 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3066 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3067 struct sli4_sge *first_data_sgl;
3068 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3069 struct lpfc_vport *vport = phba->pport;
3070 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3071 dma_addr_t physaddr;
3072 uint32_t dma_len;
3073 uint32_t dma_offset = 0;
3074 int nseg, i, j;
3075 struct ulp_bde64 *bde;
3076 bool lsp_just_set = false;
3077 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3078
3079 /*
3080 * There are three possibilities here - use scatter-gather segment, use
3081 * the single mapping, or neither. Start the lpfc command prep by
3082 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3083 * data bde entry.
3084 */
3085 if (scsi_sg_count(scsi_cmnd)) {
3086 /*
3087 * The driver stores the segment count returned from dma_map_sg
3088 * because this is a count of dma-mappings used to map the use_sg
3089 * pages. They are not guaranteed to be the same for those
3090 * architectures that implement an IOMMU.
3091 */
3092
3093 nseg = scsi_dma_map(scsi_cmnd);
3094 if (unlikely(nseg <= 0))
3095 return 1;
3096 sgl += 1;
3097 /* clear the last flag in the fcp_rsp map entry */
3098 sgl->word2 = le32_to_cpu(sgl->word2);
3099 bf_set(lpfc_sli4_sge_last, sgl, 0);
3100 sgl->word2 = cpu_to_le32(sgl->word2);
3101 sgl += 1;
3102 first_data_sgl = sgl;
3103 lpfc_cmd->seg_cnt = nseg;
3104 if (!phba->cfg_xpsgl &&
3105 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3106 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3107 "9074 BLKGRD:"
3108 " %s: Too many sg segments from "
3109 "dma_map_sg. Config %d, seg_cnt %d\n",
3110 __func__, phba->cfg_sg_seg_cnt,
3111 lpfc_cmd->seg_cnt);
3112 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3113 lpfc_cmd->seg_cnt = 0;
3114 scsi_dma_unmap(scsi_cmnd);
3115 return 2;
3116 }
3117
3118 /*
3119 * The driver established a maximum scatter-gather segment count
3120 * during probe that limits the number of sg elements in any
3121 * single scsi command. Just run through the seg_cnt and format
3122 * the sge's.
3123 * When using SLI-3 the driver will try to fit all the BDEs into
3124 * the IOCB. If it can't then the BDEs get added to a BPL as it
3125 * does for SLI-2 mode.
3126 */
3127
3128 /* for tracking segment boundaries */
3129 sgel = scsi_sglist(scsi_cmnd);
3130 j = 2;
3131 for (i = 0; i < nseg; i++) {
3132 sgl->word2 = 0;
3133 if (nseg == 1) {
3134 bf_set(lpfc_sli4_sge_last, sgl, 1);
3135 bf_set(lpfc_sli4_sge_type, sgl,
3136 LPFC_SGE_TYPE_DATA);
3137 } else {
3138 bf_set(lpfc_sli4_sge_last, sgl, 0);
3139
3140 /* do we need to expand the segment */
3141 if (!lsp_just_set &&
3142 !((j + 1) % phba->border_sge_num) &&
3143 ((nseg - 1) != i)) {
3144 /* set LSP type */
3145 bf_set(lpfc_sli4_sge_type, sgl,
3146 LPFC_SGE_TYPE_LSP);
3147
3148 sgl_xtra = lpfc_get_sgl_per_hdwq(
3149 phba, lpfc_cmd);
3150
3151 if (unlikely(!sgl_xtra)) {
3152 lpfc_cmd->seg_cnt = 0;
3153 scsi_dma_unmap(scsi_cmnd);
3154 return 1;
3155 }
3156 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3157 sgl_xtra->dma_phys_sgl));
3158 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3159 sgl_xtra->dma_phys_sgl));
3160
3161 } else {
3162 bf_set(lpfc_sli4_sge_type, sgl,
3163 LPFC_SGE_TYPE_DATA);
3164 }
3165 }
3166
3167 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3168 LPFC_SGE_TYPE_LSP)) {
3169 if ((nseg - 1) == i)
3170 bf_set(lpfc_sli4_sge_last, sgl, 1);
3171
3172 physaddr = sg_dma_address(sgel);
3173 dma_len = sg_dma_len(sgel);
3174 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3175 physaddr));
3176 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3177 physaddr));
3178
3179 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3180 sgl->word2 = cpu_to_le32(sgl->word2);
3181 sgl->sge_len = cpu_to_le32(dma_len);
3182
3183 dma_offset += dma_len;
3184 sgel = sg_next(sgel);
3185
3186 sgl++;
3187 lsp_just_set = false;
3188
3189 } else {
3190 sgl->word2 = cpu_to_le32(sgl->word2);
3191 sgl->sge_len = cpu_to_le32(
3192 phba->cfg_sg_dma_buf_size);
3193
3194 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3195 i = i - 1;
3196
3197 lsp_just_set = true;
3198 }
3199
3200 j++;
3201 }
3202
3203 /* PBDE support for first data SGE only.
3204 * For FCoE, we key off Performance Hints.
3205 * For FC, we key off lpfc_enable_pbde.
3206 */
3207 if (nseg == 1 &&
3208 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3209 phba->cfg_enable_pbde)) {
3210 /* Words 13-15 */
3211 bde = (struct ulp_bde64 *)
3212 &wqe->words[13];
3213 bde->addrLow = first_data_sgl->addr_lo;
3214 bde->addrHigh = first_data_sgl->addr_hi;
3215 bde->tus.f.bdeSize =
3216 le32_to_cpu(first_data_sgl->sge_len);
3217 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3218 bde->tus.w = cpu_to_le32(bde->tus.w);
3219
3220 /* Word 11 - set PBDE bit */
3221 bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
3222 } else {
3223 memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
3224 /* Word 11 - PBDE bit disabled by default template */
3225 }
3226 } else {
3227 sgl += 1;
3228 /* set the last flag in the fcp_rsp map entry */
3229 sgl->word2 = le32_to_cpu(sgl->word2);
3230 bf_set(lpfc_sli4_sge_last, sgl, 1);
3231 sgl->word2 = cpu_to_le32(sgl->word2);
3232
3233 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3234 phba->cfg_enable_pbde) {
3235 bde = (struct ulp_bde64 *)
3236 &wqe->words[13];
3237 memset(bde, 0, (sizeof(uint32_t) * 3));
3238 }
3239 }
3240
3241 /*
3242 * Finish initializing those IOCB fields that are dependent on the
3243 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3244 * explicitly reinitialized.
3245 * All iocb memory resources are reused.
3246 */
3247 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3248 /* Set first-burst provided it was successfully negotiated */
3249 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3250 vport->cfg_first_burst_size &&
3251 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3252 u32 init_len, total_len;
3253
3254 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3255 init_len = min(total_len, vport->cfg_first_burst_size);
3256
3257 /* Word 4 & 5 */
3258 wqe->fcp_iwrite.initial_xfer_len = init_len;
3259 wqe->fcp_iwrite.total_xfer_len = total_len;
3260 } else {
3261 /* Word 4 */
3262 wqe->fcp_iwrite.total_xfer_len =
3263 be32_to_cpu(fcp_cmnd->fcpDl);
3264 }
3265
3266 /*
3267 * If the OAS driver feature is enabled and the lun is enabled for
3268 * OAS, set the oas iocb related flags.
3269 */
3270 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3271 scsi_cmnd->device->hostdata)->oas_enabled) {
3272 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3273 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3274 scsi_cmnd->device->hostdata)->priority;
3275
3276 /* Word 10 */
3277 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3278 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3279
3280 if (lpfc_cmd->cur_iocbq.priority)
3281 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3282 (lpfc_cmd->cur_iocbq.priority << 1));
3283 else
3284 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3285 (phba->cfg_XLanePriority << 1));
3286 }
3287
3288 return 0;
3289 }
3290
3291 /**
3292 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3293 * @phba: The Hba for which this call is being executed.
3294 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3295 *
3296 * This is the protection/DIF aware version of
3297 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3298 * two functions eventually, but for now, it's here
3299 * Return codes:
3300 * 2 - Error - Do not retry
3301 * 1 - Error - Retry
3302 * 0 - Success
3303 **/
3304 static int
3305 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3306 struct lpfc_io_buf *lpfc_cmd)
3307 {
3308 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3309 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3310 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3311 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3312 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3313 uint32_t num_sge = 0;
3314 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3315 int prot_group_type = 0;
3316 int fcpdl;
3317 int ret = 1;
3318 struct lpfc_vport *vport = phba->pport;
3319
3320 /*
3321 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3322 * fcp_rsp regions to the first data sge entry
3323 */
3324 if (scsi_sg_count(scsi_cmnd)) {
3325 /*
3326 * The driver stores the segment count returned from dma_map_sg
3327 * because this is a count of dma-mappings used to map the use_sg
3328 * pages. They are not guaranteed to be the same for those
3329 * architectures that implement an IOMMU.
3330 */
3331 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3332 scsi_sglist(scsi_cmnd),
3333 scsi_sg_count(scsi_cmnd), datadir);
3334 if (unlikely(!datasegcnt))
3335 return 1;
3336
3337 sgl += 1;
3338 /* clear the last flag in the fcp_rsp map entry */
3339 sgl->word2 = le32_to_cpu(sgl->word2);
3340 bf_set(lpfc_sli4_sge_last, sgl, 0);
3341 sgl->word2 = cpu_to_le32(sgl->word2);
3342
3343 sgl += 1;
3344 lpfc_cmd->seg_cnt = datasegcnt;
3345
3346 /* First check if data segment count from SCSI Layer is good */
3347 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3348 !phba->cfg_xpsgl) {
3349 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3350 ret = 2;
3351 goto err;
3352 }
3353
3354 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3355
3356 switch (prot_group_type) {
3357 case LPFC_PG_TYPE_NO_DIF:
3358 /* Here we need to add a DISEED to the count */
3359 if (((lpfc_cmd->seg_cnt + 1) >
3360 phba->cfg_total_seg_cnt) &&
3361 !phba->cfg_xpsgl) {
3362 ret = 2;
3363 goto err;
3364 }
3365
3366 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3367 datasegcnt, lpfc_cmd);
3368
3369 /* we should have 2 or more entries in buffer list */
3370 if (num_sge < 2) {
3371 ret = 2;
3372 goto err;
3373 }
3374 break;
3375
3376 case LPFC_PG_TYPE_DIF_BUF:
3377 /*
3378 * This type indicates that protection buffers are
3379 * passed to the driver, so that needs to be prepared
3380 * for DMA
3381 */
3382 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3383 scsi_prot_sglist(scsi_cmnd),
3384 scsi_prot_sg_count(scsi_cmnd), datadir);
3385 if (unlikely(!protsegcnt)) {
3386 scsi_dma_unmap(scsi_cmnd);
3387 return 1;
3388 }
3389
3390 lpfc_cmd->prot_seg_cnt = protsegcnt;
3391 /*
3392 * There is a minimum of 3 SGEs used for every
3393 * protection data segment.
3394 */
3395 if (((lpfc_cmd->prot_seg_cnt * 3) >
3396 (phba->cfg_total_seg_cnt - 2)) &&
3397 !phba->cfg_xpsgl) {
3398 ret = 2;
3399 goto err;
3400 }
3401
3402 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3403 datasegcnt, protsegcnt, lpfc_cmd);
3404
3405 /* we should have 3 or more entries in buffer list */
3406 if (num_sge < 3 ||
3407 (num_sge > phba->cfg_total_seg_cnt &&
3408 !phba->cfg_xpsgl)) {
3409 ret = 2;
3410 goto err;
3411 }
3412 break;
3413
3414 case LPFC_PG_TYPE_INVALID:
3415 default:
3416 scsi_dma_unmap(scsi_cmnd);
3417 lpfc_cmd->seg_cnt = 0;
3418
3419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3420 "9083 Unexpected protection group %i\n",
3421 prot_group_type);
3422 return 2;
3423 }
3424 }
3425
3426 switch (scsi_get_prot_op(scsi_cmnd)) {
3427 case SCSI_PROT_WRITE_STRIP:
3428 case SCSI_PROT_READ_STRIP:
3429 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_STRIP;
3430 break;
3431 case SCSI_PROT_WRITE_INSERT:
3432 case SCSI_PROT_READ_INSERT:
3433 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_INSERT;
3434 break;
3435 case SCSI_PROT_WRITE_PASS:
3436 case SCSI_PROT_READ_PASS:
3437 lpfc_cmd->cur_iocbq.cmd_flag |= LPFC_IO_DIF_PASS;
3438 break;
3439 }
3440
3441 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3442 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3443
3444 /* Set first-burst provided it was successfully negotiated */
3445 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3446 vport->cfg_first_burst_size &&
3447 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3448 u32 init_len, total_len;
3449
3450 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3451 init_len = min(total_len, vport->cfg_first_burst_size);
3452
3453 /* Word 4 & 5 */
3454 wqe->fcp_iwrite.initial_xfer_len = init_len;
3455 wqe->fcp_iwrite.total_xfer_len = total_len;
3456 } else {
3457 /* Word 4 */
3458 wqe->fcp_iwrite.total_xfer_len =
3459 be32_to_cpu(fcp_cmnd->fcpDl);
3460 }
3461
3462 /*
3463 * If the OAS driver feature is enabled and the lun is enabled for
3464 * OAS, set the oas iocb related flags.
3465 */
3466 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3467 scsi_cmnd->device->hostdata)->oas_enabled) {
3468 lpfc_cmd->cur_iocbq.cmd_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3469
3470 /* Word 10 */
3471 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3472 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3473 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3474 (phba->cfg_XLanePriority << 1));
3475 }
3476
3477 /* Word 7. DIF Flags */
3478 if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_PASS)
3479 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3480 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_STRIP)
3481 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3482 else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_DIF_INSERT)
3483 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3484
3485 lpfc_cmd->cur_iocbq.cmd_flag &= ~(LPFC_IO_DIF_PASS |
3486 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3487
3488 return 0;
3489 err:
3490 if (lpfc_cmd->seg_cnt)
3491 scsi_dma_unmap(scsi_cmnd);
3492 if (lpfc_cmd->prot_seg_cnt)
3493 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3494 scsi_prot_sg_count(scsi_cmnd),
3495 scsi_cmnd->sc_data_direction);
3496
3497 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3498 "9084 Cannot setup S/G List for HBA"
3499 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3500 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3501 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3502 prot_group_type, num_sge);
3503
3504 lpfc_cmd->seg_cnt = 0;
3505 lpfc_cmd->prot_seg_cnt = 0;
3506 return ret;
3507 }
3508
3509 /**
3510 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3511 * @phba: The Hba for which this call is being executed.
3512 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3513 *
3514 * This routine wraps the actual DMA mapping function pointer from the
3515 * lpfc_hba struct.
3516 *
3517 * Return codes:
3518 * 1 - Error
3519 * 0 - Success
3520 **/
3521 static inline int
3522 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3523 {
3524 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3525 }
3526
3527 /**
3528 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3529 * using BlockGuard.
3530 * @phba: The Hba for which this call is being executed.
3531 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3532 *
3533 * This routine wraps the actual DMA mapping function pointer from the
3534 * lpfc_hba struct.
3535 *
3536 * Return codes:
3537 * 1 - Error
3538 * 0 - Success
3539 **/
3540 static inline int
3541 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3542 {
3543 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3544 }
3545
3546 /**
3547 * lpfc_scsi_prep_cmnd_buf - Wrapper function for IOCB/WQE mapping of scsi
3548 * buffer
3549 * @vport: Pointer to vport object.
3550 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3551 * @tmo: Timeout value for IO
3552 *
3553 * This routine initializes IOCB/WQE data structure from scsi command
3554 *
3555 * Return codes:
3556 * 1 - Error
3557 * 0 - Success
3558 **/
3559 static inline int
3560 lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3561 uint8_t tmo)
3562 {
3563 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3564 }
3565
3566 /**
3567 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3568 * @phba: Pointer to hba context object.
3569 * @vport: Pointer to vport object.
3570 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3571 * @fcpi_parm: FCP Initiator parameter.
3572 *
3573 * This function posts an event when there is a SCSI command reporting
3574 * error from the scsi device.
3575 **/
3576 static void
3577 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3578 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3579 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3580 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3581 uint32_t resp_info = fcprsp->rspStatus2;
3582 uint32_t scsi_status = fcprsp->rspStatus3;
3583 struct lpfc_fast_path_event *fast_path_evt = NULL;
3584 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3585 unsigned long flags;
3586
3587 if (!pnode)
3588 return;
3589
3590 /* If there is a queue full or busy condition, send a scsi event */
3591 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3592 (cmnd->result == SAM_STAT_BUSY)) {
3593 fast_path_evt = lpfc_alloc_fast_evt(phba);
3594 if (!fast_path_evt)
3595 return;
3596 fast_path_evt->un.scsi_evt.event_type =
3597 FC_REG_SCSI_EVENT;
3598 fast_path_evt->un.scsi_evt.subcategory =
3599 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3600 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3601 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3602 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3603 &pnode->nlp_portname, sizeof(struct lpfc_name));
3604 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3605 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3606 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3607 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3608 fast_path_evt = lpfc_alloc_fast_evt(phba);
3609 if (!fast_path_evt)
3610 return;
3611 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3612 FC_REG_SCSI_EVENT;
3613 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3614 LPFC_EVENT_CHECK_COND;
3615 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3616 cmnd->device->lun;
3617 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3618 &pnode->nlp_portname, sizeof(struct lpfc_name));
3619 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3620 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3621 fast_path_evt->un.check_cond_evt.sense_key =
3622 cmnd->sense_buffer[2] & 0xf;
3623 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3624 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3625 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3626 fcpi_parm &&
3627 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3628 ((scsi_status == SAM_STAT_GOOD) &&
3629 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3630 /*
3631 * If the status is good or the resid does not match fcpi_parm, and
3632 * fcpi_parm is valid, then there is a read check error
3633 */
3634 fast_path_evt = lpfc_alloc_fast_evt(phba);
3635 if (!fast_path_evt)
3636 return;
3637 fast_path_evt->un.read_check_error.header.event_type =
3638 FC_REG_FABRIC_EVENT;
3639 fast_path_evt->un.read_check_error.header.subcategory =
3640 LPFC_EVENT_FCPRDCHKERR;
3641 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3642 &pnode->nlp_portname, sizeof(struct lpfc_name));
3643 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3644 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3645 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3646 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3647 fast_path_evt->un.read_check_error.fcpiparam =
3648 fcpi_parm;
3649 } else
3650 return;
3651
3652 fast_path_evt->vport = vport;
3653 spin_lock_irqsave(&phba->hbalock, flags);
3654 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3655 spin_unlock_irqrestore(&phba->hbalock, flags);
3656 lpfc_worker_wake_up(phba);
3657 return;
3658 }
3659
3660 /**
3661 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3662 * @phba: The HBA for which this call is being executed.
3663 * @psb: The scsi buffer which is going to be un-mapped.
3664 *
3665 * This routine does DMA un-mapping of scatter gather list of scsi command
3666 * field of @lpfc_cmd for device with SLI-3 interface spec.
3667 **/
3668 static void
3669 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3670 {
3671 /*
3672 * There are only two special cases to consider. (1) the scsi command
3673 * requested scatter-gather usage or (2) the scsi command allocated
3674 * a request buffer, but did not request use_sg. There is a third
3675 * case, but it does not require resource deallocation.
3676 */
3677 if (psb->seg_cnt > 0)
3678 scsi_dma_unmap(psb->pCmd);
3679 if (psb->prot_seg_cnt > 0)
3680 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3681 scsi_prot_sg_count(psb->pCmd),
3682 psb->pCmd->sc_data_direction);
3683 }
3684
3685 /**
3686 * lpfc_unblock_requests - allow further commands to be queued.
3687 * @phba: pointer to phba object
3688 *
3689 * For single vport, just call scsi_unblock_requests on physical port.
3690 * For multiple vports, send scsi_unblock_requests for all the vports.
3691 */
3692 void
3693 lpfc_unblock_requests(struct lpfc_hba *phba)
3694 {
3695 struct lpfc_vport **vports;
3696 struct Scsi_Host *shost;
3697 int i;
3698
3699 if (phba->sli_rev == LPFC_SLI_REV4 &&
3700 !phba->sli4_hba.max_cfg_param.vpi_used) {
3701 shost = lpfc_shost_from_vport(phba->pport);
3702 scsi_unblock_requests(shost);
3703 return;
3704 }
3705
3706 vports = lpfc_create_vport_work_array(phba);
3707 if (vports != NULL)
3708 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3709 shost = lpfc_shost_from_vport(vports[i]);
3710 scsi_unblock_requests(shost);
3711 }
3712 lpfc_destroy_vport_work_array(phba, vports);
3713 }
3714
3715 /**
3716 * lpfc_block_requests - prevent further commands from being queued.
3717 * @phba: pointer to phba object
3718 *
3719 * For single vport, just call scsi_block_requests on physical port.
3720 * For multiple vports, send scsi_block_requests for all the vports.
3721 */
3722 void
3723 lpfc_block_requests(struct lpfc_hba *phba)
3724 {
3725 struct lpfc_vport **vports;
3726 struct Scsi_Host *shost;
3727 int i;
3728
3729 if (atomic_read(&phba->cmf_stop_io))
3730 return;
3731
3732 if (phba->sli_rev == LPFC_SLI_REV4 &&
3733 !phba->sli4_hba.max_cfg_param.vpi_used) {
3734 shost = lpfc_shost_from_vport(phba->pport);
3735 scsi_block_requests(shost);
3736 return;
3737 }
3738
3739 vports = lpfc_create_vport_work_array(phba);
3740 if (vports != NULL)
3741 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3742 shost = lpfc_shost_from_vport(vports[i]);
3743 scsi_block_requests(shost);
3744 }
3745 lpfc_destroy_vport_work_array(phba, vports);
3746 }
3747
3748 /**
3749 * lpfc_update_cmf_cmpl - Adjust CMF counters for IO completion
3750 * @phba: The HBA for which this call is being executed.
3751 * @time: The latency of the IO that completed (in ns)
3752 * @size: The size of the IO that completed
3753 * @shost: SCSI host the IO completed on (NULL for a NVME IO)
3754 *
3755 * The routine adjusts the various Burst and Bandwidth counters used in
3756 * Congestion management and E2E. If time is set to LPFC_CGN_NOT_SENT,
3757 * that means the IO was never issued to the HBA, so this routine is
3758 * just being called to clean up the counter from a previous
3759 * lpfc_update_cmf_cmd call.
3760 */
3761 int
3762 lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3763 uint64_t time, uint32_t size, struct Scsi_Host *shost)
3764 {
3765 struct lpfc_cgn_stat *cgs;
3766
3767 if (time != LPFC_CGN_NOT_SENT) {
3768 /* lat is ns coming in, save latency in us */
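/* e.g. a 999 ns latency is clamped to 1 us; 1499 ns rounds to 1 us
 * and 1500 ns rounds up to 2 us.
 */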
3769 if (time < 1000)
3770 time = 1;
3771 else
3772 time = div_u64(time + 500, 1000); /* round it */
3773
3774 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3775 atomic64_add(size, &cgs->rcv_bytes);
3776 atomic64_add(time, &cgs->rx_latency);
3777 atomic_inc(&cgs->rx_io_cnt);
3778 }
3779 return 0;
3780 }
3781
3782 /**
3783 * lpfc_update_cmf_cmd - Adjust CMF counters for IO submission
3784 * @phba: The HBA for which this call is being executed.
3785 * @size: The size of the IO that will be issued
3786 *
3787 * The routine adjusts the various Burst and Bandwidth counters used in
3788 * Congestion management and E2E.
3789 */
3790 int
3791 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3792 {
3793 uint64_t total;
3794 struct lpfc_cgn_stat *cgs;
3795 int cpu;
3796
3797 /* At this point we are either LPFC_CFG_MANAGED or LPFC_CFG_MONITOR */
3798 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3799 phba->cmf_max_bytes_per_interval) {
3800 total = 0;
3801 for_each_present_cpu(cpu) {
3802 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3803 total += atomic64_read(&cgs->total_bytes);
3804 }
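/* total now holds the bytes issued across all CPUs in this CMF
 * interval; once it reaches the per-interval cap, block further
 * SCSI requests and return -EBUSY so the caller can busy the IO
 * back to the midlayer.
 */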
3805 if (total >= phba->cmf_max_bytes_per_interval) {
3806 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3807 lpfc_block_requests(phba);
3808 phba->cmf_last_ts =
3809 lpfc_calc_cmf_latency(phba);
3810 }
3811 atomic_inc(&phba->cmf_busy);
3812 return -EBUSY;
3813 }
3814 if (size > atomic_read(&phba->rx_max_read_cnt))
3815 atomic_set(&phba->rx_max_read_cnt, size);
3816 }
3817
3818 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3819 atomic64_add(size, &cgs->total_bytes);
3820 return 0;
3821 }
3822
3823 /**
3824 * lpfc_handle_fcp_err - FCP response handler
3825 * @vport: The virtual port for which this call is being executed.
3826 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3827 * @fcpi_parm: FCP Initiator parameter.
3828 *
3829 * This routine is called to process response IOCB with status field
3830 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
3831 * based upon SCSI and FCP error.
3832 **/
3833 static void
3834 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3835 uint32_t fcpi_parm)
3836 {
3837 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3838 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3839 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3840 uint32_t resp_info = fcprsp->rspStatus2;
3841 uint32_t scsi_status = fcprsp->rspStatus3;
3842 uint32_t *lp;
3843 uint32_t host_status = DID_OK;
3844 uint32_t rsplen = 0;
3845 uint32_t fcpDl;
3846 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3847
3848
3849 /*
3850 * If this is a task management command, there is no
3851 * scsi packet associated with this lpfc_cmd. The driver
3852 * consumes it.
3853 */
3854 if (fcpcmd->fcpCntl2) {
3855 scsi_status = 0;
3856 goto out;
3857 }
3858
3859 if (resp_info & RSP_LEN_VALID) {
3860 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3861 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3862 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3863 "2719 Invalid response length: "
3864 "tgt x%x lun x%llx cmnd x%x rsplen "
3865 "x%x\n", cmnd->device->id,
3866 cmnd->device->lun, cmnd->cmnd[0],
3867 rsplen);
3868 host_status = DID_ERROR;
3869 goto out;
3870 }
3871 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3872 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
3873 "2757 Protocol failure detected during "
3874 "processing of FCP I/O op: "
3875 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3876 cmnd->device->id,
3877 cmnd->device->lun, cmnd->cmnd[0],
3878 fcprsp->rspInfo3);
3879 host_status = DID_ERROR;
3880 goto out;
3881 }
3882 }
3883
3884 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3885 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3886 if (snslen > SCSI_SENSE_BUFFERSIZE)
3887 snslen = SCSI_SENSE_BUFFERSIZE;
3888
3889 if (resp_info & RSP_LEN_VALID)
3890 rsplen = be32_to_cpu(fcprsp->rspRspLen);
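/* Sense data follows the response-info bytes in the FCP_RSP
 * payload; skip rsplen bytes to reach it.
 */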
3891 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3892 }
3893 lp = (uint32_t *)cmnd->sense_buffer;
3894
3895 /* special handling for under run conditions */
3896 if (!scsi_status && (resp_info & RESID_UNDER)) {
3897 /* don't log underruns if LOG_FCP verbosity is set... */
3898 if (vport->cfg_log_verbose & LOG_FCP)
3899 logit = LOG_FCP_ERROR;
3900 /* ...unless the operator explicitly enabled LOG_FCP_UNDER */
3901 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3902 logit = LOG_FCP_UNDER;
3903 }
3904
3905 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3906 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3907 "Data: x%x x%x x%x x%x x%x\n",
3908 cmnd->cmnd[0], scsi_status,
3909 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3910 be32_to_cpu(fcprsp->rspResId),
3911 be32_to_cpu(fcprsp->rspSnsLen),
3912 be32_to_cpu(fcprsp->rspRspLen),
3913 fcprsp->rspInfo3);
3914
3915 scsi_set_resid(cmnd, 0);
3916 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3917 if (resp_info & RESID_UNDER) {
3918 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3919
3920 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3921 "9025 FCP Underrun, expected %d, "
3922 "residual %d Data: x%x x%x x%x\n",
3923 fcpDl,
3924 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3925 cmnd->underflow);
3926
3927 /*
3928 * If there is an underrun, check whether the underrun reported by
3929 * the storage array matches the underrun reported by the HBA.
3930 * If they differ, a frame was dropped.
3931 */
3932 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3933 lpfc_printf_vlog(vport, KERN_WARNING,
3934 LOG_FCP | LOG_FCP_ERROR,
3935 "9026 FCP Read Check Error "
3936 "and Underrun Data: x%x x%x x%x x%x\n",
3937 fcpDl,
3938 scsi_get_resid(cmnd), fcpi_parm,
3939 cmnd->cmnd[0]);
3940 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3941 host_status = DID_ERROR;
3942 }
3943 /*
3944 * The cmnd->underflow is the minimum number of bytes that must
3945 * be transferred for this command. Provided a sense condition
3946 * is not present, make sure the actual amount transferred is at
3947 * least the underflow value or fail.
3948 */
3949 if (!(resp_info & SNS_LEN_VALID) &&
3950 (scsi_status == SAM_STAT_GOOD) &&
3951 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3952 < cmnd->underflow)) {
3953 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3954 "9027 FCP command x%x residual "
3955 "underrun converted to error "
3956 "Data: x%x x%x x%x\n",
3957 cmnd->cmnd[0], scsi_bufflen(cmnd),
3958 scsi_get_resid(cmnd), cmnd->underflow);
3959 host_status = DID_ERROR;
3960 }
3961 } else if (resp_info & RESID_OVER) {
3962 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3963 "9028 FCP command x%x residual overrun error. "
3964 "Data: x%x x%x\n", cmnd->cmnd[0],
3965 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3966 host_status = DID_ERROR;
3967
3968 /*
3969 * Check SLI validation that all the transfer was actually done
3970 * (fcpi_parm should be zero). Apply check only to reads.
3971 */
3972 } else if (fcpi_parm) {
3973 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3974 "9029 FCP %s Check Error Data: "
3975 "x%x x%x x%x x%x x%x\n",
3976 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3977 "Read" : "Write"),
3978 fcpDl, be32_to_cpu(fcprsp->rspResId),
3979 fcpi_parm, cmnd->cmnd[0], scsi_status);
3980
3981 /* There is some issue with the LPe12000 that causes it
3982 * to miscalculate the fcpi_parm and falsely trip this
3983 * recovery logic. Detect this case and don't error when true.
3984 */
3985 if (fcpi_parm > fcpDl)
3986 goto out;
3987
3988 switch (scsi_status) {
3989 case SAM_STAT_GOOD:
3990 case SAM_STAT_CHECK_CONDITION:
3991 /* Fabric dropped a data frame. Fail any successful
3992 * command in which we detected dropped frames.
3993 * A status of good or some check conditions could
3994 * be considered a successful command.
3995 */
3996 host_status = DID_ERROR;
3997 break;
3998 }
3999 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
4000 }
4001
4002 out:
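/* Host byte goes in bits 23:16 of the result, SCSI status in the
 * low byte; e.g. DID_ERROR (0x7) with a check condition (0x2)
 * encodes as 0x00070002.
 */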
4003 cmnd->result = host_status << 16 | scsi_status;
4004 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
4005 }
4006
4007 /**
4008 * lpfc_fcp_io_cmd_wqe_cmpl - Complete a FCP IO
4009 * @phba: The hba for which this call is being executed.
4010 * @pwqeIn: The command WQE for the scsi cmnd.
4011 * @pwqeOut: Pointer to driver response WQE object.
4012 *
4013 * This routine assigns scsi command result by looking into response WQE
4014 * status field appropriately. This routine handles QUEUE FULL condition as
4015 * well by ramping down device queue depth.
4016 **/
4017 static void
4018 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4019 struct lpfc_iocbq *pwqeOut)
4020 {
4021 struct lpfc_io_buf *lpfc_cmd = pwqeIn->io_buf;
4022 struct lpfc_wcqe_complete *wcqe = &pwqeOut->wcqe_cmpl;
4023 struct lpfc_vport *vport = pwqeIn->vport;
4024 struct lpfc_rport_data *rdata;
4025 struct lpfc_nodelist *ndlp;
4026 struct scsi_cmnd *cmd;
4027 unsigned long flags;
4028 struct lpfc_fast_path_event *fast_path_evt;
4029 struct Scsi_Host *shost;
4030 u32 logit = LOG_FCP;
4031 u32 status, idx;
4032 u32 lat;
4033 u8 wait_xb_clr = 0;
4034
4035 /* Sanity check on return of outstanding command */
4036 if (!lpfc_cmd) {
4037 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4038 "9032 Null lpfc_cmd pointer. No "
4039 "release, skip completion\n");
4040 return;
4041 }
4042
4043 rdata = lpfc_cmd->rdata;
4044 ndlp = rdata->pnode;
4045
4046 /* Sanity check on return of outstanding command */
4047 cmd = lpfc_cmd->pCmd;
4048 if (!cmd) {
4049 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4050 "9042 I/O completion: Not an active IO\n");
4051 lpfc_release_scsi_buf(phba, lpfc_cmd);
4052 return;
4053 }
4054 /* Guard against abort handler being called at same time */
4055 spin_lock(&lpfc_cmd->buf_lock);
4056 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4057 if (phba->sli4_hba.hdwq)
4058 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4059
4060 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4061 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4062 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4063 #endif
4064 shost = cmd->device->host;
4065
4066 status = bf_get(lpfc_wcqe_c_status, wcqe);
4067 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4068 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4069
4070 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4071 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
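/* XB set: the exchange is still busy in the HBA. If configured to
 * wait for the ABTS response, defer scsi_done and buffer release
 * until the exchange is freed.
 */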
4072 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4073 if (phba->cfg_fcp_wait_abts_rsp)
4074 wait_xb_clr = 1;
4075 }
4076
4077 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4078 if (lpfc_cmd->prot_data_type) {
4079 struct scsi_dif_tuple *src = NULL;
4080
4081 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4082 /*
4083 * Used to restore any changes to protection
4084 * data for error injection.
4085 */
4086 switch (lpfc_cmd->prot_data_type) {
4087 case LPFC_INJERR_REFTAG:
4088 src->ref_tag =
4089 lpfc_cmd->prot_data;
4090 break;
4091 case LPFC_INJERR_APPTAG:
4092 src->app_tag =
4093 (uint16_t)lpfc_cmd->prot_data;
4094 break;
4095 case LPFC_INJERR_GUARD:
4096 src->guard_tag =
4097 (uint16_t)lpfc_cmd->prot_data;
4098 break;
4099 default:
4100 break;
4101 }
4102
4103 lpfc_cmd->prot_data = 0;
4104 lpfc_cmd->prot_data_type = 0;
4105 lpfc_cmd->prot_data_segment = NULL;
4106 }
4107 #endif
4108 if (unlikely(lpfc_cmd->status)) {
4109 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4110 (lpfc_cmd->result & IOERR_DRVR_MASK))
4111 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4112 else if (lpfc_cmd->status >= IOSTAT_CNT)
4113 lpfc_cmd->status = IOSTAT_DEFAULT;
4114 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4115 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4116 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4117 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4118 logit = 0;
4119 else
4120 logit = LOG_FCP | LOG_FCP_UNDER;
4121 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4122 "9034 FCP cmd x%x failed <%d/%lld> "
4123 "status: x%x result: x%x "
4124 "sid: x%x did: x%x oxid: x%x "
4125 "Data: x%x x%x x%x\n",
4126 cmd->cmnd[0],
4127 cmd->device ? cmd->device->id : 0xffff,
4128 cmd->device ? cmd->device->lun : 0xffff,
4129 lpfc_cmd->status, lpfc_cmd->result,
4130 vport->fc_myDID,
4131 (ndlp) ? ndlp->nlp_DID : 0,
4132 lpfc_cmd->cur_iocbq.sli4_xritag,
4133 wcqe->parameter, wcqe->total_data_placed,
4134 lpfc_cmd->cur_iocbq.iotag);
4135 }
4136
4137 switch (lpfc_cmd->status) {
4138 case IOSTAT_SUCCESS:
4139 cmd->result = DID_OK << 16;
4140 break;
4141 case IOSTAT_FCP_RSP_ERROR:
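/* For SLI-4, derive the FCP residual (fcpi_parm) from the
 * requested transfer length minus the bytes the HBA actually
 * placed.
 */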
4142 lpfc_handle_fcp_err(vport, lpfc_cmd,
4143 pwqeIn->wqe.fcp_iread.total_xfer_len -
4144 wcqe->total_data_placed);
4145 break;
4146 case IOSTAT_NPORT_BSY:
4147 case IOSTAT_FABRIC_BSY:
4148 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4149 fast_path_evt = lpfc_alloc_fast_evt(phba);
4150 if (!fast_path_evt)
4151 break;
4152 fast_path_evt->un.fabric_evt.event_type =
4153 FC_REG_FABRIC_EVENT;
4154 fast_path_evt->un.fabric_evt.subcategory =
4155 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4156 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4157 if (ndlp) {
4158 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4159 &ndlp->nlp_portname,
4160 sizeof(struct lpfc_name));
4161 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4162 &ndlp->nlp_nodename,
4163 sizeof(struct lpfc_name));
4164 }
4165 fast_path_evt->vport = vport;
4166 fast_path_evt->work_evt.evt =
4167 LPFC_EVT_FASTPATH_MGMT_EVT;
4168 spin_lock_irqsave(&phba->hbalock, flags);
4169 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4170 &phba->work_list);
4171 spin_unlock_irqrestore(&phba->hbalock, flags);
4172 lpfc_worker_wake_up(phba);
4173 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4174 "9035 Fabric/Node busy FCP cmd x%x failed"
4175 " <%d/%lld> "
4176 "status: x%x result: x%x "
4177 "sid: x%x did: x%x oxid: x%x "
4178 "Data: x%x x%x x%x\n",
4179 cmd->cmnd[0],
4180 cmd->device ? cmd->device->id : 0xffff,
4181 cmd->device ? cmd->device->lun : 0xffff,
4182 lpfc_cmd->status, lpfc_cmd->result,
4183 vport->fc_myDID,
4184 (ndlp) ? ndlp->nlp_DID : 0,
4185 lpfc_cmd->cur_iocbq.sli4_xritag,
4186 wcqe->parameter,
4187 wcqe->total_data_placed,
4188 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4189 break;
4190 case IOSTAT_REMOTE_STOP:
4191 if (ndlp) {
4192 /* This I/O was aborted by the target. We don't
4193 * know the rxid and, because we did not send the
4194 * ABTS, we cannot generate an RRQ.
4195 */
4196 lpfc_set_rrq_active(phba, ndlp,
4197 lpfc_cmd->cur_iocbq.sli4_lxritag,
4198 0, 0);
4199 }
4200 fallthrough;
4201 case IOSTAT_LOCAL_REJECT:
4202 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4203 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4204 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4205 lpfc_cmd->result ==
4206 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4207 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4208 lpfc_cmd->result ==
4209 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4210 cmd->result = DID_NO_CONNECT << 16;
4211 break;
4212 }
4213 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4214 lpfc_cmd->result == IOERR_LINK_DOWN ||
4215 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4216 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4217 lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
4218 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4219 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4220 break;
4221 }
4222 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4223 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4224 status == CQE_STATUS_DI_ERROR) {
4225 if (scsi_get_prot_op(cmd) !=
4226 SCSI_PROT_NORMAL) {
4227 /*
4228 * This is a response for a BG enabled
4229 * cmd. Parse BG error
4230 */
4231 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4232 break;
4233 } else {
4234 lpfc_printf_vlog(vport, KERN_WARNING,
4235 LOG_BG,
4236 "9040 non-zero BGSTAT "
4237 "on unprotected cmd\n");
4238 }
4239 }
4240 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4241 "9036 Local Reject FCP cmd x%x failed"
4242 " <%d/%lld> "
4243 "status: x%x result: x%x "
4244 "sid: x%x did: x%x oxid: x%x "
4245 "Data: x%x x%x x%x\n",
4246 cmd->cmnd[0],
4247 cmd->device ? cmd->device->id : 0xffff,
4248 cmd->device ? cmd->device->lun : 0xffff,
4249 lpfc_cmd->status, lpfc_cmd->result,
4250 vport->fc_myDID,
4251 (ndlp) ? ndlp->nlp_DID : 0,
4252 lpfc_cmd->cur_iocbq.sli4_xritag,
4253 wcqe->parameter,
4254 wcqe->total_data_placed,
4255 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4256 fallthrough;
4257 default:
4258 if (lpfc_cmd->status >= IOSTAT_CNT)
4259 lpfc_cmd->status = IOSTAT_DEFAULT;
4260 cmd->result = DID_ERROR << 16;
4261 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4262 "9037 FCP Completion Error: xri %x "
4263 "status x%x result x%x [x%x] "
4264 "placed x%x\n",
4265 lpfc_cmd->cur_iocbq.sli4_xritag,
4266 lpfc_cmd->status, lpfc_cmd->result,
4267 wcqe->parameter,
4268 wcqe->total_data_placed);
4269 }
4270 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4271 u32 *lp = (u32 *)cmd->sense_buffer;
4272
4273 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4274 "9039 Iodone <%d/%llu> cmd x%px, error "
4275 "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
4276 cmd->device->id, cmd->device->lun, cmd,
4277 cmd->result, *lp, *(lp + 3),
4278 (u64)scsi_get_lba(cmd),
4279 cmd->retries, scsi_get_resid(cmd));
4280 }
4281
4282 if (vport->cfg_max_scsicmpl_time &&
4283 time_after(jiffies, lpfc_cmd->start_time +
4284 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4285 spin_lock_irqsave(shost->host_lock, flags);
4286 if (ndlp) {
4287 if (ndlp->cmd_qdepth >
4288 atomic_read(&ndlp->cmd_pending) &&
4289 (atomic_read(&ndlp->cmd_pending) >
4290 LPFC_MIN_TGT_QDEPTH) &&
4291 (cmd->cmnd[0] == READ_10 ||
4292 cmd->cmnd[0] == WRITE_10))
4293 ndlp->cmd_qdepth =
4294 atomic_read(&ndlp->cmd_pending);
4295
4296 ndlp->last_change_time = jiffies;
4297 }
4298 spin_unlock_irqrestore(shost->host_lock, flags);
4299 }
4300 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4301
4302 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4303 if (lpfc_cmd->ts_cmd_start) {
4304 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4305 lpfc_cmd->ts_data_io = ktime_get_ns();
4306 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4307 lpfc_io_ktime(phba, lpfc_cmd);
4308 }
4309 #endif
4310 if (likely(!wait_xb_clr))
4311 lpfc_cmd->pCmd = NULL;
4312 spin_unlock(&lpfc_cmd->buf_lock);
4313
4314 /* Check if IO qualified for CMF */
4315 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4316 cmd->sc_data_direction == DMA_FROM_DEVICE &&
4317 (scsi_sg_count(cmd))) {
4318 /* Used when calculating average latency */
4319 lat = ktime_get_ns() - lpfc_cmd->rx_cmd_start;
4320 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4321 }
4322
4323 if (wait_xb_clr)
4324 goto out;
4325
4326 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4327 scsi_done(cmd);
4328
4329 /*
4330 * If there is an abort thread waiting for command completion
4331 * wake up the thread.
4332 */
4333 spin_lock(&lpfc_cmd->buf_lock);
4334 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4335 if (lpfc_cmd->waitq)
4336 wake_up(lpfc_cmd->waitq);
4337 spin_unlock(&lpfc_cmd->buf_lock);
4338 out:
4339 lpfc_release_scsi_buf(phba, lpfc_cmd);
4340 }
4341
4342 /**
4343 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
4344 * @phba: The Hba for which this call is being executed.
4345 * @pIocbIn: The command IOCBQ for the scsi cmnd.
4346 * @pIocbOut: The response IOCBQ for the scsi cmnd.
4347 *
4348 * This routine assigns scsi command result by looking into response IOCB
4349 * status field appropriately. This routine handles QUEUE FULL condition as
4350 * well by ramping down device queue depth.
4351 **/
4352 static void
4353 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4354 struct lpfc_iocbq *pIocbOut)
4355 {
4356 struct lpfc_io_buf *lpfc_cmd =
4357 (struct lpfc_io_buf *) pIocbIn->io_buf;
4358 struct lpfc_vport *vport = pIocbIn->vport;
4359 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4360 struct lpfc_nodelist *pnode = rdata->pnode;
4361 struct scsi_cmnd *cmd;
4362 unsigned long flags;
4363 struct lpfc_fast_path_event *fast_path_evt;
4364 struct Scsi_Host *shost;
4365 int idx;
4366 uint32_t logit = LOG_FCP;
4367
4368 /* Guard against abort handler being called at same time */
4369 spin_lock(&lpfc_cmd->buf_lock);
4370
4371 /* Sanity check on return of outstanding command */
4372 cmd = lpfc_cmd->pCmd;
4373 if (!cmd || !phba) {
4374 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4375 "2621 IO completion: Not an active IO\n");
4376 spin_unlock(&lpfc_cmd->buf_lock);
4377 return;
4378 }
4379
4380 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4381 if (phba->sli4_hba.hdwq)
4382 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4383
4384 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4385 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4386 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4387 #endif
4388 shost = cmd->device->host;
4389
4390 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
4391 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
4392 /* pick up SLI4 exchange busy status from HBA */
4393 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4394 if (pIocbOut->cmd_flag & LPFC_EXCHANGE_BUSY)
4395 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4396
4397 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4398 if (lpfc_cmd->prot_data_type) {
4399 struct scsi_dif_tuple *src = NULL;
4400
4401 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4402 /*
4403 * Used to restore any changes to protection
4404 * data for error injection.
4405 */
4406 switch (lpfc_cmd->prot_data_type) {
4407 case LPFC_INJERR_REFTAG:
4408 src->ref_tag =
4409 lpfc_cmd->prot_data;
4410 break;
4411 case LPFC_INJERR_APPTAG:
4412 src->app_tag =
4413 (uint16_t)lpfc_cmd->prot_data;
4414 break;
4415 case LPFC_INJERR_GUARD:
4416 src->guard_tag =
4417 (uint16_t)lpfc_cmd->prot_data;
4418 break;
4419 default:
4420 break;
4421 }
4422
4423 lpfc_cmd->prot_data = 0;
4424 lpfc_cmd->prot_data_type = 0;
4425 lpfc_cmd->prot_data_segment = NULL;
4426 }
4427 #endif
4428
4429 if (unlikely(lpfc_cmd->status)) {
4430 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4431 (lpfc_cmd->result & IOERR_DRVR_MASK))
4432 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4433 else if (lpfc_cmd->status >= IOSTAT_CNT)
4434 lpfc_cmd->status = IOSTAT_DEFAULT;
4435 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4436 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4437 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4438 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4439 logit = 0;
4440 else
4441 logit = LOG_FCP | LOG_FCP_UNDER;
4442 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4443 "9030 FCP cmd x%x failed <%d/%lld> "
4444 "status: x%x result: x%x "
4445 "sid: x%x did: x%x oxid: x%x "
4446 "Data: x%x x%x\n",
4447 cmd->cmnd[0],
4448 cmd->device ? cmd->device->id : 0xffff,
4449 cmd->device ? cmd->device->lun : 0xffff,
4450 lpfc_cmd->status, lpfc_cmd->result,
4451 vport->fc_myDID,
4452 (pnode) ? pnode->nlp_DID : 0,
4453 phba->sli_rev == LPFC_SLI_REV4 ?
4454 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4455 pIocbOut->iocb.ulpContext,
4456 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4457
4458 switch (lpfc_cmd->status) {
4459 case IOSTAT_FCP_RSP_ERROR:
4460 /* Call FCP RSP handler to determine result */
4461 lpfc_handle_fcp_err(vport, lpfc_cmd,
4462 pIocbOut->iocb.un.fcpi.fcpi_parm);
4463 break;
4464 case IOSTAT_NPORT_BSY:
4465 case IOSTAT_FABRIC_BSY:
4466 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4467 fast_path_evt = lpfc_alloc_fast_evt(phba);
4468 if (!fast_path_evt)
4469 break;
4470 fast_path_evt->un.fabric_evt.event_type =
4471 FC_REG_FABRIC_EVENT;
4472 fast_path_evt->un.fabric_evt.subcategory =
4473 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4474 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4475 if (pnode) {
4476 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4477 &pnode->nlp_portname,
4478 sizeof(struct lpfc_name));
4479 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4480 &pnode->nlp_nodename,
4481 sizeof(struct lpfc_name));
4482 }
4483 fast_path_evt->vport = vport;
4484 fast_path_evt->work_evt.evt =
4485 LPFC_EVT_FASTPATH_MGMT_EVT;
4486 spin_lock_irqsave(&phba->hbalock, flags);
4487 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4488 &phba->work_list);
4489 spin_unlock_irqrestore(&phba->hbalock, flags);
4490 lpfc_worker_wake_up(phba);
4491 break;
4492 case IOSTAT_LOCAL_REJECT:
4493 case IOSTAT_REMOTE_STOP:
4494 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4495 lpfc_cmd->result ==
4496 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4497 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4498 lpfc_cmd->result ==
4499 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4500 cmd->result = DID_NO_CONNECT << 16;
4501 break;
4502 }
4503 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4504 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4505 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4506 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4507 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4508 break;
4509 }
4510 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4511 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4512 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
4513 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
4514 /*
4515 * This is a response for a BG enabled
4516 * cmd. Parse BG error
4517 */
4518 lpfc_parse_bg_err(phba, lpfc_cmd,
4519 pIocbOut);
4520 break;
4521 } else {
4522 lpfc_printf_vlog(vport, KERN_WARNING,
4523 LOG_BG,
4524 "9031 non-zero BGSTAT "
4525 "on unprotected cmd\n");
4526 }
4527 }
4528 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
4529 && (phba->sli_rev == LPFC_SLI_REV4)
4530 && pnode) {
4531 /* This IO was aborted by the target. We don't
4532 * know the rxid and, because we did not send the
4533 * ABTS, we cannot generate an RRQ.
4534 */
4535 lpfc_set_rrq_active(phba, pnode,
4536 lpfc_cmd->cur_iocbq.sli4_lxritag,
4537 0, 0);
4538 }
4539 fallthrough;
4540 default:
4541 cmd->result = DID_ERROR << 16;
4542 break;
4543 }
4544
4545 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
4546 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
4547 SAM_STAT_BUSY;
4548 } else
4549 cmd->result = DID_OK << 16;
4550
4551 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4552 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
4553
4554 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4555 "0710 Iodone <%d/%llu> cmd x%px, error "
4556 "x%x SNS x%x x%x Data: x%x x%x\n",
4557 cmd->device->id, cmd->device->lun, cmd,
4558 cmd->result, *lp, *(lp + 3), cmd->retries,
4559 scsi_get_resid(cmd));
4560 }
4561
4562 if (vport->cfg_max_scsicmpl_time &&
4563 time_after(jiffies, lpfc_cmd->start_time +
4564 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4565 spin_lock_irqsave(shost->host_lock, flags);
4566 if (pnode) {
4567 if (pnode->cmd_qdepth >
4568 atomic_read(&pnode->cmd_pending) &&
4569 (atomic_read(&pnode->cmd_pending) >
4570 LPFC_MIN_TGT_QDEPTH) &&
4571 ((cmd->cmnd[0] == READ_10) ||
4572 (cmd->cmnd[0] == WRITE_10)))
4573 pnode->cmd_qdepth =
4574 atomic_read(&pnode->cmd_pending);
4575
4576 pnode->last_change_time = jiffies;
4577 }
4578 spin_unlock_irqrestore(shost->host_lock, flags);
4579 }
4580 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4581
4582 lpfc_cmd->pCmd = NULL;
4583 spin_unlock(&lpfc_cmd->buf_lock);
4584
4585 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4586 if (lpfc_cmd->ts_cmd_start) {
4587 lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
4588 lpfc_cmd->ts_data_io = ktime_get_ns();
4589 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4590 lpfc_io_ktime(phba, lpfc_cmd);
4591 }
4592 #endif
4593
4594 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4595 scsi_done(cmd);
4596
4597 /*
4598 * If there is an abort thread waiting for command completion
4599 * wake up the thread.
4600 */
4601 spin_lock(&lpfc_cmd->buf_lock);
4602 lpfc_cmd->cur_iocbq.cmd_flag &= ~LPFC_DRIVER_ABORTED;
4603 if (lpfc_cmd->waitq)
4604 wake_up(lpfc_cmd->waitq);
4605 spin_unlock(&lpfc_cmd->buf_lock);
4606
4607 lpfc_release_scsi_buf(phba, lpfc_cmd);
4608 }
4609
4610 /**
4611 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB init for the IO
4612 * @vport: Pointer to vport object.
4613 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4614 * @tmo: timeout value for the IO
4615 *
4616 * Based on the data-direction of the command, initialize the IOCB
4617 * in the I/O buffer. Fill in the IOCB fields which are independent
4618 * of the scsi buffer.
4619 *
4620 * RETURNS 0 - SUCCESS,
4621 **/
4622 static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
4623 struct lpfc_io_buf *lpfc_cmd,
4624 uint8_t tmo)
4625 {
4626 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4627 struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
4628 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4629 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4630 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4631 int datadir = scsi_cmnd->sc_data_direction;
4632 u32 fcpdl;
4633
4634 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4635
4636 /*
4637 * There are three possibilities here - use scatter-gather segment, use
4638 * the single mapping, or neither. Start the lpfc command prep by
4639 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4640 * data bde entry.
4641 */
4642 if (scsi_sg_count(scsi_cmnd)) {
4643 if (datadir == DMA_TO_DEVICE) {
4644 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4645 iocb_cmd->ulpPU = PARM_READ_CHECK;
4646 if (vport->cfg_first_burst_size &&
4647 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4648 u32 xrdy_len;
4649
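/* First burst: let the HBA send up to cfg_first_burst_size bytes
 * of write data before the target's first XFER_RDY.
 */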
4650 fcpdl = scsi_bufflen(scsi_cmnd);
4651 xrdy_len = min(fcpdl,
4652 vport->cfg_first_burst_size);
4653 piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
4654 }
4655 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4656 } else {
4657 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4658 iocb_cmd->ulpPU = PARM_READ_CHECK;
4659 fcp_cmnd->fcpCntl3 = READ_DATA;
4660 }
4661 } else {
4662 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4663 iocb_cmd->un.fcpi.fcpi_parm = 0;
4664 iocb_cmd->ulpPU = 0;
4665 fcp_cmnd->fcpCntl3 = 0;
4666 }
4667
4668 /*
4669 * Finish initializing those IOCB fields that are independent
4670 * of the scsi_cmnd request_buffer
4671 */
4672 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4673 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4674 piocbq->iocb.ulpFCP2Rcvy = 1;
4675 else
4676 piocbq->iocb.ulpFCP2Rcvy = 0;
4677
4678 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4679 piocbq->io_buf = lpfc_cmd;
4680 if (!piocbq->cmd_cmpl)
4681 piocbq->cmd_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4682 piocbq->iocb.ulpTimeout = tmo;
4683 piocbq->vport = vport;
4684 return 0;
4685 }
4686
4687 /**
4688 * lpfc_scsi_prep_cmnd_buf_s4 - SLI-4 WQE init for the IO
4689 * @vport: Pointer to vport object.
4690 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
4691 * @tmo: timeout value for the IO
4692 *
4693 * Based on the data-direction of the command, copy the WQE template
4694 * to the I/O buffer WQE. Fill in the WQE fields which are independent
4695 * of the scsi buffer.
4696 *
4697 * RETURNS 0 - SUCCESS,
4698 **/
4699 static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4700 struct lpfc_io_buf *lpfc_cmd,
4701 uint8_t tmo)
4702 {
4703 struct lpfc_hba *phba = vport->phba;
4704 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4705 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4706 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4707 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4708 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4709 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4710 u16 idx = lpfc_cmd->hdwq_no;
4711 int datadir = scsi_cmnd->sc_data_direction;
4712
4713 hdwq = &phba->sli4_hba.hdwq[idx];
4714
4715 /* Initialize 64 bytes only */
4716 memset(wqe, 0, sizeof(union lpfc_wqe128));
4717
4718 /*
4719 * There are three possibilities here - use scatter-gather segment, use
4720 * the single mapping, or neither.
4721 */
4722 if (scsi_sg_count(scsi_cmnd)) {
4723 if (datadir == DMA_TO_DEVICE) {
4724 /* From the iwrite template, initialize words 7 - 11 */
4725 memcpy(&wqe->words[7],
4726 &lpfc_iwrite_cmd_template.words[7],
4727 sizeof(uint32_t) * 5);
4728
4729 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4730 if (hdwq)
4731 hdwq->scsi_cstat.output_requests++;
4732 } else {
4733 /* From the iread template, initialize words 7 - 11 */
4734 memcpy(&wqe->words[7],
4735 &lpfc_iread_cmd_template.words[7],
4736 sizeof(uint32_t) * 5);
4737
4738 /* Word 7 */
4739 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4740
4741 fcp_cmnd->fcpCntl3 = READ_DATA;
4742 if (hdwq)
4743 hdwq->scsi_cstat.input_requests++;
4744
4745 /* For a CMF Managed port, iod must be zeroed */
4746 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4747 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
4748 LPFC_WQE_IOD_NONE);
4749 }
4750 } else {
4751 /* From the icmnd template, initialize words 4 - 11 */
4752 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4753 sizeof(uint32_t) * 8);
4754
4755 /* Word 7 */
4756 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4757
4758 fcp_cmnd->fcpCntl3 = 0;
4759 if (hdwq)
4760 hdwq->scsi_cstat.control_requests++;
4761 }
4762
4763 /*
4764 * Finish initializing those WQE fields that are independent
4765 * of the request_buffer
4766 */
4767
4768 /* Word 3 */
4769 bf_set(payload_offset_len, &wqe->fcp_icmd,
4770 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4771
4772 /* Word 6 */
4773 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4774 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4775 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4776
4777 /* Word 7 */
4778 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4779 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4780
4781 bf_set(wqe_class, &wqe->generic.wqe_com,
4782 (pnode->nlp_fcp_info & 0x0f));
4783
4784 /* Word 8 */
4785 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4786
4787 /* Word 9 */
4788 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4789
4790 pwqeq->vport = vport;
4791 pwqeq->io_buf = lpfc_cmd;
4792 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4793 pwqeq->cmd_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4794
4795 return 0;
4796 }
4797
4798 /**
4799 * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
4800 * @vport: The virtual port for which this call is being executed.
4801 * @lpfc_cmd: The scsi command which needs to send.
4802 * @pnode: Pointer to lpfc_nodelist.
4803 *
4804 * This routine initializes the fcp_cmnd and IOCB/WQE data structures from the
4805 * scsi command before the I/O is issued to the device.
4806 **/
4807 static int
4808 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4809 struct lpfc_nodelist *pnode)
4810 {
4811 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4812 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4813 u8 *ptr;
4814
4815 if (!pnode)
4816 return 0;
4817
4818 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4819 /* clear task management bits */
4820 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4821
4822 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4823 &lpfc_cmd->fcp_cmnd->fcp_lun);
4824
4825 ptr = &fcp_cmnd->fcpCdb[0];
4826 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
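/* Zero-pad the rest of the fixed-size FCP CDB area so a short CDB
 * does not carry stale bytes to the target.
 */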
4827 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4828 ptr += scsi_cmnd->cmd_len;
4829 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4830 }
4831
4832 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4833
4834 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4835
4836 return 0;
4837 }
4838
4839 /**
4840 * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
4841 * @vport: The virtual port for which this call is being executed.
4842 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4843 * @lun: Logical unit number.
4844 * @task_mgmt_cmd: SCSI task management command.
4845 *
4846 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4847 * for device with SLI-3 interface spec.
4848 *
4849 * Return codes:
4850 * 0 - Error
4851 * 1 - Success
4852 **/
4853 static int
4854 lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
4855 struct lpfc_io_buf *lpfc_cmd,
4856 u64 lun, u8 task_mgmt_cmd)
4857 {
4858 struct lpfc_iocbq *piocbq;
4859 IOCB_t *piocb;
4860 struct fcp_cmnd *fcp_cmnd;
4861 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4862 struct lpfc_nodelist *ndlp = rdata->pnode;
4863
4864 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4865 return 0;
4866
4867 piocbq = &(lpfc_cmd->cur_iocbq);
4868 piocbq->vport = vport;
4869
4870 piocb = &piocbq->iocb;
4871
4872 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4873 /* Clear out any old data in the FCP command area */
4874 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4875 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4876 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4877 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4878 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4879 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4880 piocb->ulpContext = ndlp->nlp_rpi;
4881 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4882 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4883 piocb->ulpPU = 0;
4884 piocb->un.fcpi.fcpi_parm = 0;
4885
4886 /* ulpTimeout is only one byte */
4887 if (lpfc_cmd->timeout > 0xff) {
4888 /*
4889 * Do not timeout the command at the firmware level.
4890 * The driver will provide the timeout mechanism.
4891 */
4892 piocb->ulpTimeout = 0;
4893 } else
4894 piocb->ulpTimeout = lpfc_cmd->timeout;
4895
4896 return 1;
4897 }
4898
4899 /**
4900 * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
4901 * @vport: The virtual port for which this call is being executed.
4902 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4903 * @lun: Logical unit number.
4904 * @task_mgmt_cmd: SCSI task management command.
4905 *
4906 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4907 * for device with SLI-4 interface spec.
4908 *
4909 * Return codes:
4910 * 0 - Error
4911 * 1 - Success
4912 **/
4913 static int
4914 lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
4915 struct lpfc_io_buf *lpfc_cmd,
4916 u64 lun, u8 task_mgmt_cmd)
4917 {
4918 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4919 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4920 struct fcp_cmnd *fcp_cmnd;
4921 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4922 struct lpfc_nodelist *ndlp = rdata->pnode;
4923
4924 if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4925 return 0;
4926
4927 pwqeq->vport = vport;
4928 /* Initialize 64 bytes only */
4929 memset(wqe, 0, sizeof(union lpfc_wqe128));
4930
4931 /* From the icmnd template, initialize words 4 - 11 */
4932 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4933 sizeof(uint32_t) * 8);
4934
4935 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4936 /* Clear out any old data in the FCP command area */
4937 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4938 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4939 fcp_cmnd->fcpCntl3 = 0;
4940 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4941
4942 bf_set(payload_offset_len, &wqe->fcp_icmd,
4943 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4944 bf_set(cmd_buff_len, &wqe->fcp_icmd, 0);
4945 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com, /* ulpContext */
4946 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
4947 bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
4948 ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0));
4949 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com,
4950 (ndlp->nlp_fcp_info & 0x0f));
4951
4952 /* ulpTimeout is only one byte */
4953 if (lpfc_cmd->timeout > 0xff) {
4954 /*
4955 * Do not timeout the command at the firmware level.
4956 * The driver will provide the timeout mechanism.
4957 */
4958 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, 0);
4959 } else {
4960 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, lpfc_cmd->timeout);
4961 }
4962
4963 lpfc_prep_embed_io(vport->phba, lpfc_cmd);
4964 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4965 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4966 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4967
4968 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4969
4970 return 1;
4971 }
4972
4973 /**
4974 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4975 * @phba: The hba struct for which this call is being executed.
4976 * @dev_grp: The HBA PCI-Device group number.
4977 *
4978 * This routine sets up the SCSI interface API function jump table in @phba
4979 * struct.
4980 * Returns: 0 - success, -ENODEV - failure.
4981 **/
4982 int
4983 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4984 {
4985
4986 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4987
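/* LPFC_PCI_DEV_LP adapters use the SLI-3 (_s3) routines, while
 * LPFC_PCI_DEV_OC adapters use the SLI-4 (_s4) routines.
 */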
4988 switch (dev_grp) {
4989 case LPFC_PCI_DEV_LP:
4990 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4991 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4992 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4993 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4994 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4995 phba->lpfc_scsi_prep_task_mgmt_cmd =
4996 lpfc_scsi_prep_task_mgmt_cmd_s3;
4997 break;
4998 case LPFC_PCI_DEV_OC:
4999 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
5000 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
5001 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
5002 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
5003 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
5004 phba->lpfc_scsi_prep_task_mgmt_cmd =
5005 lpfc_scsi_prep_task_mgmt_cmd_s4;
5006 break;
5007 default:
5008 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5009 "1418 Invalid HBA PCI-device group: 0x%x\n",
5010 dev_grp);
5011 return -ENODEV;
5012 }
5013 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5014 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
5015 return 0;
5016 }
5017
5018 /**
5019 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
5020 * @phba: The Hba for which this call is being executed.
5021 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
5022 * @rspiocbq: Pointer to lpfc_iocbq data structure.
5023 *
5024 * This routine is the IOCB completion routine for the device reset and target
5025 * reset routines. It releases the scsi buffer associated with lpfc_cmd.
5026 **/
5027 static void
5028 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5029 struct lpfc_iocbq *cmdiocbq,
5030 struct lpfc_iocbq *rspiocbq)
5031 {
5032 struct lpfc_io_buf *lpfc_cmd = cmdiocbq->io_buf;
5033 if (lpfc_cmd)
5034 lpfc_release_scsi_buf(phba, lpfc_cmd);
5035 return;
5036 }
5037
5038 /**
5039 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
5040 * if issuing a pci_bus_reset is possibly unsafe
5041 * @phba: lpfc_hba pointer.
5042 *
5043 * Description:
5044 * Walks the bus_list to ensure that only PCI devices with the Emulex
5045 * vendor ID, device IDs that support hot reset, and a single occurrence
5046 * of function 0 are present.
5047 *
5048 * Returns:
5049 * -EBADSLT, detected invalid device
5050 * 0, successful
5051 */
5052 int
5053 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5054 {
5055 const struct pci_dev *pdev = phba->pcidev;
5056 struct pci_dev *ptr = NULL;
5057 u8 counter = 0;
5058
5059 /* Walk the list of devices on the pci_dev's bus */
5060 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5061 /* Check for Emulex Vendor ID */
5062 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5063 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5064 "8346 Non-Emulex vendor found: "
5065 "0x%04x\n", ptr->vendor);
5066 return -EBADSLT;
5067 }
5068
5069 /* Check for valid Emulex Device ID */
5070 if (phba->sli_rev != LPFC_SLI_REV4 ||
5071 phba->hba_flag & HBA_FCOE_MODE) {
5072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5073 "8347 Incapable PCI reset device: "
5074 "0x%04x\n", ptr->device);
5075 return -EBADSLT;
5076 }
5077
5078 /* Check for only one function 0 ID to ensure only one HBA on
5079 * secondary bus
5080 */
5081 if (ptr->devfn == 0) {
5082 if (++counter > 1) {
5083 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5084 "8348 More than one device on "
5085 "secondary bus found\n");
5086 return -EBADSLT;
5087 }
5088 }
5089 }
5090
5091 return 0;
5092 }
5093
5094 /**
5095 * lpfc_info - Info entry point of scsi_host_template data structure
5096 * @host: The scsi host for which this call is being executed.
5097 *
5098 * This routine provides module information about the HBA.
5099 *
5100 * Return code:
5101 * Pointer to char - Success.
5102 **/
5103 const char *
5104 lpfc_info(struct Scsi_Host *host)
5105 {
5106 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5107 struct lpfc_hba *phba = vport->phba;
5108 int link_speed = 0;
5109 static char lpfcinfobuf[384];
5110 char tmp[384] = {0};
5111
5112 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5113 if (phba && phba->pcidev){
5114 /* Model Description */
5115 scnprintf(tmp, sizeof(tmp), "%s", phba->ModelDesc);
5116 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5117 sizeof(lpfcinfobuf))
5118 goto buffer_done;
5119
5120 /* PCI Info */
5121 scnprintf(tmp, sizeof(tmp),
5122 " on PCI bus %02x device %02x irq %d",
5123 phba->pcidev->bus->number, phba->pcidev->devfn,
5124 phba->pcidev->irq);
5125 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5126 sizeof(lpfcinfobuf))
5127 goto buffer_done;
5128
5129 /* Port Number */
5130 if (phba->Port[0]) {
5131 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5132 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5133 sizeof(lpfcinfobuf))
5134 goto buffer_done;
5135 }
5136
5137 /* Link Speed */
5138 link_speed = lpfc_sli_port_speed_get(phba);
5139 if (link_speed != 0) {
5140 scnprintf(tmp, sizeof(tmp),
5141 " Logical Link Speed: %d Mbps", link_speed);
5142 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5143 sizeof(lpfcinfobuf))
5144 goto buffer_done;
5145 }
5146
5147 /* PCI resettable */
5148 if (!lpfc_check_pci_resettable(phba)) {
5149 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5150 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5151 }
5152 }
5153
5154 buffer_done:
5155 return lpfcinfobuf;
5156 }
5157
5158 /**
5159 * lpfc_poll_rearm_timer - Routine to modify fcp_poll timer of hba
5160 * @phba: The Hba for which this call is being executed.
5161 *
5162 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
5163 * The default value of cfg_poll_tmo is 10 milliseconds.
5164 **/
5165 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5166 {
5167 unsigned long poll_tmo_expires =
5168 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5169
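/* Only rearm the poll timer while FCP commands remain outstanding
 * on the ring's txcmplq.
 */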
5170 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5171 mod_timer(&phba->fcp_poll_timer,
5172 poll_tmo_expires);
5173 }
5174
5175 /**
5176 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
5177 * @phba: The Hba for which this call is being executed.
5178 *
5179 * This routine starts the fcp_poll_timer of @phba.
5180 **/
5181 void lpfc_poll_start_timer(struct lpfc_hba * phba)
5182 {
5183 lpfc_poll_rearm_timer(phba);
5184 }
5185
5186 /**
5187 * lpfc_poll_timeout - Restart polling timer
5188 * @t: Timer construct where lpfc_hba data structure pointer is obtained.
5189 *
5190 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
5191 * and the FCP ring interrupt is disabled.
5192 **/
5193 void lpfc_poll_timeout(struct timer_list *t)
5194 {
5195 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5196
5197 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5198 lpfc_sli_handle_fast_ring_event(phba,
5199 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5200
5201 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5202 lpfc_poll_rearm_timer(phba);
5203 }
5204 }
5205
5206 /*
5207 * lpfc_is_command_vm_io - get the UUID from blk cgroup
5208 * @cmd: Pointer to scsi_cmnd data structure
5209 * Returns UUID if present, otherwise NULL
5210 */
5211 static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5212 {
5213 struct bio *bio = scsi_cmd_to_rq(cmd)->bio;
5214
5215 if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !bio)
5216 return NULL;
5217 return blkcg_get_fc_appid(bio);
5218 }
5219
5220 /**
5221 * lpfc_queuecommand - scsi_host_template queuecommand entry point
5222 * @shost: kernel scsi host pointer.
5223 * @cmnd: Pointer to scsi_cmnd data structure.
5224 *
5225 * The driver registers this routine with the scsi midlayer to submit a @cmnd.
5226 * It prepares an IOCB/WQE from the scsi command and provides it to the
5227 * firmware; scsi_done() is invoked after the driver finishes the command.
5228 *
5229 * Return value :
5230 * 0 - Success
5231 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
5232 **/
5233 static int
5234 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5235 {
5236 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5237 struct lpfc_hba *phba = vport->phba;
5238 struct lpfc_iocbq *cur_iocbq = NULL;
5239 struct lpfc_rport_data *rdata;
5240 struct lpfc_nodelist *ndlp;
5241 struct lpfc_io_buf *lpfc_cmd;
5242 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5243 int err, idx;
5244 u8 *uuid = NULL;
5245 uint64_t start;
5246
5247 start = ktime_get_ns();
5248 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5249
5250 /* sanity check on references */
5251 if (unlikely(!rdata) || unlikely(!rport))
5252 goto out_fail_command;
5253
5254 err = fc_remote_port_chkready(rport);
5255 if (err) {
5256 cmnd->result = err;
5257 goto out_fail_command;
5258 }
5259 ndlp = rdata->pnode;
5260
5261 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
5262 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5263
5264 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5265 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
5266 " op:%02x str=%s without registering for"
5267 " BlockGuard - Rejecting command\n",
5268 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
5269 dif_op_str[scsi_get_prot_op(cmnd)]);
5270 goto out_fail_command;
5271 }
5272
5273 /*
5274 * Catch race where our node has transitioned, but the
5275 * transport is still transitioning.
5276 */
5277 if (!ndlp)
5278 goto out_tgt_busy1;
5279
5280 /* Check if IO qualifies for CMF */
5281 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5282 cmnd->sc_data_direction == DMA_FROM_DEVICE &&
5283 (scsi_sg_count(cmnd))) {
5284 /* Latency start time saved in rx_cmd_start later in routine */
5285 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5286 if (err)
5287 goto out_tgt_busy1;
5288 }
5289
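/* Enforce the per-node queue depth: if this node already has cmd_qdepth
 * commands pending, report target busy so the midlayer retries later.
 */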
5290 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5291 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
5292 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5293 "3377 Target Queue Full, scsi Id:%d "
5294 "Qdepth:%d Pending command:%d"
5295 " WWNN:%02x:%02x:%02x:%02x:"
5296 "%02x:%02x:%02x:%02x, "
5297 " WWPN:%02x:%02x:%02x:%02x:"
5298 "%02x:%02x:%02x:%02x",
5299 ndlp->nlp_sid, ndlp->cmd_qdepth,
5300 atomic_read(&ndlp->cmd_pending),
5301 ndlp->nlp_nodename.u.wwn[0],
5302 ndlp->nlp_nodename.u.wwn[1],
5303 ndlp->nlp_nodename.u.wwn[2],
5304 ndlp->nlp_nodename.u.wwn[3],
5305 ndlp->nlp_nodename.u.wwn[4],
5306 ndlp->nlp_nodename.u.wwn[5],
5307 ndlp->nlp_nodename.u.wwn[6],
5308 ndlp->nlp_nodename.u.wwn[7],
5309 ndlp->nlp_portname.u.wwn[0],
5310 ndlp->nlp_portname.u.wwn[1],
5311 ndlp->nlp_portname.u.wwn[2],
5312 ndlp->nlp_portname.u.wwn[3],
5313 ndlp->nlp_portname.u.wwn[4],
5314 ndlp->nlp_portname.u.wwn[5],
5315 ndlp->nlp_portname.u.wwn[6],
5316 ndlp->nlp_portname.u.wwn[7]);
5317 goto out_tgt_busy2;
5318 }
5319 }
5320
5321 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5322 if (lpfc_cmd == NULL) {
5323 lpfc_rampdown_queue_depth(phba);
5324
5325 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
5326 "0707 driver's buffer pool is empty, "
5327 "IO busied\n");
5328 goto out_host_busy;
5329 }
5330 lpfc_cmd->rx_cmd_start = start;
5331
5332 cur_iocbq = &lpfc_cmd->cur_iocbq;
5333 /*
5334 * Store the midlayer's command structure for the completion phase
5335 * and complete the command initialization.
5336 */
5337 lpfc_cmd->pCmd = cmnd;
5338 lpfc_cmd->rdata = rdata;
5339 lpfc_cmd->ndlp = ndlp;
5340 cur_iocbq->cmd_cmpl = NULL;
5341 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
5342
5343 err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
5344 if (err)
5345 goto out_host_busy_release_buf;
5346
5347 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
5348 if (vport->phba->cfg_enable_bg) {
5349 lpfc_printf_vlog(vport,
5350 KERN_INFO, LOG_SCSI_CMD,
5351 "9033 BLKGRD: rcvd %s cmd:x%x "
5352 "reftag x%x cnt %u pt %x\n",
5353 dif_op_str[scsi_get_prot_op(cmnd)],
5354 cmnd->cmnd[0],
5355 scsi_prot_ref_tag(cmnd),
5356 scsi_logical_block_count(cmnd),
5357 (cmnd->cmnd[1]>>5));
5358 }
5359 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5360 } else {
5361 if (vport->phba->cfg_enable_bg) {
5362 lpfc_printf_vlog(vport,
5363 KERN_INFO, LOG_SCSI_CMD,
5364 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
5365 "x%x reftag x%x cnt %u pt %x\n",
5366 cmnd->cmnd[0],
5367 scsi_prot_ref_tag(cmnd),
5368 scsi_logical_block_count(cmnd),
5369 (cmnd->cmnd[1]>>5));
5370 }
5371 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5372 }
5373
5374 if (unlikely(err)) {
5375 if (err == 2) {
5376 cmnd->result = DID_ERROR << 16;
5377 goto out_fail_command_release_buf;
5378 }
5379 goto out_host_busy_free_buf;
5380 }
5381
5382 /* check the necessary and sufficient condition to support VMID */
5383 if (lpfc_is_vmid_enabled(phba) &&
5384 (ndlp->vmid_support ||
5385 phba->pport->vmid_priority_tagging ==
5386 LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {
5387 /* If the I/O was generated by a VM, get the associated */
5388 /* virtual entity id */
5389 uuid = lpfc_is_command_vm_io(cmnd);
5390
5391 if (uuid) {
5392 err = lpfc_vmid_get_appid(vport, uuid,
5393 cmnd->sc_data_direction,
5394 (union lpfc_vmid_io_tag *)
5395 &cur_iocbq->vmid_tag);
5396 if (!err)
5397 cur_iocbq->cmd_flag |= LPFC_IO_VMID;
5398 }
5399 }
5400
5401 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5402 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5403 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5404 #endif
5405 /* Issue I/O to adapter */
5406 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5407 SLI_IOCB_RET_IOCB);
5408 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
5409 if (start) {
5410 lpfc_cmd->ts_cmd_start = start;
5411 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5412 lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
5413 } else {
5414 lpfc_cmd->ts_cmd_start = 0;
5415 }
5416 #endif
5417 if (err) {
5418 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5419 "3376 FCP could not issue iocb err %x "
5420 "FCP cmd x%x <%d/%llu> "
5421 "sid: x%x did: x%x oxid: x%x "
5422 "Data: x%x x%x x%x x%x\n",
5423 err, cmnd->cmnd[0],
5424 cmnd->device ? cmnd->device->id : 0xffff,
5425 cmnd->device ? cmnd->device->lun : (u64)-1,
5426 vport->fc_myDID, ndlp->nlp_DID,
5427 phba->sli_rev == LPFC_SLI_REV4 ?
5428 cur_iocbq->sli4_xritag : 0xffff,
5429 phba->sli_rev == LPFC_SLI_REV4 ?
5430 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5431 cur_iocbq->iocb.ulpContext,
5432 cur_iocbq->iotag,
5433 phba->sli_rev == LPFC_SLI_REV4 ?
5434 bf_get(wqe_tmo,
5435 &cur_iocbq->wqe.generic.wqe_com) :
5436 cur_iocbq->iocb.ulpTimeout,
5437 (uint32_t)(scsi_cmd_to_rq(cmnd)->timeout / 1000));
5438
5439 goto out_host_busy_free_buf;
5440 }
5441
5442 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5443 lpfc_sli_handle_fast_ring_event(phba,
5444 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5445
5446 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5447 lpfc_poll_rearm_timer(phba);
5448 }
5449
5450 if (phba->cfg_xri_rebalancing)
5451 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5452
5453 return 0;
5454
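/* Error unwind paths: undo the DMA mapping and per-hardware-queue
 * statistics, release the IO buffer and back out the CMF accounting
 * before reporting busy or failing the command.
 */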
5455 out_host_busy_free_buf:
5456 idx = lpfc_cmd->hdwq_no;
5457 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5458 if (phba->sli4_hba.hdwq) {
5459 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
5460 case WRITE_DATA:
5461 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5462 break;
5463 case READ_DATA:
5464 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5465 break;
5466 default:
5467 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5468 }
5469 }
5470 out_host_busy_release_buf:
5471 lpfc_release_scsi_buf(phba, lpfc_cmd);
5472 out_host_busy:
5473 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5474 shost);
5475 return SCSI_MLQUEUE_HOST_BUSY;
5476
5477 out_tgt_busy2:
5478 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5479 shost);
5480 out_tgt_busy1:
5481 return SCSI_MLQUEUE_TARGET_BUSY;
5482
5483 out_fail_command_release_buf:
5484 lpfc_release_scsi_buf(phba, lpfc_cmd);
5485 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5486 shost);
5487
5488 out_fail_command:
5489 scsi_done(cmnd);
5490 return 0;
5491 }
5492
5493 /*
5494 * lpfc_vmid_vport_cleanup - cleans up the resources associated with a vport
5495 * @vport: The virtual port for which this call is being executed.
5496 */
5497 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5498 {
5499 u32 bucket;
5500 struct lpfc_vmid *cur;
5501
5502 if (vport->port_type == LPFC_PHYSICAL_PORT)
5503 del_timer_sync(&vport->phba->inactive_vmid_poll);
5504
5505 kfree(vport->qfpa_res);
5506 kfree(vport->vmid_priority.vmid_range);
5507 kfree(vport->vmid);
5508
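/* Drain any remaining entries from the vmid hash table. */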
5509 if (!hash_empty(vport->hash_table))
5510 hash_for_each(vport->hash_table, bucket, cur, hnode)
5511 hash_del(&cur->hnode);
5512
5513 vport->qfpa_res = NULL;
5514 vport->vmid_priority.vmid_range = NULL;
5515 vport->vmid = NULL;
5516 vport->cur_vmid_cnt = 0;
5517 }
5518
5519 /**
5520 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
5521 * @cmnd: Pointer to scsi_cmnd data structure.
5522 *
5523 * This routine aborts @cmnd pending in base driver.
5524 *
5525 * Return code :
5526 * 0x2003 - Error
5527 * 0x2002 - Success
5528 **/
5529 static int
5530 lpfc_abort_handler(struct scsi_cmnd *cmnd)
5531 {
5532 struct Scsi_Host *shost = cmnd->device->host;
5533 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5534 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5535 struct lpfc_hba *phba = vport->phba;
5536 struct lpfc_iocbq *iocb;
5537 struct lpfc_io_buf *lpfc_cmd;
5538 int ret = SUCCESS, status = 0;
5539 struct lpfc_sli_ring *pring_s4 = NULL;
5540 struct lpfc_sli_ring *pring = NULL;
5541 int ret_val;
5542 unsigned long flags;
5543 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
5544
5545 status = fc_block_rport(rport);
5546 if (status != 0 && status != SUCCESS)
5547 return status;
5548
5549 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
5550 if (!lpfc_cmd)
5551 return ret;
5552
5553 /* Guard against IO completion being called at same time */
5554 spin_lock_irqsave(&lpfc_cmd->buf_lock, flags);
5555
5556 spin_lock(&phba->hbalock);
5557 /* driver queued commands are in process of being flushed */
5558 if (phba->hba_flag & HBA_IOQ_FLUSH) {
5559 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5560 "3168 SCSI Layer abort requested I/O has been "
5561 "flushed by LLD.\n");
5562 ret = FAILED;
5563 goto out_unlock_hba;
5564 }
5565
5566 if (!lpfc_cmd->pCmd) {
5567 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5568 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
5569 "x%x ID %d LUN %llu\n",
5570 SUCCESS, cmnd->device->id, cmnd->device->lun);
5571 goto out_unlock_hba;
5572 }
5573
5574 iocb = &lpfc_cmd->cur_iocbq;
5575 if (phba->sli_rev == LPFC_SLI_REV4) {
5576 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5577 if (!pring_s4) {
5578 ret = FAILED;
5579 goto out_unlock_hba;
5580 }
5581 spin_lock(&pring_s4->ring_lock);
5582 }
5583 /* the command is in process of being cancelled */
5584 if (!(iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
5585 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5586 "3169 SCSI Layer abort requested I/O has been "
5587 "cancelled by LLD.\n");
5588 ret = FAILED;
5589 goto out_unlock_ring;
5590 }
5591 /*
5592 * If pCmd field of the corresponding lpfc_io_buf structure
5593 * points to a different SCSI command, then the driver has
5594 * already completed this command, but the midlayer did not
5595 * see the completion before the eh fired. Just return SUCCESS.
5596 */
5597 if (lpfc_cmd->pCmd != cmnd) {
5598 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5599 "3170 SCSI Layer abort requested I/O has been "
5600 "completed by LLD.\n");
5601 goto out_unlock_ring;
5602 }
5603
5604 WARN_ON(iocb->io_buf != lpfc_cmd);
5605
5606 /* abort issued in recovery is still in progress */
5607 if (iocb->cmd_flag & LPFC_DRIVER_ABORTED) {
5608 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5609 "3389 SCSI Layer I/O Abort Request is pending\n");
5610 if (phba->sli_rev == LPFC_SLI_REV4)
5611 spin_unlock(&pring_s4->ring_lock);
5612 spin_unlock(&phba->hbalock);
5613 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5614 goto wait_for_cmpl;
5615 }
5616
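/* Publish the wait queue so the abort completion path can wake this
 * thread once the aborted command has actually finished.
 */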
5617 lpfc_cmd->waitq = &waitq;
5618 if (phba->sli_rev == LPFC_SLI_REV4) {
5619 spin_unlock(&pring_s4->ring_lock);
5620 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5621 lpfc_sli_abort_fcp_cmpl);
5622 } else {
5623 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5624 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5625 lpfc_sli_abort_fcp_cmpl);
5626 }
5627
5628 /* Make sure HBA is alive */
5629 lpfc_issue_hb_tmo(phba);
5630
5631 if (ret_val != IOCB_SUCCESS) {
5632 /* Indicate the IO is not being aborted by the driver. */
5633 lpfc_cmd->waitq = NULL;
5634 ret = FAILED;
5635 goto out_unlock_hba;
5636 }
5637
5638 /* no longer need the lock after this point */
5639 spin_unlock(&phba->hbalock);
5640 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5641
5642 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5643 lpfc_sli_handle_fast_ring_event(phba,
5644 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5645
5646 wait_for_cmpl:
5647 /*
5648 * cmd_flag is set to LPFC_DRIVER_ABORTED before we wait
5649 * for abort to complete.
5650 */
5651 wait_event_timeout(waitq,
5652 (lpfc_cmd->pCmd != cmnd),
5653 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
5654
5655 spin_lock(&lpfc_cmd->buf_lock);
5656
5657 if (lpfc_cmd->pCmd == cmnd) {
5658 ret = FAILED;
5659 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5660 "0748 abort handler timed out waiting "
5661 "for aborting I/O (xri:x%x) to complete: "
5662 "ret %#x, ID %d, LUN %llu\n",
5663 iocb->sli4_xritag, ret,
5664 cmnd->device->id, cmnd->device->lun);
5665 }
5666
5667 lpfc_cmd->waitq = NULL;
5668
5669 spin_unlock(&lpfc_cmd->buf_lock);
5670 goto out;
5671
5672 out_unlock_ring:
5673 if (phba->sli_rev == LPFC_SLI_REV4)
5674 spin_unlock(&pring_s4->ring_lock);
5675 out_unlock_hba:
5676 spin_unlock(&phba->hbalock);
5677 spin_unlock_irqrestore(&lpfc_cmd->buf_lock, flags);
5678 out:
5679 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5680 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
5681 "LUN %llu\n", ret, cmnd->device->id,
5682 cmnd->device->lun);
5683 return ret;
5684 }
5685
5686 static char *
5687 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5688 {
5689 switch (task_mgmt_cmd) {
5690 case FCP_ABORT_TASK_SET:
5691 return "ABORT_TASK_SET";
5692 case FCP_CLEAR_TASK_SET:
5693 return "FCP_CLEAR_TASK_SET";
5694 case FCP_BUS_RESET:
5695 return "FCP_BUS_RESET";
5696 case FCP_LUN_RESET:
5697 return "FCP_LUN_RESET";
5698 case FCP_TARGET_RESET:
5699 return "FCP_TARGET_RESET";
5700 case FCP_CLEAR_ACA:
5701 return "FCP_CLEAR_ACA";
5702 case FCP_TERMINATE_TASK:
5703 return "FCP_TERMINATE_TASK";
5704 default:
5705 return "unknown";
5706 }
5707 }
5708
5709
5710 /**
5711 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
5712 * @vport: The virtual port for which this call is being executed.
5713 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
5714 *
5715 * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
5716 *
5717 * Return code :
5718 * 0x2003 - Error
5719 * 0x2002 - Success
5720 **/
5721 static int
5722 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
5723 {
5724 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
5725 uint32_t rsp_info;
5726 uint32_t rsp_len;
5727 uint8_t rsp_info_code;
5728 int ret = FAILED;
5729
5730
5731 if (fcprsp == NULL)
5732 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5733 "0703 fcp_rsp is missing\n");
5734 else {
5735 rsp_info = fcprsp->rspStatus2;
5736 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
5737 rsp_info_code = fcprsp->rspInfo3;
5738
5739
5740 lpfc_printf_vlog(vport, KERN_INFO,
5741 LOG_FCP,
5742 "0706 fcp_rsp valid 0x%x,"
5743 " rsp len=%d code 0x%x\n",
5744 rsp_info,
5745 rsp_len, rsp_info_code);
5746
5747 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
5748 * field specifies the number of valid bytes of FCP_RSP_INFO.
5749 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
5750 */
5751 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
5752 ((rsp_len == 8) || (rsp_len == 4))) {
5753 switch (rsp_info_code) {
5754 case RSP_NO_FAILURE:
5755 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5756 "0715 Task Mgmt No Failure\n");
5757 ret = SUCCESS;
5758 break;
5759 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
5760 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5761 "0716 Task Mgmt Target "
5762 "reject\n");
5763 break;
5764 case RSP_TM_NOT_COMPLETED: /* TM failed */
5765 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5766 "0717 Task Mgmt Target "
5767 "failed TM\n");
5768 break;
5769 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
5770 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5771 "0718 Task Mgmt to invalid "
5772 "LUN\n");
5773 break;
5774 }
5775 }
5776 }
5777 return ret;
5778 }
5779
5780
5781 /**
5782 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5783 * @vport: The virtual port for which this call is being executed.
5784 * @rport: Pointer to remote port
5785 * @tgt_id: Target ID of remote device.
5786 * @lun_id: Lun number for the TMF
5787 * @task_mgmt_cmd: type of TMF to send
5788 *
5789 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5790 * a remote port.
5791 *
5792 * Return Code:
5793 * 0x2003 - Error
5794 * 0x2002 - Success.
5795 **/
5796 static int
5797 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport,
5798 unsigned int tgt_id, uint64_t lun_id,
5799 uint8_t task_mgmt_cmd)
5800 {
5801 struct lpfc_hba *phba = vport->phba;
5802 struct lpfc_io_buf *lpfc_cmd;
5803 struct lpfc_iocbq *iocbq;
5804 struct lpfc_iocbq *iocbqrsp;
5805 struct lpfc_rport_data *rdata;
5806 struct lpfc_nodelist *pnode;
5807 int ret;
5808 int status;
5809
5810 rdata = rport->dd_data;
5811 if (!rdata || !rdata->pnode)
5812 return FAILED;
5813 pnode = rdata->pnode;
5814
5815 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5816 if (lpfc_cmd == NULL)
5817 return FAILED;
5818 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5819 lpfc_cmd->rdata = rdata;
5820 lpfc_cmd->pCmd = NULL;
5821 lpfc_cmd->ndlp = pnode;
5822
5823 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5824 task_mgmt_cmd);
5825 if (!status) {
5826 lpfc_release_scsi_buf(phba, lpfc_cmd);
5827 return FAILED;
5828 }
5829
5830 iocbq = &lpfc_cmd->cur_iocbq;
5831 iocbqrsp = lpfc_sli_get_iocbq(phba);
5832 if (iocbqrsp == NULL) {
5833 lpfc_release_scsi_buf(phba, lpfc_cmd);
5834 return FAILED;
5835 }
5836 iocbq->cmd_cmpl = lpfc_tskmgmt_def_cmpl;
5837 iocbq->vport = vport;
5838
5839 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5840 "0702 Issue %s to TGT %d LUN %llu "
5841 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5842 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5843 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5844 iocbq->cmd_flag);
5845
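/* Issue the TMF synchronously; lpfc_sli_issue_iocb_wait() blocks until
 * the response IOCB arrives or lpfc_cmd->timeout expires.
 */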
5846 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5847 iocbq, iocbqrsp, lpfc_cmd->timeout);
5848 if ((status != IOCB_SUCCESS) ||
5849 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
5850 if (status != IOCB_SUCCESS ||
5851 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5852 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5853 "0727 TMF %s to TGT %d LUN %llu "
5854 "failed (%d, %d) cmd_flag x%x\n",
5855 lpfc_taskmgmt_name(task_mgmt_cmd),
5856 tgt_id, lun_id,
5857 get_job_ulpstatus(phba, iocbqrsp),
5858 get_job_word4(phba, iocbqrsp),
5859 iocbq->cmd_flag);
5860 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5861 if (status == IOCB_SUCCESS) {
5862 if (get_job_ulpstatus(phba, iocbqrsp) ==
5863 IOSTAT_FCP_RSP_ERROR)
5864 /* Something in the FCP_RSP was invalid.
5865 * Check conditions */
5866 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5867 else
5868 ret = FAILED;
5869 } else if ((status == IOCB_TIMEDOUT) ||
5870 (status == IOCB_ABORTED)) {
5871 ret = TIMEOUT_ERROR;
5872 } else {
5873 ret = FAILED;
5874 }
5875 } else
5876 ret = SUCCESS;
5877
5878 lpfc_sli_release_iocbq(phba, iocbqrsp);
5879
5880 if (status != IOCB_TIMEDOUT)
5881 lpfc_release_scsi_buf(phba, lpfc_cmd);
5882
5883 return ret;
5884 }
5885
5886 /**
5887 * lpfc_chk_tgt_mapped - wait for the scsi target (rport) to become mapped
5888 * @vport: The virtual port to check on
5889 * @rport: Pointer to fc_rport data structure.
5890 *
5891 * This routine delays until the scsi target (aka rport) for the
5892 * command exists (is present and logged in) or we declare it non-existent.
5893 *
5894 * Return code :
5895 * 0x2003 - Error
5896 * 0x2002 - Success
5897 **/
5898 static int
5899 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
5900 {
5901 struct lpfc_rport_data *rdata;
5902 struct lpfc_nodelist *pnode = NULL;
5903 unsigned long later;
5904
5905 rdata = rport->dd_data;
5906 if (!rdata) {
5907 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5908 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5909 return FAILED;
5910 }
5911 pnode = rdata->pnode;
5912
5913 /*
5914 * If target is not in a MAPPED state, delay until
5915 * target is rediscovered or devloss timeout expires.
5916 */
5917 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5918 while (time_after(later, jiffies)) {
5919 if (!pnode)
5920 return FAILED;
5921 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5922 return SUCCESS;
5923 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5924 rdata = rport->dd_data;
5925 if (!rdata)
5926 return FAILED;
5927 pnode = rdata->pnode;
5928 }
5929 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5930 return FAILED;
5931 return SUCCESS;
5932 }
5933
5934 /**
5935 * lpfc_reset_flush_io_context - flush orphaned I/O contexts after a reset TMF
5936 * @vport: The virtual port (scsi_host) for the flush context
5937 * @tgt_id: If aborting by Target context - specifies the target id
5938 * @lun_id: If aborting by Lun context - specifies the lun id
5939 * @context: specifies the context level to flush at.
5940 *
5941 * After a reset condition via TMF, we need to flush orphaned i/o
5942 * contexts from the adapter. This routine aborts any contexts
5943 * outstanding, then waits for their completions. The wait is
5944 * bounded by devloss_tmo though.
5945 *
5946 * Return code :
5947 * 0x2003 - Error
5948 * 0x2002 - Success
5949 **/
5950 static int
5951 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5952 uint64_t lun_id, lpfc_ctx_cmd context)
5953 {
5954 struct lpfc_hba *phba = vport->phba;
5955 unsigned long later;
5956 int cnt;
5957
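/* Abort any I/O still outstanding for this context, then poll until it
 * drains or twice the devloss timeout elapses.
 */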
5958 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5959 if (cnt)
5960 lpfc_sli_abort_taskmgmt(vport,
5961 &phba->sli.sli3_ring[LPFC_FCP_RING],
5962 tgt_id, lun_id, context);
5963 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5964 while (time_after(later, jiffies) && cnt) {
5965 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5966 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5967 }
5968 if (cnt) {
5969 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
5970 "0724 I/O flush failure for context %s : cnt x%x\n",
5971 ((context == LPFC_CTX_LUN) ? "LUN" :
5972 ((context == LPFC_CTX_TGT) ? "TGT" :
5973 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5974 cnt);
5975 return FAILED;
5976 }
5977 return SUCCESS;
5978 }
5979
5980 /**
5981 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5982 * @cmnd: Pointer to scsi_cmnd data structure.
5983 *
5984 * This routine does a device reset by sending a LUN_RESET task management
5985 * command.
5986 *
5987 * Return code :
5988 * 0x2003 - Error
5989 * 0x2002 - Success
5990 **/
5991 static int
5992 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5993 {
5994 struct Scsi_Host *shost = cmnd->device->host;
5995 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
5996 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5997 struct lpfc_rport_data *rdata;
5998 struct lpfc_nodelist *pnode;
5999 unsigned tgt_id = cmnd->device->id;
6000 uint64_t lun_id = cmnd->device->lun;
6001 struct lpfc_scsi_event_header scsi_event;
6002 int status;
6003 u32 logit = LOG_FCP;
6004
6005 if (!rport)
6006 return FAILED;
6007
6008 rdata = rport->dd_data;
6009 if (!rdata || !rdata->pnode) {
6010 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6011 "0798 Device Reset rdata failure: rdata x%px\n",
6012 rdata);
6013 return FAILED;
6014 }
6015 pnode = rdata->pnode;
6016 status = fc_block_rport(rport);
6017 if (status != 0 && status != SUCCESS)
6018 return status;
6019
6020 status = lpfc_chk_tgt_mapped(vport, rport);
6021 if (status == FAILED) {
6022 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6023 "0721 Device Reset rport failure: rdata x%px\n", rdata);
6024 return FAILED;
6025 }
6026
6027 scsi_event.event_type = FC_REG_SCSI_EVENT;
6028 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
6029 scsi_event.lun = lun_id;
6030 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6031 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6032
6033 fc_host_post_vendor_event(shost, fc_get_event_number(),
6034 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6035
6036 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6037 FCP_LUN_RESET);
6038 if (status != SUCCESS)
6039 logit = LOG_TRACE_EVENT;
6040
6041 lpfc_printf_vlog(vport, KERN_ERR, logit,
6042 "0713 SCSI layer issued Device Reset (%d, %llu) "
6043 "return x%x\n", tgt_id, lun_id, status);
6044
6045 /*
6046 * We have to clean up i/o as : they may be orphaned by the TMF;
6047 * or if the TMF failed, they may be in an indeterminate state.
6048 * So, continue on.
6049 * We will report success if all the i/o aborts successfully.
6050 */
6051 if (status == SUCCESS)
6052 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6053 LPFC_CTX_LUN);
6054
6055 return status;
6056 }
6057
6058 /**
6059 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
6060 * @cmnd: Pointer to scsi_cmnd data structure.
6061 *
6062 * This routine does a target reset by sending a TARGET_RESET task management
6063 * command.
6064 *
6065 * Return code :
6066 * 0x2003 - Error
6067 * 0x2002 - Success
6068 **/
6069 static int
6070 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
6071 {
6072 struct Scsi_Host *shost = cmnd->device->host;
6073 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
6074 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6075 struct lpfc_rport_data *rdata;
6076 struct lpfc_nodelist *pnode;
6077 unsigned tgt_id = cmnd->device->id;
6078 uint64_t lun_id = cmnd->device->lun;
6079 struct lpfc_scsi_event_header scsi_event;
6080 int status;
6081 u32 logit = LOG_FCP;
6082 u32 dev_loss_tmo = vport->cfg_devloss_tmo;
6083 unsigned long flags;
6084 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
6085
6086 if (!rport)
6087 return FAILED;
6088
6089 rdata = rport->dd_data;
6090 if (!rdata || !rdata->pnode) {
6091 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6092 "0799 Target Reset rdata failure: rdata x%px\n",
6093 rdata);
6094 return FAILED;
6095 }
6096 pnode = rdata->pnode;
6097 status = fc_block_rport(rport);
6098 if (status != 0 && status != SUCCESS)
6099 return status;
6100
6101 status = lpfc_chk_tgt_mapped(vport, rport);
6102 if (status == FAILED) {
6103 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6104 "0722 Target Reset rport failure: rdata x%px\n", rdata);
6105 if (pnode) {
6106 spin_lock_irqsave(&pnode->lock, flags);
6107 pnode->nlp_flag &= ~NLP_NPR_ADISC;
6108 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6109 spin_unlock_irqrestore(&pnode->lock, flags);
6110 }
6111 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6112 LPFC_CTX_TGT);
6113 return FAST_IO_FAIL;
6114 }
6115
6116 scsi_event.event_type = FC_REG_SCSI_EVENT;
6117 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
6118 scsi_event.lun = 0;
6119 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
6120 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
6121
6122 fc_host_post_vendor_event(shost, fc_get_event_number(),
6123 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
6124
6125 status = lpfc_send_taskmgmt(vport, rport, tgt_id, lun_id,
6126 FCP_TARGET_RESET);
6127 if (status != SUCCESS) {
6128 logit = LOG_TRACE_EVENT;
6129
6130 /* Issue LOGO, if no LOGO is outstanding */
6131 spin_lock_irqsave(&pnode->lock, flags);
6132 if (!(pnode->save_flags & NLP_WAIT_FOR_LOGO) &&
6133 !pnode->logo_waitq) {
6134 pnode->logo_waitq = &waitq;
6135 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
6136 pnode->nlp_flag |= NLP_ISSUE_LOGO;
6137 pnode->save_flags |= NLP_WAIT_FOR_LOGO;
6138 spin_unlock_irqrestore(&pnode->lock, flags);
6139 lpfc_unreg_rpi(vport, pnode);
6140 wait_event_timeout(waitq,
6141 (!(pnode->save_flags &
6142 NLP_WAIT_FOR_LOGO)),
6143 msecs_to_jiffies(dev_loss_tmo *
6144 1000));
6145
6146 if (pnode->save_flags & NLP_WAIT_FOR_LOGO) {
6147 lpfc_printf_vlog(vport, KERN_ERR, logit,
6148 "0725 SCSI layer TGTRST "
6149 "failed & LOGO TMO (%d, %llu) "
6150 "return x%x\n",
6151 tgt_id, lun_id, status);
6152 spin_lock_irqsave(&pnode->lock, flags);
6153 pnode->save_flags &= ~NLP_WAIT_FOR_LOGO;
6154 } else {
6155 spin_lock_irqsave(&pnode->lock, flags);
6156 }
6157 pnode->logo_waitq = NULL;
6158 spin_unlock_irqrestore(&pnode->lock, flags);
6159 status = SUCCESS;
6160
6161 } else {
6162 spin_unlock_irqrestore(&pnode->lock, flags);
6163 status = FAILED;
6164 }
6165 }
6166
6167 lpfc_printf_vlog(vport, KERN_ERR, logit,
6168 "0723 SCSI layer issued Target Reset (%d, %llu) "
6169 "return x%x\n", tgt_id, lun_id, status);
6170
6171 /*
6172 * We have to clean up i/o as : they may be orphaned by the TMF;
6173 * or if the TMF failed, they may be in an indeterminate state.
6174 * So, continue on.
6175 * We will report success if all the i/o aborts successfully.
6176 */
6177 if (status == SUCCESS)
6178 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
6179 LPFC_CTX_TGT);
6180 return status;
6181 }
6182
6183 /**
6184 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
6185 * @cmnd: Pointer to scsi_cmnd data structure.
6186 *
6187 * This routine performs a host reset of the adapter port. It brings the HBA
6188 * offline, performs a board restart, and then brings the board back online.
6189 * lpfc_offline calls lpfc_sli_hba_down, which aborts and locally rejects
6190 * all outstanding SCSI commands on the host, returning errors back to the
6191 * SCSI mid-level. As this is the SCSI mid-level's last resort for error
6192 * handling, the routine returns an error only if resetting the adapter
6193 * fails; in all other cases it returns success.
6194 *
6195 * Return code :
6196 * 0x2003 - Error
6197 * 0x2002 - Success
6198 **/
6199 static int
6200 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6201 {
6202 struct Scsi_Host *shost = cmnd->device->host;
6203 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6204 struct lpfc_hba *phba = vport->phba;
6205 int rc, ret = SUCCESS;
6206
6207 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6208 "3172 SCSI layer issued Host Reset Data:\n");
6209
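/* Take the port offline, restart the board and bring it back online;
 * outstanding I/O is failed back to the midlayer while offline.
 */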
6210 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6211 lpfc_offline(phba);
6212 rc = lpfc_sli_brdrestart(phba);
6213 if (rc)
6214 goto error;
6215
6216 /* Wait for successful restart of adapter */
6217 if (phba->sli_rev < LPFC_SLI_REV4) {
6218 rc = lpfc_sli_chipset_init(phba);
6219 if (rc)
6220 goto error;
6221 }
6222
6223 rc = lpfc_online(phba);
6224 if (rc)
6225 goto error;
6226
6227 lpfc_unblock_mgmt_io(phba);
6228
6229 return ret;
6230 error:
6231 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6232 "3323 Failed host reset\n");
6233 lpfc_unblock_mgmt_io(phba);
6234 return FAILED;
6235 }
6236
6237 /**
6238 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
6239 * @sdev: Pointer to scsi_device.
6240 *
6241 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
6242 * globally available list of scsi buffers. It also makes sure that no more
6243 * scsi buffers are allocated than the HBA limit conveyed to the midlayer.
6244 * This list of scsi buffers exists for the lifetime of the driver.
6245 *
6246 * Return codes:
6247 * non-0 - Error
6248 * 0 - Success
6249 **/
6250 static int
6251 lpfc_slave_alloc(struct scsi_device *sdev)
6252 {
6253 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6254 struct lpfc_hba *phba = vport->phba;
6255 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
6256 uint32_t total = 0;
6257 uint32_t num_to_alloc = 0;
6258 int num_allocated = 0;
6259 uint32_t sdev_cnt;
6260 struct lpfc_device_data *device_data;
6261 unsigned long flags;
6262 struct lpfc_name target_wwpn;
6263
6264 if (!rport || fc_remote_port_chkready(rport))
6265 return -ENXIO;
6266
6267 if (phba->cfg_fof) {
6268
6269 /*
6270 * Check to see if the device data structure for the lun
6271 * exists. If not, create one.
6272 */
6273
6274 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
6275 spin_lock_irqsave(&phba->devicelock, flags);
6276 device_data = __lpfc_get_device_data(phba,
6277 &phba->luns,
6278 &vport->fc_portname,
6279 &target_wwpn,
6280 sdev->lun);
6281 if (!device_data) {
6282 spin_unlock_irqrestore(&phba->devicelock, flags);
6283 device_data = lpfc_create_device_data(phba,
6284 &vport->fc_portname,
6285 &target_wwpn,
6286 sdev->lun,
6287 phba->cfg_XLanePriority,
6288 true);
6289 if (!device_data)
6290 return -ENOMEM;
6291 spin_lock_irqsave(&phba->devicelock, flags);
6292 list_add_tail(&device_data->listentry, &phba->luns);
6293 }
6294 device_data->rport_data = rport->dd_data;
6295 device_data->available = true;
6296 spin_unlock_irqrestore(&phba->devicelock, flags);
6297 sdev->hostdata = device_data;
6298 } else {
6299 sdev->hostdata = rport->dd_data;
6300 }
6301 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6302
6303 /* For SLI4, all IO buffers are pre-allocated */
6304 if (phba->sli_rev == LPFC_SLI_REV4)
6305 return 0;
6306
6307 /* This code path is now ONLY for SLI3 adapters */
6308
6309 /*
6310 * Populate the cmds_per_lun count scsi_bufs into this host's globally
6311 * available list of scsi buffers. Don't allocate more than the
6312 * HBA limit conveyed to the midlayer via the host structure. The
6313 * formula accounts for the lun_queue_depth + error handlers + 1
6314 * extra. This list of scsi bufs exists for the lifetime of the driver.
6315 */
6316 total = phba->total_scsi_bufs;
6317 num_to_alloc = vport->cfg_lun_queue_depth + 2;
6318
6319 /* If allocated buffers are enough do nothing */
6320 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
6321 return 0;
6322
6323 /* Allow some exchanges to be available always to complete discovery */
6324 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6325 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6326 "0704 At limitation of %d preallocated "
6327 "command buffers\n", total);
6328 return 0;
6329 /* Allow some exchanges to be available always to complete discovery */
6330 } else if (total + num_to_alloc >
6331 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
6332 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
6333 "0705 Allocation request of %d "
6334 "command buffers will exceed max of %d. "
6335 "Reducing allocation request to %d.\n",
6336 num_to_alloc, phba->cfg_hba_queue_depth,
6337 (phba->cfg_hba_queue_depth - total));
6338 num_to_alloc = phba->cfg_hba_queue_depth - total;
6339 }
6340 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
6341 if (num_to_alloc != num_allocated) {
6342 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6343 "0708 Allocation request of %d "
6344 "command buffers did not succeed. "
6345 "Allocated %d buffers.\n",
6346 num_to_alloc, num_allocated);
6347 }
6348 if (num_allocated > 0)
6349 phba->total_scsi_bufs += num_allocated;
6350 return 0;
6351 }
6352
6353 /**
6354 * lpfc_slave_configure - scsi_host_template slave_configure entry point
6355 * @sdev: Pointer to scsi_device.
6356 *
6357 * This routine configures following items
6358 * - Tag command queuing support for @sdev if supported.
6359 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
6360 *
6361 * Return codes:
6362 * 0 - Success
6363 **/
6364 static int
6365 lpfc_slave_configure(struct scsi_device *sdev)
6366 {
6367 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6368 struct lpfc_hba *phba = vport->phba;
6369
6370 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6371
6372 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6373 lpfc_sli_handle_fast_ring_event(phba,
6374 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6375 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6376 lpfc_poll_rearm_timer(phba);
6377 }
6378
6379 return 0;
6380 }
6381
6382 /**
6383 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
6384 * @sdev: Pointer to scsi_device.
6385 *
6386 * This routine sets the @sdev hostdata field to NULL.
6387 **/
6388 static void
6389 lpfc_slave_destroy(struct scsi_device *sdev)
6390 {
6391 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6392 struct lpfc_hba *phba = vport->phba;
6393 unsigned long flags;
6394 struct lpfc_device_data *device_data = sdev->hostdata;
6395
6396 atomic_dec(&phba->sdev_cnt);
6397 if ((phba->cfg_fof) && (device_data)) {
6398 spin_lock_irqsave(&phba->devicelock, flags);
6399 device_data->available = false;
6400 if (!device_data->oas_enabled)
6401 lpfc_delete_device_data(phba, device_data);
6402 spin_unlock_irqrestore(&phba->devicelock, flags);
6403 }
6404 sdev->hostdata = NULL;
6405 return;
6406 }
6407
6408 /**
6409 * lpfc_create_device_data - creates and initializes device data structure for OAS
6410 * @phba: Pointer to host bus adapter structure.
6411 * @vport_wwpn: Pointer to vport's wwpn information
6412 * @target_wwpn: Pointer to target's wwpn information
6413 * @lun: Lun on target
6414 * @pri: Priority
6415 * @atomic_create: Flag to indicate if memory should be allocated using the
6416 * GFP_ATOMIC flag or not.
6417 *
6418 * This routine creates a device data structure which will contain identifying
6419 * information for the device (host wwpn, target wwpn, lun), state of OAS,
6420 * whether or not the corresponding lun is available to the system,
6421 * and pointer to the rport data.
6422 *
6423 * Return codes:
6424 * NULL - Error
6425 * Pointer to lpfc_device_data - Success
6426 **/
6427 struct lpfc_device_data*
6428 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6429 struct lpfc_name *target_wwpn, uint64_t lun,
6430 uint32_t pri, bool atomic_create)
6431 {
6432
6433 struct lpfc_device_data *lun_info;
6434 int memory_flags;
6435
6436 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6437 !(phba->cfg_fof))
6438 return NULL;
6439
6440 /* Attempt to create the device data to contain lun info */
6441
6442 if (atomic_create)
6443 memory_flags = GFP_ATOMIC;
6444 else
6445 memory_flags = GFP_KERNEL;
6446 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6447 if (!lun_info)
6448 return NULL;
6449 INIT_LIST_HEAD(&lun_info->listentry);
6450 lun_info->rport_data = NULL;
6451 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6452 sizeof(struct lpfc_name));
6453 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6454 sizeof(struct lpfc_name));
6455 lun_info->device_id.lun = lun;
6456 lun_info->oas_enabled = false;
6457 lun_info->priority = pri;
6458 lun_info->available = false;
6459 return lun_info;
6460 }
6461
6462 /**
6463 * lpfc_delete_device_data - frees a device data structure for OAS
6464 * @phba: Pointer to host bus adapter structure.
6465 * @lun_info: Pointer to device data structure to free.
6466 *
6467 * This routine frees the previously allocated device data structure passed.
6468 *
6469 **/
6470 void
6471 lpfc_delete_device_data(struct lpfc_hba *phba,
6472 struct lpfc_device_data *lun_info)
6473 {
6474
6475 if (unlikely(!phba) || !lun_info ||
6476 !(phba->cfg_fof))
6477 return;
6478
6479 if (!list_empty(&lun_info->listentry))
6480 list_del(&lun_info->listentry);
6481 mempool_free(lun_info, phba->device_data_mem_pool);
6482 return;
6483 }
6484
6485 /**
6486 * __lpfc_get_device_data - returns the device data for the specified lun
6487 * @phba: Pointer to host bus adapter structure.
6488 * @list: Point to list to search.
6489 * @vport_wwpn: Pointer to vport's wwpn information
6490 * @target_wwpn: Pointer to target's wwpn information
6491 * @lun: Lun on target
6492 *
6493 * This routine searches the list passed for the specified lun's device data.
6494 * This function does not hold locks, it is the responsibility of the caller
6495 * to ensure the proper lock is held before calling the function.
6496 *
6497 * Return codes:
6498 * NULL - Error
6499 * Pointer to lpfc_device_data - Success
6500 **/
6501 struct lpfc_device_data*
6502 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6503 struct lpfc_name *vport_wwpn,
6504 struct lpfc_name *target_wwpn, uint64_t lun)
6505 {
6506
6507 struct lpfc_device_data *lun_info;
6508
6509 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6510 !phba->cfg_fof)
6511 return NULL;
6512
6513 /* Check to see if the lun is already enabled for OAS. */
6514
6515 list_for_each_entry(lun_info, list, listentry) {
6516 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6517 sizeof(struct lpfc_name)) == 0) &&
6518 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6519 sizeof(struct lpfc_name)) == 0) &&
6520 (lun_info->device_id.lun == lun))
6521 return lun_info;
6522 }
6523
6524 return NULL;
6525 }
6526
6527 /**
6528 * lpfc_find_next_oas_lun - searches for the next oas lun
6529 * @phba: Pointer to host bus adapter structure.
6530 * @vport_wwpn: Pointer to vport's wwpn information
6531 * @target_wwpn: Pointer to target's wwpn information
6532 * @starting_lun: Pointer to the lun to start searching for
6533 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
6534 * @found_target_wwpn: Pointer to the found lun's target wwpn information
6535 * @found_lun: Pointer to the found lun.
6536 * @found_lun_status: Pointer to status of the found lun.
6537 * @found_lun_pri: Pointer to priority of the found lun.
6538 *
6539 * This routine searches the luns list for the specified lun
6540 * or the first lun for the vport/target. If the vport wwpn contains
6541 * a zero value then a specific vport is not specified. In this case
6542 * any vport which contains the lun will be considered a match. If the
6543 * target wwpn contains a zero value then a specific target is not specified.
6544 * In this case any target which contains the lun will be considered a
6545 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
6546 * are returned. The function will also return the next lun if available.
6547 * If the next lun is not found, starting_lun parameter will be set to
6548 * NO_MORE_OAS_LUN.
6549 *
6550 * Return codes:
6551 * true - A matching oas lun was found
6552 * false - No matching oas lun was found
6553 **/
6554 bool
6555 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6556 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6557 struct lpfc_name *found_vport_wwpn,
6558 struct lpfc_name *found_target_wwpn,
6559 uint64_t *found_lun,
6560 uint32_t *found_lun_status,
6561 uint32_t *found_lun_pri)
6562 {
6563
6564 unsigned long flags;
6565 struct lpfc_device_data *lun_info;
6566 struct lpfc_device_id *device_id;
6567 uint64_t lun;
6568 bool found = false;
6569
6570 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6571 !starting_lun || !found_vport_wwpn ||
6572 !found_target_wwpn || !found_lun || !found_lun_status ||
6573 (*starting_lun == NO_MORE_OAS_LUN) ||
6574 !phba->cfg_fof)
6575 return false;
6576
6577 lun = *starting_lun;
6578 *found_lun = NO_MORE_OAS_LUN;
6579 *starting_lun = NO_MORE_OAS_LUN;
6580
6581 /* Search for the lun, or the lun closest in value */
6582
6583 spin_lock_irqsave(&phba->devicelock, flags);
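/* The first match is returned through the found_* arguments; the next
 * matching lun after it (if any) is returned through *starting_lun so
 * the caller can resume the walk.
 */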
6584 list_for_each_entry(lun_info, &phba->luns, listentry) {
6585 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6586 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6587 sizeof(struct lpfc_name)) == 0)) &&
6588 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6589 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6590 sizeof(struct lpfc_name)) == 0)) &&
6591 (lun_info->oas_enabled)) {
6592 device_id = &lun_info->device_id;
6593 if ((!found) &&
6594 ((lun == FIND_FIRST_OAS_LUN) ||
6595 (device_id->lun == lun))) {
6596 *found_lun = device_id->lun;
6597 memcpy(found_vport_wwpn,
6598 &device_id->vport_wwpn,
6599 sizeof(struct lpfc_name));
6600 memcpy(found_target_wwpn,
6601 &device_id->target_wwpn,
6602 sizeof(struct lpfc_name));
6603 if (lun_info->available)
6604 *found_lun_status =
6605 OAS_LUN_STATUS_EXISTS;
6606 else
6607 *found_lun_status = 0;
6608 *found_lun_pri = lun_info->priority;
6609 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6610 memset(vport_wwpn, 0x0,
6611 sizeof(struct lpfc_name));
6612 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6613 memset(target_wwpn, 0x0,
6614 sizeof(struct lpfc_name));
6615 found = true;
6616 } else if (found) {
6617 *starting_lun = device_id->lun;
6618 memcpy(vport_wwpn, &device_id->vport_wwpn,
6619 sizeof(struct lpfc_name));
6620 memcpy(target_wwpn, &device_id->target_wwpn,
6621 sizeof(struct lpfc_name));
6622 break;
6623 }
6624 }
6625 }
6626 spin_unlock_irqrestore(&phba->devicelock, flags);
6627 return found;
6628 }
6629
6630 /**
6631 * lpfc_enable_oas_lun - enables a lun for OAS operations
6632 * @phba: Pointer to host bus adapter structure.
6633 * @vport_wwpn: Pointer to vport's wwpn information
6634 * @target_wwpn: Pointer to target's wwpn information
6635 * @lun: Lun
6636 * @pri: Priority
6637 *
6638 * This routine enables a lun for oas operations. The routine does so by
6639 * doing the following:
6640 *
6641 * 1) Checks to see if the device data for the lun has been created.
6642 * 2) If found, sets the OAS enabled flag if not set and returns.
6643 * 3) Otherwise, creates a device data structure.
6644 * 4) If successfully created, indicates the device data is for an OAS lun,
6645 * indicates the lun is not available and add to the list of luns.
6646 *
6647 * Return codes:
6648 * false - Error
6649 * true - Success
6650 **/
6651 bool
6652 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6653 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6654 {
6655
6656 struct lpfc_device_data *lun_info;
6657 unsigned long flags;
6658
6659 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6660 !phba->cfg_fof)
6661 return false;
6662
6663 spin_lock_irqsave(&phba->devicelock, flags);
6664
6665 /* Check to see if the device data for the lun has been created */
6666 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6667 target_wwpn, lun);
6668 if (lun_info) {
6669 if (!lun_info->oas_enabled)
6670 lun_info->oas_enabled = true;
6671 lun_info->priority = pri;
6672 spin_unlock_irqrestore(&phba->devicelock, flags);
6673 return true;
6674 }
6675
6676 /* Create an lun info structure and add to list of luns */
6677 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6678 pri, true);
6679 if (lun_info) {
6680 lun_info->oas_enabled = true;
6681 lun_info->priority = pri;
6682 lun_info->available = false;
6683 list_add_tail(&lun_info->listentry, &phba->luns);
6684 spin_unlock_irqrestore(&phba->devicelock, flags);
6685 return true;
6686 }
6687 spin_unlock_irqrestore(&phba->devicelock, flags);
6688 return false;
6689 }
6690
6691 /**
6692 * lpfc_disable_oas_lun - disables a lun for OAS operations
6693 * @phba: Pointer to host bus adapter structure.
6694 * @vport_wwpn: Pointer to vport's wwpn information
6695 * @target_wwpn: Pointer to target's wwpn information
6696 * @lun: Lun
6697 * @pri: Priority
6698 *
6699 * This routine disables a lun for oas operations. The routine does so by
6700 * doing the following:
6701 *
6702 * 1) Checks to see if the device data for the lun is created.
6703 * 2) If present, clears the flag indicating this lun is for OAS.
6704 * 3) If the lun is not available to the system, the device data is
6705 * freed.
6706 *
6707 * Return codes:
6708 * false - Error
6709 * true - Success
6710 **/
6711 bool
6712 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6713 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6714 {
6715
6716 struct lpfc_device_data *lun_info;
6717 unsigned long flags;
6718
6719 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6720 !phba->cfg_fof)
6721 return false;
6722
6723 spin_lock_irqsave(&phba->devicelock, flags);
6724
6725 /* Check to see if the lun is available. */
6726 lun_info = __lpfc_get_device_data(phba,
6727 &phba->luns, vport_wwpn,
6728 target_wwpn, lun);
6729 if (lun_info) {
6730 lun_info->oas_enabled = false;
6731 lun_info->priority = pri;
6732 if (!lun_info->available)
6733 lpfc_delete_device_data(phba, lun_info);
6734 spin_unlock_irqrestore(&phba->devicelock, flags);
6735 return true;
6736 }
6737
6738 spin_unlock_irqrestore(&phba->devicelock, flags);
6739 return false;
6740 }
6741
6742 static int
6743 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
6744 {
6745 return SCSI_MLQUEUE_HOST_BUSY;
6746 }
6747
6748 static int
6749 lpfc_no_slave(struct scsi_device *sdev)
6750 {
6751 return -ENODEV;
6752 }
6753
6754 struct scsi_host_template lpfc_template_nvme = {
6755 .module = THIS_MODULE,
6756 .name = LPFC_DRIVER_NAME,
6757 .proc_name = LPFC_DRIVER_NAME,
6758 .info = lpfc_info,
6759 .queuecommand = lpfc_no_command,
6760 .slave_alloc = lpfc_no_slave,
6761 .slave_configure = lpfc_no_slave,
6762 .scan_finished = lpfc_scan_finished,
6763 .this_id = -1,
6764 .sg_tablesize = 1,
6765 .cmd_per_lun = 1,
6766 .shost_groups = lpfc_hba_groups,
6767 .max_sectors = 0xFFFFFFFF,
6768 .vendor_id = LPFC_NL_VENDOR_ID,
6769 .track_queue_depth = 0,
6770 };
6771
6772 struct scsi_host_template lpfc_template = {
6773 .module = THIS_MODULE,
6774 .name = LPFC_DRIVER_NAME,
6775 .proc_name = LPFC_DRIVER_NAME,
6776 .info = lpfc_info,
6777 .queuecommand = lpfc_queuecommand,
6778 .eh_timed_out = fc_eh_timed_out,
6779 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
6780 .eh_abort_handler = lpfc_abort_handler,
6781 .eh_device_reset_handler = lpfc_device_reset_handler,
6782 .eh_target_reset_handler = lpfc_target_reset_handler,
6783 .eh_host_reset_handler = lpfc_host_reset_handler,
6784 .slave_alloc = lpfc_slave_alloc,
6785 .slave_configure = lpfc_slave_configure,
6786 .slave_destroy = lpfc_slave_destroy,
6787 .scan_finished = lpfc_scan_finished,
6788 .this_id = -1,
6789 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6790 .cmd_per_lun = LPFC_CMD_PER_LUN,
6791 .shost_groups = lpfc_hba_groups,
6792 .max_sectors = 0xFFFFFFFF,
6793 .vendor_id = LPFC_NL_VENDOR_ID,
6794 .change_queue_depth = scsi_change_queue_depth,
6795 .track_queue_depth = 1,
6796 };
6797
6798 struct scsi_host_template lpfc_vport_template = {
6799 .module = THIS_MODULE,
6800 .name = LPFC_DRIVER_NAME,
6801 .proc_name = LPFC_DRIVER_NAME,
6802 .info = lpfc_info,
6803 .queuecommand = lpfc_queuecommand,
6804 .eh_timed_out = fc_eh_timed_out,
6805 .eh_should_retry_cmd = fc_eh_should_retry_cmd,
6806 .eh_abort_handler = lpfc_abort_handler,
6807 .eh_device_reset_handler = lpfc_device_reset_handler,
6808 .eh_target_reset_handler = lpfc_target_reset_handler,
6809 .eh_bus_reset_handler = NULL,
6810 .eh_host_reset_handler = NULL,
6811 .slave_alloc = lpfc_slave_alloc,
6812 .slave_configure = lpfc_slave_configure,
6813 .slave_destroy = lpfc_slave_destroy,
6814 .scan_finished = lpfc_scan_finished,
6815 .this_id = -1,
6816 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6817 .cmd_per_lun = LPFC_CMD_PER_LUN,
6818 .shost_groups = lpfc_vport_groups,
6819 .max_sectors = 0xFFFFFFFF,
6820 .vendor_id = 0,
6821 .change_queue_depth = scsi_change_queue_depth,
6822 .track_queue_depth = 1,
6823 };
6824