1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
6 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
8 * www.broadcom.com *
9 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
10 * *
11 * This program is free software; you can redistribute it and/or *
12 * modify it under the terms of version 2 of the GNU General *
13 * Public License as published by the Free Software Foundation. *
14 * This program is distributed in the hope that it will be useful. *
15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19 * TO BE LEGALLY INVALID. See the GNU General Public License for *
20 * more details, a copy of which can be found in the file COPYING *
21 * included with this package. *
22 *******************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/export.h>
27 #include <linux/delay.h>
28 #include <asm/unaligned.h>
29 #include <linux/t10-pi.h>
30 #include <linux/crc-t10dif.h>
31 #include <net/checksum.h>
32
33 #include <scsi/scsi.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_eh.h>
36 #include <scsi/scsi_host.h>
37 #include <scsi/scsi_tcq.h>
38 #include <scsi/scsi_transport_fc.h>
39
40 #include "lpfc_version.h"
41 #include "lpfc_hw4.h"
42 #include "lpfc_hw.h"
43 #include "lpfc_sli.h"
44 #include "lpfc_sli4.h"
45 #include "lpfc_nl.h"
46 #include "lpfc_disc.h"
47 #include "lpfc.h"
48 #include "lpfc_scsi.h"
49 #include "lpfc_logmsg.h"
50 #include "lpfc_crtn.h"
51 #include "lpfc_vport.h"
52
53 #define LPFC_RESET_WAIT 2
54 #define LPFC_ABORT_WAIT 2
55
56 static char *dif_op_str[] = {
57 "PROT_NORMAL",
58 "PROT_READ_INSERT",
59 "PROT_WRITE_STRIP",
60 "PROT_READ_STRIP",
61 "PROT_WRITE_INSERT",
62 "PROT_READ_PASS",
63 "PROT_WRITE_PASS",
64 };
65
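/*
* One 8-byte T10 Protection Information (DIF) tuple as it appears in the
* protection scatter/gather list; the guard is a CRC (or IP checksum for
* DIX) of the data block and the ref tag normally carries the low 32 bits
* of the LBA.
*/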
66 struct scsi_dif_tuple {
67 __be16 guard_tag; /* Checksum */
68 __be16 app_tag; /* Opaque storage */
69 __be32 ref_tag; /* Target LBA or indirect LBA */
70 };
71
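/*
* When the fof feature is enabled (vport->phba->cfg_fof), sdev->hostdata
* points to a struct lpfc_device_data that carries the rport data;
* otherwise it is the rport data itself.
*/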
72 static struct lpfc_rport_data *
lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
74 {
75 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
76
77 if (vport->phba->cfg_fof)
78 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
79 else
80 return (struct lpfc_rport_data *)sdev->hostdata;
81 }
82
83 static void
84 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
85 static void
86 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 static int
88 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
89
90 static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
92 {
93 return sc->device->sector_size;
94 }
95
96 #define LPFC_CHECK_PROTECT_GUARD 1
97 #define LPFC_CHECK_PROTECT_REF 2
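/*
* Stub: always report that guard/ref checking is requested for this
* command; the flag argument is currently unused.
*/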
98 static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
100 {
101 return 1;
102 }
103
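/*
* Returns 1 when the host supplies IP-checksum guard tags (DIX guard type
* SHOST_DIX_GUARD_IP) for a command that carries protection data,
* otherwise 0.
*/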
104 static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
106 {
107 if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
108 return 0;
109 if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
110 return 1;
111 return 0;
112 }
113
114 /**
115 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge.
116 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer.
118 *
119 * This function is called from the lpfc_prep_task_mgmt_cmd function to
120 * set the last bit in the response sge entry.
121 **/
122 static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
124 struct lpfc_io_buf *lpfc_cmd)
125 {
126 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
127 if (sgl) {
128 sgl += 1;
129 sgl->word2 = le32_to_cpu(sgl->word2);
130 bf_set(lpfc_sli4_sge_last, sgl, 1);
131 sgl->word2 = cpu_to_le32(sgl->word2);
132 }
133 }
134
135 /**
136 * lpfc_update_stats - Update statistical data for the command completion
137 * @phba: Pointer to HBA object.
138 * @lpfc_cmd: lpfc scsi command object pointer.
139 *
140 * This function is called when there is a command completion and this
141 * function updates the statistical data for the command completion.
142 **/
143 static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
145 {
146 struct lpfc_rport_data *rdata;
147 struct lpfc_nodelist *pnode;
148 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
149 unsigned long flags;
150 struct Scsi_Host *shost = cmd->device->host;
151 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
152 unsigned long latency;
153 int i;
154
155 if (!vport->stat_data_enabled ||
156 vport->stat_data_blocked ||
157 (cmd->result))
158 return;
159
160 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
161 rdata = lpfc_cmd->rdata;
162 pnode = rdata->pnode;
163
164 spin_lock_irqsave(shost->host_lock, flags);
165 if (!pnode ||
166 !pnode->lat_data ||
167 (phba->bucket_type == LPFC_NO_BUCKET)) {
168 spin_unlock_irqrestore(shost->host_lock, flags);
169 return;
170 }
171
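/*
* Map the measured latency to a histogram bucket: linear buckets are
* bucket_step wide starting at bucket_base, otherwise the bucket
* boundaries grow as bucket_base + (1 << i) * bucket_step.
*/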
172 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
173 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
174 phba->bucket_step;
175 /* check array subscript bounds */
176 if (i < 0)
177 i = 0;
178 else if (i >= LPFC_MAX_BUCKET_COUNT)
179 i = LPFC_MAX_BUCKET_COUNT - 1;
180 } else {
181 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
182 if (latency <= (phba->bucket_base +
183 ((1<<i)*phba->bucket_step)))
184 break;
185 }
186
187 pnode->lat_data[i].cmd_count++;
188 spin_unlock_irqrestore(shost->host_lock, flags);
189 }
190
191 /**
192 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
193 * @phba: The Hba for which this call is being executed.
194 *
* This routine is called when there is a resource error in the driver or
* firmware. This routine posts a WORKER_RAMP_DOWN_QUEUE event for @phba and
* posts at most one event per second. It wakes up the worker thread of
* @phba to process the WORKER_RAMP_DOWN_QUEUE event.
199 *
200 * This routine should be called with no lock held.
201 **/
202 void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
204 {
205 unsigned long flags;
206 uint32_t evt_posted;
207 unsigned long expires;
208
209 spin_lock_irqsave(&phba->hbalock, flags);
210 atomic_inc(&phba->num_rsrc_err);
211 phba->last_rsrc_error_time = jiffies;
212
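/* Rate-limit ramp-down events to one per QUEUE_RAMP_DOWN_INTERVAL */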
213 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
214 if (time_after(expires, jiffies)) {
215 spin_unlock_irqrestore(&phba->hbalock, flags);
216 return;
217 }
218
219 phba->last_ramp_down_time = jiffies;
220
221 spin_unlock_irqrestore(&phba->hbalock, flags);
222
223 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
224 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
225 if (!evt_posted)
226 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
227 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
228
229 if (!evt_posted)
230 lpfc_worker_wake_up(phba);
231 return;
232 }
233
234 /**
235 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
236 * @phba: The Hba for which this call is being executed.
237 *
* This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
* worker thread. This routine reduces the queue depth for all scsi devices on
* each vport associated with @phba.
241 **/
242 void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
244 {
245 struct lpfc_vport **vports;
246 struct Scsi_Host *shost;
247 struct scsi_device *sdev;
248 unsigned long new_queue_depth;
249 unsigned long num_rsrc_err, num_cmd_success;
250 int i;
251
252 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
253 num_cmd_success = atomic_read(&phba->num_cmd_success);
254
255 /*
256 * The error and success command counters are global per
257 * driver instance. If another handler has already
258 * operated on this error event, just exit.
259 */
260 if (num_rsrc_err == 0)
261 return;
262
263 vports = lpfc_create_vport_work_array(phba);
264 if (vports != NULL)
265 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
266 shost = lpfc_shost_from_vport(vports[i]);
267 shost_for_each_device(sdev, shost) {
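/*
* Shrink the queue depth in proportion to the fraction of recent
* commands that hit a resource error:
* new depth = depth - depth * err / (err + success),
* falling back to depth - 1 when the scaled reduction rounds to zero.
*/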
268 new_queue_depth =
269 sdev->queue_depth * num_rsrc_err /
270 (num_rsrc_err + num_cmd_success);
271 if (!new_queue_depth)
272 new_queue_depth = sdev->queue_depth - 1;
273 else
274 new_queue_depth = sdev->queue_depth -
275 new_queue_depth;
276 scsi_change_queue_depth(sdev, new_queue_depth);
277 }
278 }
279 lpfc_destroy_vport_work_array(phba, vports);
280 atomic_set(&phba->num_rsrc_err, 0);
281 atomic_set(&phba->num_cmd_success, 0);
282 }
283
284 /**
285 * lpfc_scsi_dev_block - set all scsi hosts to block state
286 * @phba: Pointer to HBA context object.
287 *
* This function walks the vport list and sets each SCSI host to the blocked
* state by invoking the fc_remote_port_delete() routine. This function is
* invoked by EEH when the device's PCI slot has been permanently disabled.
291 **/
292 void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
294 {
295 struct lpfc_vport **vports;
296 struct Scsi_Host *shost;
297 struct scsi_device *sdev;
298 struct fc_rport *rport;
299 int i;
300
301 vports = lpfc_create_vport_work_array(phba);
302 if (vports != NULL)
303 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
304 shost = lpfc_shost_from_vport(vports[i]);
305 shost_for_each_device(sdev, shost) {
306 rport = starget_to_rport(scsi_target(sdev));
307 fc_remote_port_delete(rport);
308 }
309 }
310 lpfc_destroy_vport_work_array(phba, vports);
311 }
312
313 /**
314 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
* @vport: The virtual port for which this call is being executed.
* @num_to_alloc: The requested number of buffers to allocate.
317 *
* This routine allocates a scsi buffer for a device with the SLI-3 interface
* spec. The scsi buffer contains all the necessary information needed to initiate
320 * a SCSI I/O. The non-DMAable buffer region contains information to build
321 * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
322 * and the initial BPL. In addition to allocating memory, the FCP CMND and
323 * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
324 *
325 * Return codes:
326 * int - number of scsi buffers that were allocated.
327 * 0 = failure, less than num_to_alloc is a partial failure.
328 **/
329 static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
331 {
332 struct lpfc_hba *phba = vport->phba;
333 struct lpfc_io_buf *psb;
334 struct ulp_bde64 *bpl;
335 IOCB_t *iocb;
336 dma_addr_t pdma_phys_fcp_cmd;
337 dma_addr_t pdma_phys_fcp_rsp;
338 dma_addr_t pdma_phys_sgl;
339 uint16_t iotag;
340 int bcnt, bpl_size;
341
342 bpl_size = phba->cfg_sg_dma_buf_size -
343 (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
344
345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
346 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
347 num_to_alloc, phba->cfg_sg_dma_buf_size,
348 (int)sizeof(struct fcp_cmnd),
349 (int)sizeof(struct fcp_rsp), bpl_size);
350
351 for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
352 psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
353 if (!psb)
354 break;
355
356 /*
357 * Get memory from the pci pool to map the virt space to pci
358 * bus space for an I/O. The DMA buffer includes space for the
359 * struct fcp_cmnd, struct fcp_rsp and the number of bde's
360 * necessary to support the sg_tablesize.
361 */
362 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
363 GFP_KERNEL, &psb->dma_handle);
364 if (!psb->data) {
365 kfree(psb);
366 break;
367 }
368
369
370 /* Allocate iotag for psb->cur_iocbq. */
371 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
372 if (iotag == 0) {
373 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
374 psb->data, psb->dma_handle);
375 kfree(psb);
376 break;
377 }
378 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
379
380 psb->fcp_cmnd = psb->data;
381 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
382 psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
383 sizeof(struct fcp_rsp);
384
385 /* Initialize local short-hand pointers. */
386 bpl = (struct ulp_bde64 *)psb->dma_sgl;
387 pdma_phys_fcp_cmd = psb->dma_handle;
388 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
389 pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
390 sizeof(struct fcp_rsp);
391
392 /*
393 * The first two bdes are the FCP_CMD and FCP_RSP. The balance
394 * are sg list bdes. Initialize the first two and leave the
395 * rest for queuecommand.
396 */
397 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
398 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
399 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
400 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
401 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
402
403 /* Setup the physical region for the FCP RSP */
404 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
405 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
406 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
407 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
408 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
409
410 /*
411 * Since the IOCB for the FCP I/O is built into this
412 * lpfc_scsi_buf, initialize it with all known data now.
413 */
414 iocb = &psb->cur_iocbq.iocb;
415 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
416 if ((phba->sli_rev == 3) &&
417 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
418 /* fill in immediate fcp command BDE */
419 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
420 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
421 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
422 unsli3.fcp_ext.icd);
423 iocb->un.fcpi64.bdl.addrHigh = 0;
424 iocb->ulpBdeCount = 0;
425 iocb->ulpLe = 0;
426 /* fill in response BDE */
427 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
428 BUFF_TYPE_BDE_64;
429 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
430 sizeof(struct fcp_rsp);
431 iocb->unsli3.fcp_ext.rbde.addrLow =
432 putPaddrLow(pdma_phys_fcp_rsp);
433 iocb->unsli3.fcp_ext.rbde.addrHigh =
434 putPaddrHigh(pdma_phys_fcp_rsp);
435 } else {
436 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
437 iocb->un.fcpi64.bdl.bdeSize =
438 (2 * sizeof(struct ulp_bde64));
439 iocb->un.fcpi64.bdl.addrLow =
440 putPaddrLow(pdma_phys_sgl);
441 iocb->un.fcpi64.bdl.addrHigh =
442 putPaddrHigh(pdma_phys_sgl);
443 iocb->ulpBdeCount = 1;
444 iocb->ulpLe = 1;
445 }
446 iocb->ulpClass = CLASS3;
447 psb->status = IOSTAT_SUCCESS;
448 /* Put it back into the SCSI buffer list */
449 psb->cur_iocbq.context1 = psb;
450 spin_lock_init(&psb->buf_lock);
451 lpfc_release_scsi_buf_s3(phba, psb);
452
453 }
454
455 return bcnt;
456 }
457
458 /**
* lpfc_sli4_vport_delete_fcp_xri_aborted - Remove all ndlp references for vport
460 * @vport: pointer to lpfc vport data structure.
461 *
462 * This routine is invoked by the vport cleanup for deletions and the cleanup
463 * for an ndlp on removal.
464 **/
465 void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
467 {
468 struct lpfc_hba *phba = vport->phba;
469 struct lpfc_io_buf *psb, *next_psb;
470 struct lpfc_sli4_hdw_queue *qp;
471 unsigned long iflag = 0;
472 int idx;
473
474 if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
475 return;
476
477 spin_lock_irqsave(&phba->hbalock, iflag);
478 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
479 qp = &phba->sli4_hba.hdwq[idx];
480
481 spin_lock(&qp->abts_io_buf_list_lock);
482 list_for_each_entry_safe(psb, next_psb,
483 &qp->lpfc_abts_io_buf_list, list) {
484 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME)
485 continue;
486
487 if (psb->rdata && psb->rdata->pnode &&
488 psb->rdata->pnode->vport == vport)
489 psb->rdata = NULL;
490 }
491 spin_unlock(&qp->abts_io_buf_list_lock);
492 }
493 spin_unlock_irqrestore(&phba->hbalock, iflag);
494 }
495
496 /**
497 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
498 * @phba: pointer to lpfc hba data structure.
499 * @axri: pointer to the fcp xri abort wcqe structure.
500 *
501 * This routine is invoked by the worker thread to process a SLI4 fast-path
502 * FCP or NVME aborted xri.
503 **/
504 void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
506 struct sli4_wcqe_xri_aborted *axri, int idx)
507 {
508 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
509 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
510 struct lpfc_io_buf *psb, *next_psb;
511 struct lpfc_sli4_hdw_queue *qp;
512 unsigned long iflag = 0;
513 struct lpfc_iocbq *iocbq;
514 int i;
515 struct lpfc_nodelist *ndlp;
516 int rrq_empty = 0;
517 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
518
519 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
520 return;
521
522 qp = &phba->sli4_hba.hdwq[idx];
523 spin_lock_irqsave(&phba->hbalock, iflag);
524 spin_lock(&qp->abts_io_buf_list_lock);
525 list_for_each_entry_safe(psb, next_psb,
526 &qp->lpfc_abts_io_buf_list, list) {
527 if (psb->cur_iocbq.sli4_xritag == xri) {
528 list_del_init(&psb->list);
529 psb->exch_busy = 0;
530 psb->status = IOSTAT_SUCCESS;
531 if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
532 qp->abts_nvme_io_bufs--;
533 spin_unlock(&qp->abts_io_buf_list_lock);
534 spin_unlock_irqrestore(&phba->hbalock, iflag);
535 lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
536 return;
537 }
538 qp->abts_scsi_io_bufs--;
539 spin_unlock(&qp->abts_io_buf_list_lock);
540
541 if (psb->rdata && psb->rdata->pnode)
542 ndlp = psb->rdata->pnode;
543 else
544 ndlp = NULL;
545
546 rrq_empty = list_empty(&phba->active_rrq_list);
547 spin_unlock_irqrestore(&phba->hbalock, iflag);
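/*
* Mark the aborted XRI active on the node's RRQ list so it is not
* reused before the recovery qualifier expires, then run the common
* ABTS error handler for this node.
*/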
548 if (ndlp) {
549 lpfc_set_rrq_active(phba, ndlp,
550 psb->cur_iocbq.sli4_lxritag, rxid, 1);
551 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
552 }
553 lpfc_release_scsi_buf_s4(phba, psb);
554 if (rrq_empty)
555 lpfc_worker_wake_up(phba);
556 return;
557 }
558 }
559 spin_unlock(&qp->abts_io_buf_list_lock);
560 for (i = 1; i <= phba->sli.last_iotag; i++) {
561 iocbq = phba->sli.iocbq_lookup[i];
562
563 if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
564 (iocbq->iocb_flag & LPFC_IO_LIBDFC))
565 continue;
566 if (iocbq->sli4_xritag != xri)
567 continue;
568 psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
569 psb->exch_busy = 0;
570 spin_unlock_irqrestore(&phba->hbalock, iflag);
571 if (!list_empty(&pring->txq))
572 lpfc_worker_wake_up(phba);
573 return;
574
575 }
576 spin_unlock_irqrestore(&phba->hbalock, iflag);
577 }
578
579 /**
580 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
581 * @phba: The HBA for which this call is being executed.
582 *
* This routine removes a scsi buffer from the head of the @phba
* lpfc_scsi_buf_list and returns it to the caller.
585 *
586 * Return codes:
587 * NULL - Error
588 * Pointer to lpfc_scsi_buf - Success
589 **/
590 static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
592 struct scsi_cmnd *cmnd)
593 {
594 struct lpfc_io_buf *lpfc_cmd = NULL;
595 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
596 unsigned long iflag = 0;
597
598 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
599 list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
600 list);
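/*
* Buffers are recycled through two lists: if the get list is empty,
* splice the entire put list onto it (under the put lock) and retry
* the removal.
*/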
601 if (!lpfc_cmd) {
602 spin_lock(&phba->scsi_buf_list_put_lock);
603 list_splice(&phba->lpfc_scsi_buf_list_put,
604 &phba->lpfc_scsi_buf_list_get);
605 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
606 list_remove_head(scsi_buf_list_get, lpfc_cmd,
607 struct lpfc_io_buf, list);
608 spin_unlock(&phba->scsi_buf_list_put_lock);
609 }
610 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
611
612 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
613 atomic_inc(&ndlp->cmd_pending);
614 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
615 }
616 return lpfc_cmd;
617 }
618 /**
619 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
620 * @phba: The HBA for which this call is being executed.
621 *
* This routine removes a scsi buffer from the head of the @hdwq io_buf_list
* and returns it to the caller.
624 *
625 * Return codes:
626 * NULL - Error
627 * Pointer to lpfc_scsi_buf - Success
628 **/
629 static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
631 struct scsi_cmnd *cmnd)
632 {
633 struct lpfc_io_buf *lpfc_cmd;
634 struct lpfc_sli4_hdw_queue *qp;
635 struct sli4_sge *sgl;
636 IOCB_t *iocb;
637 dma_addr_t pdma_phys_fcp_rsp;
638 dma_addr_t pdma_phys_fcp_cmd;
639 uint32_t cpu, idx;
640 int tag;
641 struct fcp_cmd_rsp_buf *tmp = NULL;
642
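/*
* Pick the hardware queue: either the blk-mq hardware queue that issued
* the command (LPFC_FCP_SCHED_BY_HDWQ) or the hdwq mapped to the
* current CPU.
*/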
643 cpu = raw_smp_processor_id();
644 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
645 tag = blk_mq_unique_tag(cmnd->request);
646 idx = blk_mq_unique_tag_to_hwq(tag);
647 } else {
648 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
649 }
650
651 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
652 !phba->cfg_xri_rebalancing);
653 if (!lpfc_cmd) {
654 qp = &phba->sli4_hba.hdwq[idx];
655 qp->empty_io_bufs++;
656 return NULL;
657 }
658
659 /* Setup key fields in buffer that may have been changed
660 * if other protocols used this buffer.
661 */
662 lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
663 lpfc_cmd->prot_seg_cnt = 0;
664 lpfc_cmd->seg_cnt = 0;
665 lpfc_cmd->timeout = 0;
666 lpfc_cmd->flags = 0;
667 lpfc_cmd->start_time = jiffies;
668 lpfc_cmd->waitq = NULL;
669 lpfc_cmd->cpu = cpu;
670 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
671 lpfc_cmd->prot_data_type = 0;
672 #endif
673 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
if (!tmp) {
/* No cmd/rsp buffer: release the io_buf back to its hdwq pool */
lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
return NULL;
}
676
677 lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
678 lpfc_cmd->fcp_rsp = tmp->fcp_rsp;
679
680 /*
681 * The first two SGEs are the FCP_CMD and FCP_RSP.
682 * The balance are sg list bdes. Initialize the
683 * first two and leave the rest for queuecommand.
684 */
685 sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
686 pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
687 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
688 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
689 sgl->word2 = le32_to_cpu(sgl->word2);
690 bf_set(lpfc_sli4_sge_last, sgl, 0);
691 sgl->word2 = cpu_to_le32(sgl->word2);
692 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
693 sgl++;
694
695 /* Setup the physical region for the FCP RSP */
696 pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
697 sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
698 sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
699 sgl->word2 = le32_to_cpu(sgl->word2);
700 bf_set(lpfc_sli4_sge_last, sgl, 1);
701 sgl->word2 = cpu_to_le32(sgl->word2);
702 sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));
703
704 /*
705 * Since the IOCB for the FCP I/O is built into this
706 * lpfc_io_buf, initialize it with all known data now.
707 */
708 iocb = &lpfc_cmd->cur_iocbq.iocb;
709 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
710 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
/* Setting the BLP size to 2 * sizeof(BDE) may not be correct.
* We are setting the bpl to point to our sgl. An sgl's
* entries are 16 bytes, a bpl's entries are 12 bytes.
*/
715 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
716 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
717 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
718 iocb->ulpBdeCount = 1;
719 iocb->ulpLe = 1;
720 iocb->ulpClass = CLASS3;
721
722 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
723 atomic_inc(&ndlp->cmd_pending);
724 lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
725 }
726 return lpfc_cmd;
727 }
728 /**
729 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
730 * @phba: The HBA for which this call is being executed.
731 *
* This routine removes a scsi buffer from the head of the @phba
* lpfc_scsi_buf_list and returns it to the caller.
734 *
735 * Return codes:
736 * NULL - Error
737 * Pointer to lpfc_scsi_buf - Success
738 **/
739 static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
741 struct scsi_cmnd *cmnd)
742 {
743 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
744 }
745
746 /**
* lpfc_release_scsi_buf_s3 - Return a scsi buffer back to the hba scsi buf list
748 * @phba: The Hba for which this call is being executed.
749 * @psb: The scsi buffer which is being released.
750 *
* This routine releases the @psb scsi buffer by adding it to the tail of the
* @phba lpfc_scsi_buf_list.
753 **/
754 static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
756 {
757 unsigned long iflag = 0;
758
759 psb->seg_cnt = 0;
760 psb->prot_seg_cnt = 0;
761
762 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
763 psb->pCmd = NULL;
764 psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
765 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
766 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
767 }
768
769 /**
* lpfc_release_scsi_buf_s4 - Return a scsi buffer back to the hba scsi buf list
771 * @phba: The Hba for which this call is being executed.
772 * @psb: The scsi buffer which is being released.
773 *
* This routine releases the @psb scsi buffer by adding it to the tail of the
* @hdwq io_buf_list. For SLI4, XRIs are tied to the scsi buffer and cannot be
* reused for at least RA_TOV amount of time if the exchange was aborted.
778 **/
779 static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
781 {
782 struct lpfc_sli4_hdw_queue *qp;
783 unsigned long iflag = 0;
784
785 psb->seg_cnt = 0;
786 psb->prot_seg_cnt = 0;
787
788 qp = psb->hdwq;
789 if (psb->exch_busy) {
790 spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
791 psb->pCmd = NULL;
792 list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
793 qp->abts_scsi_io_bufs++;
794 spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
795 } else {
796 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
797 }
798 }
799
800 /**
* lpfc_release_scsi_buf - Return a scsi buffer back to the hba scsi buf list
802 * @phba: The Hba for which this call is being executed.
803 * @psb: The scsi buffer which is being released.
804 *
* This routine releases the @psb scsi buffer by adding it to the tail of the
* @phba lpfc_scsi_buf_list.
807 **/
808 static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
810 {
811 if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
812 atomic_dec(&psb->ndlp->cmd_pending);
813
814 psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
815 phba->lpfc_release_scsi_buf(phba, psb);
816 }
817
818 /**
819 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
820 * @phba: The Hba for which this call is being executed.
821 * @lpfc_cmd: The scsi buffer which is going to be mapped.
822 *
* This routine does the pci dma mapping for the scatter-gather list of the scsi
* cmnd field of @lpfc_cmd for a device with the SLI-3 interface spec. This
* routine scans through the sg elements and formats the BDEs. This routine also
* initializes all IOCB fields which are dependent on the scsi command request
* buffer.
827 *
828 * Return codes:
829 * 1 - Error
830 * 0 - Success
831 **/
832 static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
834 {
835 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
836 struct scatterlist *sgel = NULL;
837 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
838 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
839 struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
840 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
841 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
842 dma_addr_t physaddr;
843 uint32_t num_bde = 0;
844 int nseg, datadir = scsi_cmnd->sc_data_direction;
845
846 /*
847 * There are three possibilities here - use scatter-gather segment, use
848 * the single mapping, or neither. Start the lpfc command prep by
849 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
850 * data bde entry.
851 */
852 bpl += 2;
853 if (scsi_sg_count(scsi_cmnd)) {
854 /*
855 * The driver stores the segment count returned from pci_map_sg
* because this is a count of dma-mappings used to map the use_sg
857 * pages. They are not guaranteed to be the same for those
858 * architectures that implement an IOMMU.
859 */
860
861 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
862 scsi_sg_count(scsi_cmnd), datadir);
863 if (unlikely(!nseg))
864 return 1;
865
866 lpfc_cmd->seg_cnt = nseg;
867 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
868 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
869 "9064 BLKGRD: %s: Too many sg segments from "
870 "dma_map_sg. Config %d, seg_cnt %d\n",
871 __func__, phba->cfg_sg_seg_cnt,
872 lpfc_cmd->seg_cnt);
873 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
874 lpfc_cmd->seg_cnt = 0;
875 scsi_dma_unmap(scsi_cmnd);
876 return 2;
877 }
878
879 /*
880 * The driver established a maximum scatter-gather segment count
881 * during probe that limits the number of sg elements in any
882 * single scsi command. Just run through the seg_cnt and format
883 * the bde's.
884 * When using SLI-3 the driver will try to fit all the BDEs into
885 * the IOCB. If it can't then the BDEs get added to a BPL as it
886 * does for SLI-2 mode.
887 */
888 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
889 physaddr = sg_dma_address(sgel);
890 if (phba->sli_rev == 3 &&
891 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
892 !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
893 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
894 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
895 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
896 data_bde->addrLow = putPaddrLow(physaddr);
897 data_bde->addrHigh = putPaddrHigh(physaddr);
898 data_bde++;
899 } else {
900 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
901 bpl->tus.f.bdeSize = sg_dma_len(sgel);
902 bpl->tus.w = le32_to_cpu(bpl->tus.w);
903 bpl->addrLow =
904 le32_to_cpu(putPaddrLow(physaddr));
905 bpl->addrHigh =
906 le32_to_cpu(putPaddrHigh(physaddr));
907 bpl++;
908 }
909 }
910 }
911
912 /*
913 * Finish initializing those IOCB fields that are dependent on the
914 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
915 * explicitly reinitialized and for SLI-3 the extended bde count is
916 * explicitly reinitialized since all iocb memory resources are reused.
917 */
918 if (phba->sli_rev == 3 &&
919 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
920 !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
921 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
922 /*
923 * The extended IOCB format can only fit 3 BDE or a BPL.
924 * This I/O has more than 3 BDE so the 1st data bde will
925 * be a BPL that is filled in here.
926 */
927 physaddr = lpfc_cmd->dma_handle;
928 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
929 data_bde->tus.f.bdeSize = (num_bde *
930 sizeof(struct ulp_bde64));
931 physaddr += (sizeof(struct fcp_cmnd) +
932 sizeof(struct fcp_rsp) +
933 (2 * sizeof(struct ulp_bde64)));
934 data_bde->addrHigh = putPaddrHigh(physaddr);
935 data_bde->addrLow = putPaddrLow(physaddr);
936 /* ebde count includes the response bde and data bpl */
937 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
938 } else {
939 /* ebde count includes the response bde and data bdes */
940 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
941 }
942 } else {
943 iocb_cmd->un.fcpi64.bdl.bdeSize =
944 ((num_bde + 2) * sizeof(struct ulp_bde64));
945 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
946 }
947 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
948
949 /*
950 * Due to difference in data length between DIF/non-DIF paths,
951 * we need to set word 4 of IOCB here
952 */
953 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
954 return 0;
955 }
956
957 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
958
959 /* Return BG_ERR_INIT if error injection is detected by Initiator */
960 #define BG_ERR_INIT 0x1
961 /* Return BG_ERR_TGT if error injection is detected by Target */
962 #define BG_ERR_TGT 0x2
963 /* Return BG_ERR_SWAP if swapping CSUM<-->CRC is required for error injection */
964 #define BG_ERR_SWAP 0x10
965 /**
966 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
967 * error injection
968 **/
969 #define BG_ERR_CHECK 0x20
970
971 /**
972 * lpfc_bg_err_inject - Determine if we should inject an error
973 * @phba: The Hba for which this call is being executed.
974 * @sc: The SCSI command to examine
975 * @reftag: (out) BlockGuard reference tag for transmitted data
976 * @apptag: (out) BlockGuard application tag for transmitted data
* @new_guard: (in) Value to replace CRC with if needed
978 *
979 * Returns BG_ERR_* bit mask or 0 if request ignored
980 **/
981 static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
983 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
984 {
985 struct scatterlist *sgpe; /* s/g prot entry */
986 struct lpfc_io_buf *lpfc_cmd = NULL;
987 struct scsi_dif_tuple *src = NULL;
988 struct lpfc_nodelist *ndlp;
989 struct lpfc_rport_data *rdata;
990 uint32_t op = scsi_get_prot_op(sc);
991 uint32_t blksize;
992 uint32_t numblks;
993 sector_t lba;
994 int rc = 0;
995 int blockoff = 0;
996
997 if (op == SCSI_PROT_NORMAL)
998 return 0;
999
1000 sgpe = scsi_prot_sglist(sc);
1001 lba = scsi_get_lba(sc);
1002
1003 /* First check if we need to match the LBA */
1004 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1005 blksize = lpfc_cmd_blksize(sc);
1006 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1007
1008 /* Make sure we have the right LBA if one is specified */
1009 if ((phba->lpfc_injerr_lba < lba) ||
1010 (phba->lpfc_injerr_lba >= (lba + numblks)))
1011 return 0;
1012 if (sgpe) {
1013 blockoff = phba->lpfc_injerr_lba - lba;
1014 numblks = sg_dma_len(sgpe) /
1015 sizeof(struct scsi_dif_tuple);
1016 if (numblks < blockoff)
1017 blockoff = numblks;
1018 }
1019 }
1020
1021 /* Next check if we need to match the remote NPortID or WWPN */
1022 rdata = lpfc_rport_data_from_scsi_device(sc->device);
1023 if (rdata && rdata->pnode) {
1024 ndlp = rdata->pnode;
1025
1026 /* Make sure we have the right NPortID if one is specified */
1027 if (phba->lpfc_injerr_nportid &&
1028 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1029 return 0;
1030
1031 /*
1032 * Make sure we have the right WWPN if one is specified.
1033 * wwn[0] should be a non-zero NAA in a good WWPN.
1034 */
1035 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1036 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1037 sizeof(struct lpfc_name)) != 0))
1038 return 0;
1039 }
1040
1041 /* Setup a ptr to the protection data if the SCSI host provides it */
1042 if (sgpe) {
1043 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1044 src += blockoff;
1045 lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
1046 }
1047
1048 /* Should we change the Reference Tag */
1049 if (reftag) {
1050 if (phba->lpfc_injerr_wref_cnt) {
1051 switch (op) {
1052 case SCSI_PROT_WRITE_PASS:
1053 if (src) {
1054 /*
1055 * For WRITE_PASS, force the error
1056 * to be sent on the wire. It should
1057 * be detected by the Target.
1058 * If blockoff != 0 error will be
1059 * inserted in middle of the IO.
1060 */
1061
1062 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1063 "9076 BLKGRD: Injecting reftag error: "
1064 "write lba x%lx + x%x oldrefTag x%x\n",
1065 (unsigned long)lba, blockoff,
1066 be32_to_cpu(src->ref_tag));
1067
1068 /*
1069 * Save the old ref_tag so we can
1070 * restore it on completion.
1071 */
1072 if (lpfc_cmd) {
1073 lpfc_cmd->prot_data_type =
1074 LPFC_INJERR_REFTAG;
1075 lpfc_cmd->prot_data_segment =
1076 src;
1077 lpfc_cmd->prot_data =
1078 src->ref_tag;
1079 }
1080 src->ref_tag = cpu_to_be32(0xDEADBEEF);
1081 phba->lpfc_injerr_wref_cnt--;
1082 if (phba->lpfc_injerr_wref_cnt == 0) {
1083 phba->lpfc_injerr_nportid = 0;
1084 phba->lpfc_injerr_lba =
1085 LPFC_INJERR_LBA_OFF;
1086 memset(&phba->lpfc_injerr_wwpn,
1087 0, sizeof(struct lpfc_name));
1088 }
1089 rc = BG_ERR_TGT | BG_ERR_CHECK;
1090
1091 break;
1092 }
1093 /* fall through */
1094 case SCSI_PROT_WRITE_INSERT:
1095 /*
1096 * For WRITE_INSERT, force the error
1097 * to be sent on the wire. It should be
1098 * detected by the Target.
1099 */
1100 /* DEADBEEF will be the reftag on the wire */
1101 *reftag = 0xDEADBEEF;
1102 phba->lpfc_injerr_wref_cnt--;
1103 if (phba->lpfc_injerr_wref_cnt == 0) {
1104 phba->lpfc_injerr_nportid = 0;
1105 phba->lpfc_injerr_lba =
1106 LPFC_INJERR_LBA_OFF;
1107 memset(&phba->lpfc_injerr_wwpn,
1108 0, sizeof(struct lpfc_name));
1109 }
1110 rc = BG_ERR_TGT | BG_ERR_CHECK;
1111
1112 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1113 "9078 BLKGRD: Injecting reftag error: "
1114 "write lba x%lx\n", (unsigned long)lba);
1115 break;
1116 case SCSI_PROT_WRITE_STRIP:
1117 /*
1118 * For WRITE_STRIP and WRITE_PASS,
1119 * force the error on data
1120 * being copied from SLI-Host to SLI-Port.
1121 */
1122 *reftag = 0xDEADBEEF;
1123 phba->lpfc_injerr_wref_cnt--;
1124 if (phba->lpfc_injerr_wref_cnt == 0) {
1125 phba->lpfc_injerr_nportid = 0;
1126 phba->lpfc_injerr_lba =
1127 LPFC_INJERR_LBA_OFF;
1128 memset(&phba->lpfc_injerr_wwpn,
1129 0, sizeof(struct lpfc_name));
1130 }
1131 rc = BG_ERR_INIT;
1132
1133 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1134 "9077 BLKGRD: Injecting reftag error: "
1135 "write lba x%lx\n", (unsigned long)lba);
1136 break;
1137 }
1138 }
1139 if (phba->lpfc_injerr_rref_cnt) {
1140 switch (op) {
1141 case SCSI_PROT_READ_INSERT:
1142 case SCSI_PROT_READ_STRIP:
1143 case SCSI_PROT_READ_PASS:
1144 /*
1145 * For READ_STRIP and READ_PASS, force the
1146 * error on data being read off the wire. It
1147 * should force an IO error to the driver.
1148 */
1149 *reftag = 0xDEADBEEF;
1150 phba->lpfc_injerr_rref_cnt--;
1151 if (phba->lpfc_injerr_rref_cnt == 0) {
1152 phba->lpfc_injerr_nportid = 0;
1153 phba->lpfc_injerr_lba =
1154 LPFC_INJERR_LBA_OFF;
1155 memset(&phba->lpfc_injerr_wwpn,
1156 0, sizeof(struct lpfc_name));
1157 }
1158 rc = BG_ERR_INIT;
1159
1160 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1161 "9079 BLKGRD: Injecting reftag error: "
1162 "read lba x%lx\n", (unsigned long)lba);
1163 break;
1164 }
1165 }
1166 }
1167
1168 /* Should we change the Application Tag */
1169 if (apptag) {
1170 if (phba->lpfc_injerr_wapp_cnt) {
1171 switch (op) {
1172 case SCSI_PROT_WRITE_PASS:
1173 if (src) {
1174 /*
1175 * For WRITE_PASS, force the error
1176 * to be sent on the wire. It should
1177 * be detected by the Target.
1178 * If blockoff != 0 error will be
1179 * inserted in middle of the IO.
1180 */
1181
1182 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1183 "9080 BLKGRD: Injecting apptag error: "
1184 "write lba x%lx + x%x oldappTag x%x\n",
1185 (unsigned long)lba, blockoff,
1186 be16_to_cpu(src->app_tag));
1187
1188 /*
1189 * Save the old app_tag so we can
1190 * restore it on completion.
1191 */
1192 if (lpfc_cmd) {
1193 lpfc_cmd->prot_data_type =
1194 LPFC_INJERR_APPTAG;
1195 lpfc_cmd->prot_data_segment =
1196 src;
1197 lpfc_cmd->prot_data =
1198 src->app_tag;
1199 }
1200 src->app_tag = cpu_to_be16(0xDEAD);
1201 phba->lpfc_injerr_wapp_cnt--;
1202 if (phba->lpfc_injerr_wapp_cnt == 0) {
1203 phba->lpfc_injerr_nportid = 0;
1204 phba->lpfc_injerr_lba =
1205 LPFC_INJERR_LBA_OFF;
1206 memset(&phba->lpfc_injerr_wwpn,
1207 0, sizeof(struct lpfc_name));
1208 }
1209 rc = BG_ERR_TGT | BG_ERR_CHECK;
1210 break;
1211 }
1212 /* fall through */
1213 case SCSI_PROT_WRITE_INSERT:
1214 /*
1215 * For WRITE_INSERT, force the
1216 * error to be sent on the wire. It should be
1217 * detected by the Target.
1218 */
1219 /* DEAD will be the apptag on the wire */
1220 *apptag = 0xDEAD;
1221 phba->lpfc_injerr_wapp_cnt--;
1222 if (phba->lpfc_injerr_wapp_cnt == 0) {
1223 phba->lpfc_injerr_nportid = 0;
1224 phba->lpfc_injerr_lba =
1225 LPFC_INJERR_LBA_OFF;
1226 memset(&phba->lpfc_injerr_wwpn,
1227 0, sizeof(struct lpfc_name));
1228 }
1229 rc = BG_ERR_TGT | BG_ERR_CHECK;
1230
1231 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1232 "0813 BLKGRD: Injecting apptag error: "
1233 "write lba x%lx\n", (unsigned long)lba);
1234 break;
1235 case SCSI_PROT_WRITE_STRIP:
1236 /*
1237 * For WRITE_STRIP and WRITE_PASS,
1238 * force the error on data
1239 * being copied from SLI-Host to SLI-Port.
1240 */
1241 *apptag = 0xDEAD;
1242 phba->lpfc_injerr_wapp_cnt--;
1243 if (phba->lpfc_injerr_wapp_cnt == 0) {
1244 phba->lpfc_injerr_nportid = 0;
1245 phba->lpfc_injerr_lba =
1246 LPFC_INJERR_LBA_OFF;
1247 memset(&phba->lpfc_injerr_wwpn,
1248 0, sizeof(struct lpfc_name));
1249 }
1250 rc = BG_ERR_INIT;
1251
1252 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1253 "0812 BLKGRD: Injecting apptag error: "
1254 "write lba x%lx\n", (unsigned long)lba);
1255 break;
1256 }
1257 }
1258 if (phba->lpfc_injerr_rapp_cnt) {
1259 switch (op) {
1260 case SCSI_PROT_READ_INSERT:
1261 case SCSI_PROT_READ_STRIP:
1262 case SCSI_PROT_READ_PASS:
1263 /*
1264 * For READ_STRIP and READ_PASS, force the
1265 * error on data being read off the wire. It
1266 * should force an IO error to the driver.
1267 */
1268 *apptag = 0xDEAD;
1269 phba->lpfc_injerr_rapp_cnt--;
1270 if (phba->lpfc_injerr_rapp_cnt == 0) {
1271 phba->lpfc_injerr_nportid = 0;
1272 phba->lpfc_injerr_lba =
1273 LPFC_INJERR_LBA_OFF;
1274 memset(&phba->lpfc_injerr_wwpn,
1275 0, sizeof(struct lpfc_name));
1276 }
1277 rc = BG_ERR_INIT;
1278
1279 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1280 "0814 BLKGRD: Injecting apptag error: "
1281 "read lba x%lx\n", (unsigned long)lba);
1282 break;
1283 }
1284 }
1285 }
1286
1287
1288 /* Should we change the Guard Tag */
1289 if (new_guard) {
1290 if (phba->lpfc_injerr_wgrd_cnt) {
1291 switch (op) {
1292 case SCSI_PROT_WRITE_PASS:
1293 rc = BG_ERR_CHECK;
1294 /* fall through */
1295
1296 case SCSI_PROT_WRITE_INSERT:
1297 /*
1298 * For WRITE_INSERT, force the
1299 * error to be sent on the wire. It should be
1300 * detected by the Target.
1301 */
1302 phba->lpfc_injerr_wgrd_cnt--;
1303 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1304 phba->lpfc_injerr_nportid = 0;
1305 phba->lpfc_injerr_lba =
1306 LPFC_INJERR_LBA_OFF;
1307 memset(&phba->lpfc_injerr_wwpn,
1308 0, sizeof(struct lpfc_name));
1309 }
1310
1311 rc |= BG_ERR_TGT | BG_ERR_SWAP;
1312 /* Signals the caller to swap CRC->CSUM */
1313
1314 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1315 "0817 BLKGRD: Injecting guard error: "
1316 "write lba x%lx\n", (unsigned long)lba);
1317 break;
1318 case SCSI_PROT_WRITE_STRIP:
1319 /*
1320 * For WRITE_STRIP and WRITE_PASS,
1321 * force the error on data
1322 * being copied from SLI-Host to SLI-Port.
1323 */
1324 phba->lpfc_injerr_wgrd_cnt--;
1325 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1326 phba->lpfc_injerr_nportid = 0;
1327 phba->lpfc_injerr_lba =
1328 LPFC_INJERR_LBA_OFF;
1329 memset(&phba->lpfc_injerr_wwpn,
1330 0, sizeof(struct lpfc_name));
1331 }
1332
1333 rc = BG_ERR_INIT | BG_ERR_SWAP;
1334 /* Signals the caller to swap CRC->CSUM */
1335
1336 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1337 "0816 BLKGRD: Injecting guard error: "
1338 "write lba x%lx\n", (unsigned long)lba);
1339 break;
1340 }
1341 }
1342 if (phba->lpfc_injerr_rgrd_cnt) {
1343 switch (op) {
1344 case SCSI_PROT_READ_INSERT:
1345 case SCSI_PROT_READ_STRIP:
1346 case SCSI_PROT_READ_PASS:
1347 /*
1348 * For READ_STRIP and READ_PASS, force the
1349 * error on data being read off the wire. It
1350 * should force an IO error to the driver.
1351 */
1352 phba->lpfc_injerr_rgrd_cnt--;
1353 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1354 phba->lpfc_injerr_nportid = 0;
1355 phba->lpfc_injerr_lba =
1356 LPFC_INJERR_LBA_OFF;
1357 memset(&phba->lpfc_injerr_wwpn,
1358 0, sizeof(struct lpfc_name));
1359 }
1360
1361 rc = BG_ERR_INIT | BG_ERR_SWAP;
1362 /* Signals the caller to swap CRC->CSUM */
1363
1364 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1365 "0818 BLKGRD: Injecting guard error: "
1366 "read lba x%lx\n", (unsigned long)lba);
1367 }
1368 }
1369 }
1370
1371 return rc;
1372 }
1373 #endif
1374
1375 /**
1376 * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
1377 * the specified SCSI command.
1378 * @phba: The Hba for which this call is being executed.
1379 * @sc: The SCSI command to examine
* @txop: (out) BlockGuard operation for transmitted data
* @rxop: (out) BlockGuard operation for received data
1382 *
1383 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1384 *
1385 **/
1386 static int
lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1388 uint8_t *txop, uint8_t *rxop)
1389 {
1390 uint8_t ret = 0;
1391
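/*
* When the host supplies IP-checksum guards (DIX), the host-side leg of
* each operation uses CSUM while the wire-side leg uses T10 CRC; with
* CRC guards both legs use CRC.
*/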
1392 if (lpfc_cmd_guard_csum(sc)) {
1393 switch (scsi_get_prot_op(sc)) {
1394 case SCSI_PROT_READ_INSERT:
1395 case SCSI_PROT_WRITE_STRIP:
1396 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1397 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1398 break;
1399
1400 case SCSI_PROT_READ_STRIP:
1401 case SCSI_PROT_WRITE_INSERT:
1402 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1403 *txop = BG_OP_IN_NODIF_OUT_CRC;
1404 break;
1405
1406 case SCSI_PROT_READ_PASS:
1407 case SCSI_PROT_WRITE_PASS:
1408 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1409 *txop = BG_OP_IN_CSUM_OUT_CRC;
1410 break;
1411
1412 case SCSI_PROT_NORMAL:
1413 default:
1414 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1415 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1416 scsi_get_prot_op(sc));
1417 ret = 1;
1418 break;
1419
1420 }
1421 } else {
1422 switch (scsi_get_prot_op(sc)) {
1423 case SCSI_PROT_READ_STRIP:
1424 case SCSI_PROT_WRITE_INSERT:
1425 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1426 *txop = BG_OP_IN_NODIF_OUT_CRC;
1427 break;
1428
1429 case SCSI_PROT_READ_PASS:
1430 case SCSI_PROT_WRITE_PASS:
1431 *rxop = BG_OP_IN_CRC_OUT_CRC;
1432 *txop = BG_OP_IN_CRC_OUT_CRC;
1433 break;
1434
1435 case SCSI_PROT_READ_INSERT:
1436 case SCSI_PROT_WRITE_STRIP:
1437 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1438 *txop = BG_OP_IN_CRC_OUT_NODIF;
1439 break;
1440
1441 case SCSI_PROT_NORMAL:
1442 default:
1443 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1444 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1445 scsi_get_prot_op(sc));
1446 ret = 1;
1447 break;
1448 }
1449 }
1450
1451 return ret;
1452 }
1453
1454 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1455 /**
* lpfc_bg_err_opcodes - Re-determine the BlockGuard opcodes to be used with
* the specified SCSI command in order to force a guard tag error.
1458 * @phba: The Hba for which this call is being executed.
1459 * @sc: The SCSI command to examine
* @txop: (out) BlockGuard operation for transmitted data
* @rxop: (out) BlockGuard operation for received data
1462 *
1463 * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
1464 *
1465 **/
1466 static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1468 uint8_t *txop, uint8_t *rxop)
1469 {
1470 uint8_t ret = 0;
1471
1472 if (lpfc_cmd_guard_csum(sc)) {
1473 switch (scsi_get_prot_op(sc)) {
1474 case SCSI_PROT_READ_INSERT:
1475 case SCSI_PROT_WRITE_STRIP:
1476 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1477 *txop = BG_OP_IN_CRC_OUT_NODIF;
1478 break;
1479
1480 case SCSI_PROT_READ_STRIP:
1481 case SCSI_PROT_WRITE_INSERT:
1482 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1483 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1484 break;
1485
1486 case SCSI_PROT_READ_PASS:
1487 case SCSI_PROT_WRITE_PASS:
1488 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1489 *txop = BG_OP_IN_CRC_OUT_CSUM;
1490 break;
1491
1492 case SCSI_PROT_NORMAL:
1493 default:
1494 break;
1495
1496 }
1497 } else {
1498 switch (scsi_get_prot_op(sc)) {
1499 case SCSI_PROT_READ_STRIP:
1500 case SCSI_PROT_WRITE_INSERT:
1501 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1502 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1503 break;
1504
1505 case SCSI_PROT_READ_PASS:
1506 case SCSI_PROT_WRITE_PASS:
1507 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1508 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1509 break;
1510
1511 case SCSI_PROT_READ_INSERT:
1512 case SCSI_PROT_WRITE_STRIP:
1513 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1514 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1515 break;
1516
1517 case SCSI_PROT_NORMAL:
1518 default:
1519 break;
1520 }
1521 }
1522
1523 return ret;
1524 }
1525 #endif
1526
1527 /**
1528 * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
1529 * @phba: The Hba for which this call is being executed.
1530 * @sc: pointer to scsi command we're working on
1531 * @bpl: pointer to buffer list for protection groups
* @datasegcnt: number of segments of data that have been dma mapped
1533 *
1534 * This function sets up BPL buffer list for protection groups of
1535 * type LPFC_PG_TYPE_NO_DIF
1536 *
* This is usually used when the HBA is instructed to generate
* DIFs and insert them into the data stream (or strip the DIF from
* the incoming data stream).
1540 *
1541 * The buffer list consists of just one protection group described
1542 * below:
1543 * +-------------------------+
1544 * start of prot group --> | PDE_5 |
1545 * +-------------------------+
1546 * | PDE_6 |
1547 * +-------------------------+
1548 * | Data BDE |
1549 * +-------------------------+
1550 * |more Data BDE's ... (opt)|
1551 * +-------------------------+
1552 *
1553 *
1554 * Note: Data s/g buffers have been dma mapped
1555 *
1556 * Returns the number of BDEs added to the BPL.
1557 **/
1558 static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1560 struct ulp_bde64 *bpl, int datasegcnt)
1561 {
1562 struct scatterlist *sgde = NULL; /* s/g data entry */
1563 struct lpfc_pde5 *pde5 = NULL;
1564 struct lpfc_pde6 *pde6 = NULL;
1565 dma_addr_t physaddr;
1566 int i = 0, num_bde = 0, status;
1567 int datadir = sc->sc_data_direction;
1568 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1569 uint32_t rc;
1570 #endif
1571 uint32_t checking = 1;
1572 uint32_t reftag;
1573 uint8_t txop, rxop;
1574
1575 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1576 if (status)
1577 goto out;
1578
/* extract some info from the scsi command for pde */
1580 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1581
1582 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1583 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1584 if (rc) {
1585 if (rc & BG_ERR_SWAP)
1586 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1587 if (rc & BG_ERR_CHECK)
1588 checking = 0;
1589 }
1590 #endif
1591
1592 /* setup PDE5 with what we have */
1593 pde5 = (struct lpfc_pde5 *) bpl;
1594 memset(pde5, 0, sizeof(struct lpfc_pde5));
1595 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1596
1597 /* Endianness conversion if necessary for PDE5 */
1598 pde5->word0 = cpu_to_le32(pde5->word0);
1599 pde5->reftag = cpu_to_le32(reftag);
1600
1601 /* advance bpl and increment bde count */
1602 num_bde++;
1603 bpl++;
1604 pde6 = (struct lpfc_pde6 *) bpl;
1605
1606 /* setup PDE6 with the rest of the info */
1607 memset(pde6, 0, sizeof(struct lpfc_pde6));
1608 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1609 bf_set(pde6_optx, pde6, txop);
1610 bf_set(pde6_oprx, pde6, rxop);
1611
1612 /*
1613 * We only need to check the data on READs, for WRITEs
1614 * protection data is automatically generated, not checked.
1615 */
1616 if (datadir == DMA_FROM_DEVICE) {
1617 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1618 bf_set(pde6_ce, pde6, checking);
1619 else
1620 bf_set(pde6_ce, pde6, 0);
1621
1622 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1623 bf_set(pde6_re, pde6, checking);
1624 else
1625 bf_set(pde6_re, pde6, 0);
1626 }
1627 bf_set(pde6_ai, pde6, 1);
1628 bf_set(pde6_ae, pde6, 0);
1629 bf_set(pde6_apptagval, pde6, 0);
1630
1631 /* Endianness conversion if necessary for PDE6 */
1632 pde6->word0 = cpu_to_le32(pde6->word0);
1633 pde6->word1 = cpu_to_le32(pde6->word1);
1634 pde6->word2 = cpu_to_le32(pde6->word2);
1635
1636 /* advance bpl and increment bde count */
1637 num_bde++;
1638 bpl++;
1639
1640 /* assumption: caller has already run dma_map_sg on command data */
1641 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
1642 physaddr = sg_dma_address(sgde);
1643 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
1644 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1645 bpl->tus.f.bdeSize = sg_dma_len(sgde);
1646 if (datadir == DMA_TO_DEVICE)
1647 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1648 else
1649 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1650 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1651 bpl++;
1652 num_bde++;
1653 }
1654
1655 out:
1656 return num_bde;
1657 }
1658
1659 /**
1660 * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
1661 * @phba: The Hba for which this call is being executed.
1662 * @sc: pointer to scsi command we're working on
1663 * @bpl: pointer to buffer list for protection groups
1664 * @datacnt: number of segments of data that have been dma mapped
* @protcnt: number of segments of protection data that have been dma mapped
1666 *
1667 * This function sets up BPL buffer list for protection groups of
1668 * type LPFC_PG_TYPE_DIF
1669 *
* This is usually used when DIFs are in their own buffers,
* separate from the data. The HBA can then be instructed
* to place the DIFs in the outgoing stream. For read operations,
* the HBA can extract the DIFs and place them in DIF buffers.
1674 *
1675 * The buffer list for this type consists of one or more of the
1676 * protection groups described below:
1677 * +-------------------------+
1678 * start of first prot group --> | PDE_5 |
1679 * +-------------------------+
1680 * | PDE_6 |
1681 * +-------------------------+
1682 * | PDE_7 (Prot BDE) |
1683 * +-------------------------+
1684 * | Data BDE |
1685 * +-------------------------+
1686 * |more Data BDE's ... (opt)|
1687 * +-------------------------+
1688 * start of new prot group --> | PDE_5 |
1689 * +-------------------------+
1690 * | ... |
1691 * +-------------------------+
1692 *
1693 * Note: It is assumed that both data and protection s/g buffers have been
1694 * mapped for DMA
1695 *
1696 * Returns the number of BDEs added to the BPL.
1697 **/
1698 static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1700 struct ulp_bde64 *bpl, int datacnt, int protcnt)
1701 {
1702 struct scatterlist *sgde = NULL; /* s/g data entry */
1703 struct scatterlist *sgpe = NULL; /* s/g prot entry */
1704 struct lpfc_pde5 *pde5 = NULL;
1705 struct lpfc_pde6 *pde6 = NULL;
1706 struct lpfc_pde7 *pde7 = NULL;
1707 dma_addr_t dataphysaddr, protphysaddr;
1708 unsigned short curr_data = 0, curr_prot = 0;
1709 unsigned int split_offset;
1710 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
1711 unsigned int protgrp_blks, protgrp_bytes;
1712 unsigned int remainder, subtotal;
1713 int status;
1714 int datadir = sc->sc_data_direction;
1715 unsigned char pgdone = 0, alldone = 0;
1716 unsigned blksize;
1717 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1718 uint32_t rc;
1719 #endif
1720 uint32_t checking = 1;
1721 uint32_t reftag;
1722 uint8_t txop, rxop;
1723 int num_bde = 0;
1724
1725 sgpe = scsi_prot_sglist(sc);
1726 sgde = scsi_sglist(sc);
1727
1728 if (!sgpe || !sgde) {
1729 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1730 "9020 Invalid s/g entry: data=x%px prot=x%px\n",
1731 sgpe, sgde);
1732 return 0;
1733 }
1734
1735 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1736 if (status)
1737 goto out;
1738
1739 /* extract some info from the scsi command */
1740 blksize = lpfc_cmd_blksize(sc);
1741 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1742
1743 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1744 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1745 if (rc) {
1746 if (rc & BG_ERR_SWAP)
1747 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1748 if (rc & BG_ERR_CHECK)
1749 checking = 0;
1750 }
1751 #endif
1752
1753 split_offset = 0;
1754 do {
1755 /* Check to see if we ran out of space */
1756 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1757 return num_bde + 3;
1758
1759 /* setup PDE5 with what we have */
1760 pde5 = (struct lpfc_pde5 *) bpl;
1761 memset(pde5, 0, sizeof(struct lpfc_pde5));
1762 bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);
1763
1764 /* Endianness conversion if necessary for PDE5 */
1765 pde5->word0 = cpu_to_le32(pde5->word0);
1766 pde5->reftag = cpu_to_le32(reftag);
1767
1768 /* advance bpl and increment bde count */
1769 num_bde++;
1770 bpl++;
1771 pde6 = (struct lpfc_pde6 *) bpl;
1772
1773 /* setup PDE6 with the rest of the info */
1774 memset(pde6, 0, sizeof(struct lpfc_pde6));
1775 bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
1776 bf_set(pde6_optx, pde6, txop);
1777 bf_set(pde6_oprx, pde6, rxop);
1778
1779 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1780 bf_set(pde6_ce, pde6, checking);
1781 else
1782 bf_set(pde6_ce, pde6, 0);
1783
1784 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
1785 bf_set(pde6_re, pde6, checking);
1786 else
1787 bf_set(pde6_re, pde6, 0);
1788
1789 bf_set(pde6_ai, pde6, 1);
1790 bf_set(pde6_ae, pde6, 0);
1791 bf_set(pde6_apptagval, pde6, 0);
1792
1793 /* Endianness conversion if necessary for PDE6 */
1794 pde6->word0 = cpu_to_le32(pde6->word0);
1795 pde6->word1 = cpu_to_le32(pde6->word1);
1796 pde6->word2 = cpu_to_le32(pde6->word2);
1797
1798 /* advance bpl and increment bde count */
1799 num_bde++;
1800 bpl++;
1801
1802 /* setup the first BDE that points to protection buffer */
1803 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
1804 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
1805
1806 /* must be an integer multiple of the 8-byte DIF tuple size */
1807 BUG_ON(protgroup_len % 8);
1808
1809 pde7 = (struct lpfc_pde7 *) bpl;
1810 memset(pde7, 0, sizeof(struct lpfc_pde7));
1811 bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);
1812
1813 pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1814 pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1815
1816 protgrp_blks = protgroup_len / 8;
1817 protgrp_bytes = protgrp_blks * blksize;
1818
1819 /* check if this pde is crossing the 4K boundary; if so split */
1820 if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
1821 protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
1822 protgroup_offset += protgroup_remainder;
1823 protgrp_blks = protgroup_remainder / 8;
1824 protgrp_bytes = protgrp_blks * blksize;
1825 } else {
1826 protgroup_offset = 0;
1827 curr_prot++;
1828 }
1829
1830 num_bde++;
1831
1832 /* setup BDE's for data blocks associated with DIF data */
1833 pgdone = 0;
1834 subtotal = 0; /* total bytes processed for current prot grp */
1835 while (!pgdone) {
1836 /* Check to see if we ran out of space */
1837 if (num_bde >= phba->cfg_total_seg_cnt)
1838 return num_bde + 1;
1839
1840 if (!sgde) {
1841 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1842 "9065 BLKGRD:%s Invalid data segment\n",
1843 __func__);
1844 return 0;
1845 }
1846 bpl++;
1847 dataphysaddr = sg_dma_address(sgde) + split_offset;
1848 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1849 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1850
1851 remainder = sg_dma_len(sgde) - split_offset;
1852
1853 if ((subtotal + remainder) <= protgrp_bytes) {
1854 /* we can use this whole buffer */
1855 bpl->tus.f.bdeSize = remainder;
1856 split_offset = 0;
1857
1858 if ((subtotal + remainder) == protgrp_bytes)
1859 pgdone = 1;
1860 } else {
1861 /* must split this buffer with next prot grp */
1862 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1863 split_offset += bpl->tus.f.bdeSize;
1864 }
1865
1866 subtotal += bpl->tus.f.bdeSize;
1867
1868 if (datadir == DMA_TO_DEVICE)
1869 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1870 else
1871 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1872 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1873
1874 num_bde++;
1875 curr_data++;
1876
1877 if (split_offset)
1878 break;
1879
1880 /* Move to the next s/g segment if possible */
1881 sgde = sg_next(sgde);
1882
1883 }
1884
1885 if (protgroup_offset) {
1886 /* update the reference tag */
1887 reftag += protgrp_blks;
1888 bpl++;
1889 continue;
1890 }
1891
1892 /* are we done ? */
1893 if (curr_prot == protcnt) {
1894 alldone = 1;
1895 } else if (curr_prot < protcnt) {
1896 /* advance to next prot buffer */
1897 sgpe = sg_next(sgpe);
1898 bpl++;
1899
1900 /* update the reference tag */
1901 reftag += protgrp_blks;
1902 } else {
1903 /* if we're here, we have a bug */
1904 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1905 "9054 BLKGRD: bug in %s\n", __func__);
1906 }
1907
1908 } while (!alldone);
1909 out:
1910
1911 return num_bde;
1912 }
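
/*
 * Worked example of the protection-group split above (illustrative
 * numbers only): assume a 512-byte logical block size and a protection
 * segment of 512 bytes (64 DIF tuples) whose DMA address ends in 0xf80.
 * Because 0xf80 + 512 > 0x1000, the PDE7 is split at the 4K boundary:
 *   protgroup_remainder = 0x1000 - 0xf80 = 128 bytes
 *   protgrp_blks        = 128 / 8       = 16 tuples
 *   protgrp_bytes       = 16 * 512      = 8192 bytes of data BDEs
 * The reference tag then advances by 16 and the remaining 48 tuples of
 * the same s/g entry are consumed on the next pass of the outer loop.
 */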
1913
1914 /**
1915 * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
1916 * @phba: The Hba for which this call is being executed.
1917 * @sc: pointer to scsi command we're working on
1918 * @sgl: pointer to buffer list for protection groups
1919 * @datasegcnt: number of segments of data that have been dma mapped
1920 *
1921 * This function sets up SGL buffer list for protection groups of
1922 * type LPFC_PG_TYPE_NO_DIF
1923 *
1924 * This is usually used when the HBA is instructed to generate
1925 * DIFs and insert them into the data stream (or strip DIFs from
1926 * the incoming data stream).
1927 *
1928 * The buffer list consists of just one protection group described
1929 * below:
1930 * +-------------------------+
1931 * start of prot group --> | DI_SEED |
1932 * +-------------------------+
1933 * | Data SGE |
1934 * +-------------------------+
1935 * |more Data SGE's ... (opt)|
1936 * +-------------------------+
1937 *
1938 *
1939 * Note: Data s/g buffers have been dma mapped
1940 *
1941 * Returns the number of SGEs added to the SGL.
1942 **/
1943 static int
1944 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1945 struct sli4_sge *sgl, int datasegcnt,
1946 struct lpfc_io_buf *lpfc_cmd)
1947 {
1948 struct scatterlist *sgde = NULL; /* s/g data entry */
1949 struct sli4_sge_diseed *diseed = NULL;
1950 dma_addr_t physaddr;
1951 int i = 0, num_sge = 0, status;
1952 uint32_t reftag;
1953 uint8_t txop, rxop;
1954 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1955 uint32_t rc;
1956 #endif
1957 uint32_t checking = 1;
1958 uint32_t dma_len;
1959 uint32_t dma_offset = 0;
1960 struct sli4_hybrid_sgl *sgl_xtra = NULL;
1961 int j;
1962 bool lsp_just_set = false;
1963
1964 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1965 if (status)
1966 goto out;
1967
1968 /* extract some info from the scsi command for pde*/
1969 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
1970
1971 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1972 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1973 if (rc) {
1974 if (rc & BG_ERR_SWAP)
1975 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1976 if (rc & BG_ERR_CHECK)
1977 checking = 0;
1978 }
1979 #endif
1980
1981 /* setup DISEED with what we have */
1982 diseed = (struct sli4_sge_diseed *) sgl;
1983 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
1984 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
1985
1986 /* Endianness conversion if necessary */
1987 diseed->ref_tag = cpu_to_le32(reftag);
1988 diseed->ref_tag_tran = diseed->ref_tag;
1989
1990 /*
1991 * We only need to check the data on READs, for WRITEs
1992 * protection data is automatically generated, not checked.
1993 */
1994 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
1995 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
1996 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
1997 else
1998 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
1999
2000 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2001 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2002 else
2003 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2004 }
2005
2006 /* setup DISEED with the rest of the info */
2007 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2008 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2009
2010 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2011 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2012
2013 /* Endianness conversion if necessary for DISEED */
2014 diseed->word2 = cpu_to_le32(diseed->word2);
2015 diseed->word3 = cpu_to_le32(diseed->word3);
2016
2017 /* advance bpl and increment sge count */
2018 num_sge++;
2019 sgl++;
2020
2021 /* assumption: caller has already run dma_map_sg on command data */
2022 sgde = scsi_sglist(sc);
2023 j = 3;
2024 for (i = 0; i < datasegcnt; i++) {
2025 /* clear it */
2026 sgl->word2 = 0;
2027
2028 /* do we need to expand the segment */
2029 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2030 ((datasegcnt - 1) != i)) {
2031 /* set LSP type */
2032 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2033
2034 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2035
2036 if (unlikely(!sgl_xtra)) {
2037 lpfc_cmd->seg_cnt = 0;
2038 return 0;
2039 }
2040 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2041 sgl_xtra->dma_phys_sgl));
2042 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2043 sgl_xtra->dma_phys_sgl));
2044
2045 } else {
2046 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2047 }
2048
2049 if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
2050 if ((datasegcnt - 1) == i)
2051 bf_set(lpfc_sli4_sge_last, sgl, 1);
2052 physaddr = sg_dma_address(sgde);
2053 dma_len = sg_dma_len(sgde);
2054 sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
2055 sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
2056
2057 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2058 sgl->word2 = cpu_to_le32(sgl->word2);
2059 sgl->sge_len = cpu_to_le32(dma_len);
2060
2061 dma_offset += dma_len;
2062 sgde = sg_next(sgde);
2063
2064 sgl++;
2065 num_sge++;
2066 lsp_just_set = false;
2067
2068 } else {
2069 sgl->word2 = cpu_to_le32(sgl->word2);
2070 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2071
2072 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2073 i = i - 1;
2074
2075 lsp_just_set = true;
2076 }
2077
2078 j++;
2079
2080 }
2081
2082 out:
2083 return num_sge;
2084 }
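
/*
 * Note on the LSP chaining used above: whenever (j + 1) becomes a
 * multiple of phba->border_sge_num (and the current segment is not the
 * last one), the current slot is written as an LSP SGE pointing at an
 * extra SGL chunk obtained from lpfc_get_sgl_per_hdwq().  The loop then
 * re-processes the same data segment (i is decremented) at the top of
 * the new chunk, so no data SGE is lost across the chunk boundary.
 */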
2085
2086 /**
2087 * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
2088 * @phba: The Hba for which this call is being executed.
2089 * @sc: pointer to scsi command we're working on
2090 * @sgl: pointer to buffer list for protection groups
2091 * @datacnt: number of segments of data that have been dma mapped
2092 * @protcnt: number of segments of protection data that have been dma mapped
2093 *
2094 * This function sets up SGL buffer list for protection groups of
2095 * type LPFC_PG_TYPE_DIF
2096 *
2097 * This is usually used when DIFs are in their own buffers,
2098 * separate from the data. The HBA can then be instructed
2099 * to place the DIFs in the outgoing stream. For read operations,
2100 * the HBA can extract the DIFs and place them in DIF buffers.
2101 *
2102 * The buffer list for this type consists of one or more of the
2103 * protection groups described below:
2104 * +-------------------------+
2105 * start of first prot group --> | DISEED |
2106 * +-------------------------+
2107 * | DIF (Prot SGE) |
2108 * +-------------------------+
2109 * | Data SGE |
2110 * +-------------------------+
2111 * |more Data SGE's ... (opt)|
2112 * +-------------------------+
2113 * start of new prot group --> | DISEED |
2114 * +-------------------------+
2115 * | ... |
2116 * +-------------------------+
2117 *
2118 * Note: It is assumed that both data and protection s/g buffers have been
2119 * mapped for DMA
2120 *
2121 * Returns the number of SGEs added to the SGL.
2122 **/
2123 static int
2124 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2125 struct sli4_sge *sgl, int datacnt, int protcnt,
2126 struct lpfc_io_buf *lpfc_cmd)
2127 {
2128 struct scatterlist *sgde = NULL; /* s/g data entry */
2129 struct scatterlist *sgpe = NULL; /* s/g prot entry */
2130 struct sli4_sge_diseed *diseed = NULL;
2131 dma_addr_t dataphysaddr, protphysaddr;
2132 unsigned short curr_data = 0, curr_prot = 0;
2133 unsigned int split_offset;
2134 unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
2135 unsigned int protgrp_blks, protgrp_bytes;
2136 unsigned int remainder, subtotal;
2137 int status;
2138 unsigned char pgdone = 0, alldone = 0;
2139 unsigned blksize;
2140 uint32_t reftag;
2141 uint8_t txop, rxop;
2142 uint32_t dma_len;
2143 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2144 uint32_t rc;
2145 #endif
2146 uint32_t checking = 1;
2147 uint32_t dma_offset = 0;
2148 int num_sge = 0, j = 2;
2149 struct sli4_hybrid_sgl *sgl_xtra = NULL;
2150
2151 sgpe = scsi_prot_sglist(sc);
2152 sgde = scsi_sglist(sc);
2153
2154 if (!sgpe || !sgde) {
2155 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2156 "9082 Invalid s/g entry: data=x%px prot=x%px\n",
2157 sgpe, sgde);
2158 return 0;
2159 }
2160
2161 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2162 if (status)
2163 goto out;
2164
2165 /* extract some info from the scsi command */
2166 blksize = lpfc_cmd_blksize(sc);
2167 reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
2168
2169 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2170 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2171 if (rc) {
2172 if (rc & BG_ERR_SWAP)
2173 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2174 if (rc & BG_ERR_CHECK)
2175 checking = 0;
2176 }
2177 #endif
2178
2179 split_offset = 0;
2180 do {
2181 /* Check to see if we ran out of space */
2182 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2183 !(phba->cfg_xpsgl))
2184 return num_sge + 3;
2185
2186 /* DISEED and DIF have to be together */
2187 if (!((j + 1) % phba->border_sge_num) ||
2188 !((j + 2) % phba->border_sge_num) ||
2189 !((j + 3) % phba->border_sge_num)) {
2190 sgl->word2 = 0;
2191
2192 /* set LSP type */
2193 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);
2194
2195 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2196
2197 if (unlikely(!sgl_xtra)) {
2198 goto out;
2199 } else {
2200 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2201 sgl_xtra->dma_phys_sgl));
2202 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2203 sgl_xtra->dma_phys_sgl));
2204 }
2205
2206 sgl->word2 = cpu_to_le32(sgl->word2);
2207 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2208
2209 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2210 j = 0;
2211 }
2212
2213 /* setup DISEED with what we have */
2214 diseed = (struct sli4_sge_diseed *) sgl;
2215 memset(diseed, 0, sizeof(struct sli4_sge_diseed));
2216 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
2217
2218 /* Endianness conversion if necessary */
2219 diseed->ref_tag = cpu_to_le32(reftag);
2220 diseed->ref_tag_tran = diseed->ref_tag;
2221
2222 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
2223 bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
2224
2225 } else {
2226 bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
2227 /*
2228 * When in this mode, the hardware will replace
2229 * the guard tag from the host with a
2230 * newly generated good CRC for the wire.
2231 * Switch to raw mode here to avoid this
2232 * behavior. What the host sends gets put on the wire.
2233 */
2234 if (txop == BG_OP_IN_CRC_OUT_CRC) {
2235 txop = BG_OP_RAW_MODE;
2236 rxop = BG_OP_RAW_MODE;
2237 }
2238 }
2239
2240
2241 if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
2242 bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
2243 else
2244 bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
2245
2246 /* setup DISEED with the rest of the info */
2247 bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
2248 bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
2249
2250 bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
2251 bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
2252
2253 /* Endianness conversion if necessary for DISEED */
2254 diseed->word2 = cpu_to_le32(diseed->word2);
2255 diseed->word3 = cpu_to_le32(diseed->word3);
2256
2257 /* advance sgl and increment bde count */
2258 num_sge++;
2259
2260 sgl++;
2261 j++;
2262
2263 /* setup the first BDE that points to protection buffer */
2264 protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
2265 protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
2266
2267 /* must be an integer multiple of the 8-byte DIF tuple size */
2268 BUG_ON(protgroup_len % 8);
2269
2270 /* Now setup DIF SGE */
2271 sgl->word2 = 0;
2272 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
2273 sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
2274 sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
2275 sgl->word2 = cpu_to_le32(sgl->word2);
2276 sgl->sge_len = 0;
2277
2278 protgrp_blks = protgroup_len / 8;
2279 protgrp_bytes = protgrp_blks * blksize;
2280
2281 /* check if DIF SGE is crossing the 4K boundary; if so split */
2282 if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
2283 protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
2284 protgroup_offset += protgroup_remainder;
2285 protgrp_blks = protgroup_remainder / 8;
2286 protgrp_bytes = protgrp_blks * blksize;
2287 } else {
2288 protgroup_offset = 0;
2289 curr_prot++;
2290 }
2291
2292 num_sge++;
2293
2294 /* setup SGE's for data blocks associated with DIF data */
2295 pgdone = 0;
2296 subtotal = 0; /* total bytes processed for current prot grp */
2297
2298 sgl++;
2299 j++;
2300
2301 while (!pgdone) {
2302 /* Check to see if we ran out of space */
2303 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2304 !phba->cfg_xpsgl)
2305 return num_sge + 1;
2306
2307 if (!sgde) {
2308 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2309 "9086 BLKGRD:%s Invalid data segment\n",
2310 __func__);
2311 return 0;
2312 }
2313
2314 if (!((j + 1) % phba->border_sge_num)) {
2315 sgl->word2 = 0;
2316
2317 /* set LSP type */
2318 bf_set(lpfc_sli4_sge_type, sgl,
2319 LPFC_SGE_TYPE_LSP);
2320
2321 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2322 lpfc_cmd);
2323
2324 if (unlikely(!sgl_xtra)) {
2325 goto out;
2326 } else {
2327 sgl->addr_lo = cpu_to_le32(
2328 putPaddrLow(sgl_xtra->dma_phys_sgl));
2329 sgl->addr_hi = cpu_to_le32(
2330 putPaddrHigh(sgl_xtra->dma_phys_sgl));
2331 }
2332
2333 sgl->word2 = cpu_to_le32(sgl->word2);
2334 sgl->sge_len = cpu_to_le32(
2335 phba->cfg_sg_dma_buf_size);
2336
2337 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
2338 } else {
2339 dataphysaddr = sg_dma_address(sgde) +
2340 split_offset;
2341
2342 remainder = sg_dma_len(sgde) - split_offset;
2343
2344 if ((subtotal + remainder) <= protgrp_bytes) {
2345 /* we can use this whole buffer */
2346 dma_len = remainder;
2347 split_offset = 0;
2348
2349 if ((subtotal + remainder) ==
2350 protgrp_bytes)
2351 pgdone = 1;
2352 } else {
2353 /* must split this buffer with next
2354 * prot grp
2355 */
2356 dma_len = protgrp_bytes - subtotal;
2357 split_offset += dma_len;
2358 }
2359
2360 subtotal += dma_len;
2361
2362 sgl->word2 = 0;
2363 sgl->addr_lo = cpu_to_le32(putPaddrLow(
2364 dataphysaddr));
2365 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
2366 dataphysaddr));
2367 bf_set(lpfc_sli4_sge_last, sgl, 0);
2368 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2369 bf_set(lpfc_sli4_sge_type, sgl,
2370 LPFC_SGE_TYPE_DATA);
2371
2372 sgl->sge_len = cpu_to_le32(dma_len);
2373 dma_offset += dma_len;
2374
2375 num_sge++;
2376 curr_data++;
2377
2378 if (split_offset) {
2379 sgl++;
2380 j++;
2381 break;
2382 }
2383
2384 /* Move to the next s/g segment if possible */
2385 sgde = sg_next(sgde);
2386
2387 sgl++;
2388 }
2389
2390 j++;
2391 }
2392
2393 if (protgroup_offset) {
2394 /* update the reference tag */
2395 reftag += protgrp_blks;
2396 continue;
2397 }
2398
2399 /* are we done ? */
2400 if (curr_prot == protcnt) {
2401 /* mark the last SGL */
2402 sgl--;
2403 bf_set(lpfc_sli4_sge_last, sgl, 1);
2404 alldone = 1;
2405 } else if (curr_prot < protcnt) {
2406 /* advance to next prot buffer */
2407 sgpe = sg_next(sgpe);
2408
2409 /* update the reference tag */
2410 reftag += protgrp_blks;
2411 } else {
2412 /* if we're here, we have a bug */
2413 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
2414 "9085 BLKGRD: bug in %s\n", __func__);
2415 }
2416
2417 } while (!alldone);
2418
2419 out:
2420
2421 return num_sge;
2422 }
2423
2424 /**
2425 * lpfc_prot_group_type - Get protection group type of SCSI command
2426 * @phba: The Hba for which this call is being executed.
2427 * @sc: pointer to scsi command we're working on
2428 *
2429 * Given a SCSI command that supports DIF, determine composition of protection
2430 * groups involved in setting up buffer lists
2431 *
2432 * Returns: Protection group type (with or without DIF)
2433 *
2434 **/
2435 static int
2436 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2437 {
2438 int ret = LPFC_PG_TYPE_INVALID;
2439 unsigned char op = scsi_get_prot_op(sc);
2440
2441 switch (op) {
2442 case SCSI_PROT_READ_STRIP:
2443 case SCSI_PROT_WRITE_INSERT:
2444 ret = LPFC_PG_TYPE_NO_DIF;
2445 break;
2446 case SCSI_PROT_READ_INSERT:
2447 case SCSI_PROT_WRITE_STRIP:
2448 case SCSI_PROT_READ_PASS:
2449 case SCSI_PROT_WRITE_PASS:
2450 ret = LPFC_PG_TYPE_DIF_BUF;
2451 break;
2452 default:
2453 if (phba)
2454 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2455 "9021 Unsupported protection op:%d\n",
2456 op);
2457 break;
2458 }
2459 return ret;
2460 }
2461
2462 /**
2463 * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
2464 * @phba: The Hba for which this call is being executed.
2465 * @lpfc_cmd: The scsi buffer which is going to be adjusted.
2466 *
2467 * Adjust the data length to account for how much data
2468 * is actually on the wire.
2469 *
2470 * returns the adjusted data length
2471 **/
2472 static int
2473 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2474 struct lpfc_io_buf *lpfc_cmd)
2475 {
2476 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2477 int fcpdl;
2478
2479 fcpdl = scsi_bufflen(sc);
2480
2481 /* Check if there is protection data on the wire */
2482 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2483 /* Read check for protection data */
2484 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2485 return fcpdl;
2486
2487 } else {
2488 /* Write check for protection data */
2489 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2490 return fcpdl;
2491 }
2492
2493 /*
2494 * If we are in DIF Type 1 mode every data block has an 8 byte
2495 * DIF (trailer) attached to it. Must adjust FCP data length
2496 * to account for the protection data.
2497 */
2498 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2499
2500 return fcpdl;
2501 }
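
/*
 * Worked example for the adjustment above (illustrative numbers): a
 * 32768-byte transfer on a 512-byte block device carries 32768 / 512 =
 * 64 logical blocks, so 64 * 8 = 512 bytes of DIF travel on the wire
 * and the adjusted length is 32768 + 512 = 33280 bytes.  READ_INSERT
 * and WRITE_STRIP are exempt because no protection data crosses the
 * wire for those operations.
 */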
2502
2503 /**
2504 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
2505 * @phba: The Hba for which this call is being executed.
2506 * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
2507 *
2508 * This is the protection/DIF aware version of
2509 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
2510 * two functions eventually, but for now, it's here.
2511 * RETURNS 0 - SUCCESS,
2512 * 1 - Failed DMA map, retry.
2513 * 2 - Invalid scsi cmd or prot-type. Do not retry.
2514 **/
2515 static int
2516 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2517 struct lpfc_io_buf *lpfc_cmd)
2518 {
2519 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2520 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2521 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2522 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2523 uint32_t num_bde = 0;
2524 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2525 int prot_group_type = 0;
2526 int fcpdl;
2527 int ret = 1;
2528 struct lpfc_vport *vport = phba->pport;
2529
2530 /*
2531 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
2532 * fcp_rsp regions to the first data bde entry
2533 */
2534 bpl += 2;
2535 if (scsi_sg_count(scsi_cmnd)) {
2536 /*
2537 * The driver stores the segment count returned from pci_map_sg
2538 * because this is a count of dma-mappings used to map the use_sg
2539 * pages. They are not guaranteed to be the same for those
2540 * architectures that implement an IOMMU.
2541 */
2542 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2543 scsi_sglist(scsi_cmnd),
2544 scsi_sg_count(scsi_cmnd), datadir);
2545 if (unlikely(!datasegcnt))
2546 return 1;
2547
2548 lpfc_cmd->seg_cnt = datasegcnt;
2549
2550 /* First check if data segment count from SCSI Layer is good */
2551 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2552 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2553 ret = 2;
2554 goto err;
2555 }
2556
2557 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2558
2559 switch (prot_group_type) {
2560 case LPFC_PG_TYPE_NO_DIF:
2561
2562 /* Here we need to add a PDE5 and PDE6 to the count */
2563 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2564 ret = 2;
2565 goto err;
2566 }
2567
2568 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2569 datasegcnt);
2570 /* we should have 2 or more entries in buffer list */
2571 if (num_bde < 2) {
2572 ret = 2;
2573 goto err;
2574 }
2575 break;
2576
2577 case LPFC_PG_TYPE_DIF_BUF:
2578 /*
2579 * This type indicates that protection buffers are
2580 * passed to the driver, so that needs to be prepared
2581 * for DMA
2582 */
2583 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2584 scsi_prot_sglist(scsi_cmnd),
2585 scsi_prot_sg_count(scsi_cmnd), datadir);
2586 if (unlikely(!protsegcnt)) {
2587 scsi_dma_unmap(scsi_cmnd);
2588 return 1;
2589 }
2590
2591 lpfc_cmd->prot_seg_cnt = protsegcnt;
2592
2593 /*
2594 * There is a minimum of 4 BPLs used for every
2595 * protection data segment.
2596 */
2597 if ((lpfc_cmd->prot_seg_cnt * 4) >
2598 (phba->cfg_total_seg_cnt - 2)) {
2599 ret = 2;
2600 goto err;
2601 }
2602
2603 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2604 datasegcnt, protsegcnt);
2605 /* we should have 3 or more entries in buffer list */
2606 if ((num_bde < 3) ||
2607 (num_bde > phba->cfg_total_seg_cnt)) {
2608 ret = 2;
2609 goto err;
2610 }
2611 break;
2612
2613 case LPFC_PG_TYPE_INVALID:
2614 default:
2615 scsi_dma_unmap(scsi_cmnd);
2616 lpfc_cmd->seg_cnt = 0;
2617
2618 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2619 "9022 Unexpected protection group %i\n",
2620 prot_group_type);
2621 return 2;
2622 }
2623 }
2624
2625 /*
2626 * Finish initializing those IOCB fields that are dependent on the
2627 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
2628 * reinitialized since all iocb memory resources are used many times
2629 * for transmit, receive, and continuation bpl's.
2630 */
2631 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2632 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2633 iocb_cmd->ulpBdeCount = 1;
2634 iocb_cmd->ulpLe = 1;
2635
2636 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2637 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2638
2639 /*
2640 * Due to difference in data length between DIF/non-DIF paths,
2641 * we need to set word 4 of IOCB here
2642 */
2643 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2644
2645 /*
2646 * For First burst, we may need to adjust the initial transfer
2647 * length for DIF
2648 */
2649 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2650 (fcpdl < vport->cfg_first_burst_size))
2651 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2652
2653 return 0;
2654 err:
2655 if (lpfc_cmd->seg_cnt)
2656 scsi_dma_unmap(scsi_cmnd);
2657 if (lpfc_cmd->prot_seg_cnt)
2658 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2659 scsi_prot_sg_count(scsi_cmnd),
2660 scsi_cmnd->sc_data_direction);
2661
2662 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
2663 "9023 Cannot setup S/G List for HBA"
2664 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2665 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2666 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2667 prot_group_type, num_bde);
2668
2669 lpfc_cmd->seg_cnt = 0;
2670 lpfc_cmd->prot_seg_cnt = 0;
2671 return ret;
2672 }
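
/*
 * Illustrative capacity check for the DIF_BUF path above: with a
 * hypothetical phba->cfg_total_seg_cnt of 66, the four-BPLs-per-
 * protection-segment rule limits prot_seg_cnt to (66 - 2) / 4 = 16
 * segments; a command whose protection s/g list maps to more segments
 * than that is failed with ret = 2 (no retry) before any BPL is built.
 */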
2673
2674 /*
2675 * This function calculates the T10 DIF guard tag
2676 * on the specified data using a CRC algorithm
2677 * (crc_t10dif).
2678 */
2679 static uint16_t
2680 lpfc_bg_crc(uint8_t *data, int count)
2681 {
2682 uint16_t crc = 0;
2683 uint16_t x;
2684
2685 crc = crc_t10dif(data, count);
2686 x = cpu_to_be16(crc);
2687 return x;
2688 }
2689
2690 /*
2691 * This function calculates the T10 DIF guard tag
2692 * on the specified data using an IP checksum algorithm
2693 * (ip_compute_csum).
2694 */
2695 static uint16_t
2696 lpfc_bg_csum(uint8_t *data, int count)
2697 {
2698 uint16_t ret;
2699
2700 ret = ip_compute_csum(data, count);
2701 return ret;
2702 }
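
/*
 * Usage note: both helpers return the guard tag in the on-the-wire
 * form held by struct scsi_dif_tuple, so lpfc_calc_bg_err() below can
 * compare the result directly against src->guard_tag.  Which helper is
 * used depends on the guard type of the command: lpfc_bg_csum() when
 * lpfc_cmd_guard_csum() indicates an IP-checksum guard, lpfc_bg_crc()
 * for a T10 CRC guard.
 */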
2703
2704 /*
2705 * This function examines the protection data to try to determine
2706 * what type of T10-DIF error occurred.
2707 */
2708 static void
2709 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2710 {
2711 struct scatterlist *sgpe; /* s/g prot entry */
2712 struct scatterlist *sgde; /* s/g data entry */
2713 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2714 struct scsi_dif_tuple *src = NULL;
2715 uint8_t *data_src = NULL;
2716 uint16_t guard_tag;
2717 uint16_t start_app_tag, app_tag;
2718 uint32_t start_ref_tag, ref_tag;
2719 int prot, protsegcnt;
2720 int err_type, len, data_len;
2721 int chk_ref, chk_app, chk_guard;
2722 uint16_t sum;
2723 unsigned blksize;
2724
2725 err_type = BGS_GUARD_ERR_MASK;
2726 sum = 0;
2727 guard_tag = 0;
2728
2729 /* First check to see if there is protection data to examine */
2730 prot = scsi_get_prot_op(cmd);
2731 if ((prot == SCSI_PROT_READ_STRIP) ||
2732 (prot == SCSI_PROT_WRITE_INSERT) ||
2733 (prot == SCSI_PROT_NORMAL))
2734 goto out;
2735
2736 /* Currently the driver just supports ref_tag and guard_tag checking */
2737 chk_ref = 1;
2738 chk_app = 0;
2739 chk_guard = 0;
2740
2741 /* Setup a ptr to the protection data provided by the SCSI host */
2742 sgpe = scsi_prot_sglist(cmd);
2743 protsegcnt = lpfc_cmd->prot_seg_cnt;
2744
2745 if (sgpe && protsegcnt) {
2746
2747 /*
2748 * We will only try to verify guard tag if the segment
2749 * data length is a multiple of the blksize.
2750 */
2751 sgde = scsi_sglist(cmd);
2752 blksize = lpfc_cmd_blksize(cmd);
2753 data_src = (uint8_t *)sg_virt(sgde);
2754 data_len = sgde->length;
2755 if ((data_len & (blksize - 1)) == 0)
2756 chk_guard = 1;
2757
2758 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2759 start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */
2760 start_app_tag = src->app_tag;
2761 len = sgpe->length;
2762 while (src && protsegcnt) {
2763 while (len) {
2764
2765 /*
2766 * First check to see if a protection data
2767 * check is valid
2768 */
2769 if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
2770 (src->app_tag == T10_PI_APP_ESCAPE)) {
2771 start_ref_tag++;
2772 goto skipit;
2773 }
2774
2775 /* First Guard Tag checking */
2776 if (chk_guard) {
2777 guard_tag = src->guard_tag;
2778 if (lpfc_cmd_guard_csum(cmd))
2779 sum = lpfc_bg_csum(data_src,
2780 blksize);
2781 else
2782 sum = lpfc_bg_crc(data_src,
2783 blksize);
2784 if ((guard_tag != sum)) {
2785 err_type = BGS_GUARD_ERR_MASK;
2786 goto out;
2787 }
2788 }
2789
2790 /* Reference Tag checking */
2791 ref_tag = be32_to_cpu(src->ref_tag);
2792 if (chk_ref && (ref_tag != start_ref_tag)) {
2793 err_type = BGS_REFTAG_ERR_MASK;
2794 goto out;
2795 }
2796 start_ref_tag++;
2797
2798 /* App Tag checking */
2799 app_tag = src->app_tag;
2800 if (chk_app && (app_tag != start_app_tag)) {
2801 err_type = BGS_APPTAG_ERR_MASK;
2802 goto out;
2803 }
2804 skipit:
2805 len -= sizeof(struct scsi_dif_tuple);
2806 if (len < 0)
2807 len = 0;
2808 src++;
2809
2810 data_src += blksize;
2811 data_len -= blksize;
2812
2813 /*
2814 * Are we at the end of the Data segment?
2815 * The data segment is only used for Guard
2816 * tag checking.
2817 */
2818 if (chk_guard && (data_len == 0)) {
2819 chk_guard = 0;
2820 sgde = sg_next(sgde);
2821 if (!sgde)
2822 goto out;
2823
2824 data_src = (uint8_t *)sg_virt(sgde);
2825 data_len = sgde->length;
2826 if ((data_len & (blksize - 1)) == 0)
2827 chk_guard = 1;
2828 }
2829 }
2830
2831 /* Go to the next protection data segment */
2832 sgpe = sg_next(sgpe);
2833 if (sgpe) {
2834 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
2835 len = sgpe->length;
2836 } else {
2837 src = NULL;
2838 }
2839 protsegcnt--;
2840 }
2841 }
2842 out:
2843 if (err_type == BGS_GUARD_ERR_MASK) {
2844 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2845 0x10, 0x1);
2846 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2847 SAM_STAT_CHECK_CONDITION;
2848 phba->bg_guard_err_cnt++;
2849 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2850 "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
2851 (unsigned long)scsi_get_lba(cmd),
2852 sum, guard_tag);
2853
2854 } else if (err_type == BGS_REFTAG_ERR_MASK) {
2855 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2856 0x10, 0x3);
2857 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2858 SAM_STAT_CHECK_CONDITION;
2859
2860 phba->bg_reftag_err_cnt++;
2861 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2862 "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n",
2863 (unsigned long)scsi_get_lba(cmd),
2864 ref_tag, start_ref_tag);
2865
2866 } else if (err_type == BGS_APPTAG_ERR_MASK) {
2867 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2868 0x10, 0x2);
2869 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2870 SAM_STAT_CHECK_CONDITION;
2871
2872 phba->bg_apptag_err_cnt++;
2873 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2874 "9041 BLKGRD: LBA %lx app_tag error %x != %x\n",
2875 (unsigned long)scsi_get_lba(cmd),
2876 app_tag, start_app_tag);
2877 }
2878 }
2879
2880
2881 /*
2882 * This function checks for BlockGuard errors detected by
2883 * the HBA. In case of errors, the ASC/ASCQ fields in the
2884 * sense buffer will be set accordingly, paired with
2885 * ILLEGAL_REQUEST to signal to the kernel that the HBA
2886 * detected corruption.
2887 *
2888 * Returns:
2889 * 0 - No error found
2890 * 1 - BlockGuard error found
2891 * -1 - Internal error (bad profile, etc.)
2892 */
2893 static int
2894 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2895 struct lpfc_iocbq *pIocbOut)
2896 {
2897 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
2898 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
2899 int ret = 0;
2900 uint32_t bghm = bgf->bghm;
2901 uint32_t bgstat = bgf->bgstat;
2902 uint64_t failing_sector = 0;
2903
2904 if (lpfc_bgs_get_invalid_prof(bgstat)) {
2905 cmd->result = DID_ERROR << 16;
2906 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2907 "9072 BLKGRD: Invalid BG Profile in cmd"
2908 " 0x%x lba 0x%llx blk cnt 0x%x "
2909 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2910 (unsigned long long)scsi_get_lba(cmd),
2911 blk_rq_sectors(cmd->request), bgstat, bghm);
2912 ret = (-1);
2913 goto out;
2914 }
2915
2916 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
2917 cmd->result = DID_ERROR << 16;
2918 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2919 "9073 BLKGRD: Invalid BG PDIF Block in cmd"
2920 " 0x%x lba 0x%llx blk cnt 0x%x "
2921 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2922 (unsigned long long)scsi_get_lba(cmd),
2923 blk_rq_sectors(cmd->request), bgstat, bghm);
2924 ret = (-1);
2925 goto out;
2926 }
2927
2928 if (lpfc_bgs_get_guard_err(bgstat)) {
2929 ret = 1;
2930
2931 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2932 0x10, 0x1);
2933 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2934 SAM_STAT_CHECK_CONDITION;
2935 phba->bg_guard_err_cnt++;
2936 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2937 "9055 BLKGRD: Guard Tag error in cmd"
2938 " 0x%x lba 0x%llx blk cnt 0x%x "
2939 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2940 (unsigned long long)scsi_get_lba(cmd),
2941 blk_rq_sectors(cmd->request), bgstat, bghm);
2942 }
2943
2944 if (lpfc_bgs_get_reftag_err(bgstat)) {
2945 ret = 1;
2946
2947 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2948 0x10, 0x3);
2949 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2950 SAM_STAT_CHECK_CONDITION;
2951
2952 phba->bg_reftag_err_cnt++;
2953 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2954 "9056 BLKGRD: Ref Tag error in cmd"
2955 " 0x%x lba 0x%llx blk cnt 0x%x "
2956 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2957 (unsigned long long)scsi_get_lba(cmd),
2958 blk_rq_sectors(cmd->request), bgstat, bghm);
2959 }
2960
2961 if (lpfc_bgs_get_apptag_err(bgstat)) {
2962 ret = 1;
2963
2964 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
2965 0x10, 0x2);
2966 cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
2967 SAM_STAT_CHECK_CONDITION;
2968
2969 phba->bg_apptag_err_cnt++;
2970 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2971 "9061 BLKGRD: App Tag error in cmd"
2972 " 0x%x lba 0x%llx blk cnt 0x%x "
2973 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
2974 (unsigned long long)scsi_get_lba(cmd),
2975 blk_rq_sectors(cmd->request), bgstat, bghm);
2976 }
2977
2978 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
2979 /*
2980 * setup sense data descriptor 0 per SPC-4 as an information
2981 * field, and put the failing LBA in it.
2982 * This code assumes there was also a guard/app/ref tag error
2983 * indication.
2984 */
2985 cmd->sense_buffer[7] = 0xc; /* Additional sense length */
2986 cmd->sense_buffer[8] = 0; /* Information descriptor type */
2987 cmd->sense_buffer[9] = 0xa; /* Additional descriptor length */
2988 cmd->sense_buffer[10] = 0x80; /* Validity bit */
2989
2990 /* bghm is a "on the wire" FC frame based count */
2991 switch (scsi_get_prot_op(cmd)) {
2992 case SCSI_PROT_READ_INSERT:
2993 case SCSI_PROT_WRITE_STRIP:
2994 bghm /= cmd->device->sector_size;
2995 break;
2996 case SCSI_PROT_READ_STRIP:
2997 case SCSI_PROT_WRITE_INSERT:
2998 case SCSI_PROT_READ_PASS:
2999 case SCSI_PROT_WRITE_PASS:
3000 bghm /= (cmd->device->sector_size +
3001 sizeof(struct scsi_dif_tuple));
3002 break;
3003 }
3004
3005 failing_sector = scsi_get_lba(cmd);
3006 failing_sector += bghm;
3007
3008 /* Descriptor Information */
3009 put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
3010 }
3011
3012 if (!ret) {
3013 /* No error was reported - problem in FW? */
3014 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3015 "9057 BLKGRD: Unknown error in cmd"
3016 " 0x%x lba 0x%llx blk cnt 0x%x "
3017 "bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
3018 (unsigned long long)scsi_get_lba(cmd),
3019 blk_rq_sectors(cmd->request), bgstat, bghm);
3020
3021 /* Calculate what type of error it was */
3022 lpfc_calc_bg_err(phba, lpfc_cmd);
3023 }
3024 out:
3025 return ret;
3026 }
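
/*
 * Worked example of the high-water-mark conversion above (illustrative
 * numbers): for a READ_PASS command on a 512-byte sector device each
 * block occupies 512 + 8 = 520 bytes on the wire, so a reported bghm
 * of 2080 maps to 2080 / 520 = 4 blocks and the failing sector placed
 * in the sense descriptor is scsi_get_lba(cmd) + 4.  For READ_INSERT
 * and WRITE_STRIP only the data bytes cross the wire, so the divisor
 * is the bare sector size.
 */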
3027
3028 /**
3029 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3030 * @phba: The Hba for which this call is being executed.
3031 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3032 *
3033 * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
3034 * field of @lpfc_cmd for device with SLI-4 interface spec.
3035 *
3036 * Return codes:
3037 * 2 - Error - Do not retry
3038 * 1 - Error - Retry
3039 * 0 - Success
3040 **/
3041 static int
3042 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3043 {
3044 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3045 struct scatterlist *sgel = NULL;
3046 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3047 struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
3048 struct sli4_sge *first_data_sgl;
3049 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3050 dma_addr_t physaddr;
3051 uint32_t num_bde = 0;
3052 uint32_t dma_len;
3053 uint32_t dma_offset = 0;
3054 int nseg, i, j;
3055 struct ulp_bde64 *bde;
3056 bool lsp_just_set = false;
3057 struct sli4_hybrid_sgl *sgl_xtra = NULL;
3058
3059 /*
3060 * There are three possibilities here - use scatter-gather segment, use
3061 * the single mapping, or neither. Start the lpfc command prep by
3062 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
3063 * data bde entry.
3064 */
3065 if (scsi_sg_count(scsi_cmnd)) {
3066 /*
3067 * The driver stores the segment count returned from pci_map_sg
3068 * because this is a count of dma-mappings used to map the use_sg
3069 * pages. They are not guaranteed to be the same for those
3070 * architectures that implement an IOMMU.
3071 */
3072
3073 nseg = scsi_dma_map(scsi_cmnd);
3074 if (unlikely(nseg <= 0))
3075 return 1;
3076 sgl += 1;
3077 /* clear the last flag in the fcp_rsp map entry */
3078 sgl->word2 = le32_to_cpu(sgl->word2);
3079 bf_set(lpfc_sli4_sge_last, sgl, 0);
3080 sgl->word2 = cpu_to_le32(sgl->word2);
3081 sgl += 1;
3082 first_data_sgl = sgl;
3083 lpfc_cmd->seg_cnt = nseg;
3084 if (!phba->cfg_xpsgl &&
3085 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3086 lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:"
3087 " %s: Too many sg segments from "
3088 "dma_map_sg. Config %d, seg_cnt %d\n",
3089 __func__, phba->cfg_sg_seg_cnt,
3090 lpfc_cmd->seg_cnt);
3091 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3092 lpfc_cmd->seg_cnt = 0;
3093 scsi_dma_unmap(scsi_cmnd);
3094 return 2;
3095 }
3096
3097 /*
3098 * The driver established a maximum scatter-gather segment count
3099 * during probe that limits the number of sg elements in any
3100 * single scsi command. Just run through the seg_cnt and format
3101 * the sge's.
3102 * When using SLI-3 the driver will try to fit all the BDEs into
3103 * the IOCB. If it can't then the BDEs get added to a BPL as it
3104 * does for SLI-2 mode.
3105 */
3106
3107 /* for tracking segment boundaries */
3108 sgel = scsi_sglist(scsi_cmnd);
3109 j = 2;
3110 for (i = 0; i < nseg; i++) {
3111 sgl->word2 = 0;
3112 if ((num_bde + 1) == nseg) {
3113 bf_set(lpfc_sli4_sge_last, sgl, 1);
3114 bf_set(lpfc_sli4_sge_type, sgl,
3115 LPFC_SGE_TYPE_DATA);
3116 } else {
3117 bf_set(lpfc_sli4_sge_last, sgl, 0);
3118
3119 /* do we need to expand the segment */
3120 if (!lsp_just_set &&
3121 !((j + 1) % phba->border_sge_num) &&
3122 ((nseg - 1) != i)) {
3123 /* set LSP type */
3124 bf_set(lpfc_sli4_sge_type, sgl,
3125 LPFC_SGE_TYPE_LSP);
3126
3127 sgl_xtra = lpfc_get_sgl_per_hdwq(
3128 phba, lpfc_cmd);
3129
3130 if (unlikely(!sgl_xtra)) {
3131 lpfc_cmd->seg_cnt = 0;
3132 scsi_dma_unmap(scsi_cmnd);
3133 return 1;
3134 }
3135 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3136 sgl_xtra->dma_phys_sgl));
3137 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3138 sgl_xtra->dma_phys_sgl));
3139
3140 } else {
3141 bf_set(lpfc_sli4_sge_type, sgl,
3142 LPFC_SGE_TYPE_DATA);
3143 }
3144 }
3145
3146 if (!(bf_get(lpfc_sli4_sge_type, sgl) &
3147 LPFC_SGE_TYPE_LSP)) {
3148 if ((nseg - 1) == i)
3149 bf_set(lpfc_sli4_sge_last, sgl, 1);
3150
3151 physaddr = sg_dma_address(sgel);
3152 dma_len = sg_dma_len(sgel);
3153 sgl->addr_lo = cpu_to_le32(putPaddrLow(
3154 physaddr));
3155 sgl->addr_hi = cpu_to_le32(putPaddrHigh(
3156 physaddr));
3157
3158 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
3159 sgl->word2 = cpu_to_le32(sgl->word2);
3160 sgl->sge_len = cpu_to_le32(dma_len);
3161
3162 dma_offset += dma_len;
3163 sgel = sg_next(sgel);
3164
3165 sgl++;
3166 lsp_just_set = false;
3167
3168 } else {
3169 sgl->word2 = cpu_to_le32(sgl->word2);
3170 sgl->sge_len = cpu_to_le32(
3171 phba->cfg_sg_dma_buf_size);
3172
3173 sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
3174 i = i - 1;
3175
3176 lsp_just_set = true;
3177 }
3178
3179 j++;
3180 }
3181 /*
3182 * Setup the first Payload BDE. For FCoE we just key off
3183 * Performance Hints, for FC we use lpfc_enable_pbde.
3184 * We populate words 13-15 of IOCB/WQE.
3185 */
3186 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3187 phba->cfg_enable_pbde) {
3188 bde = (struct ulp_bde64 *)
3189 &(iocb_cmd->unsli3.sli3Words[5]);
3190 bde->addrLow = first_data_sgl->addr_lo;
3191 bde->addrHigh = first_data_sgl->addr_hi;
3192 bde->tus.f.bdeSize =
3193 le32_to_cpu(first_data_sgl->sge_len);
3194 bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
3195 bde->tus.w = cpu_to_le32(bde->tus.w);
3196 }
3197 } else {
3198 sgl += 1;
3199 /* clear the last flag in the fcp_rsp map entry */
3200 sgl->word2 = le32_to_cpu(sgl->word2);
3201 bf_set(lpfc_sli4_sge_last, sgl, 1);
3202 sgl->word2 = cpu_to_le32(sgl->word2);
3203
3204 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3205 phba->cfg_enable_pbde) {
3206 bde = (struct ulp_bde64 *)
3207 &(iocb_cmd->unsli3.sli3Words[5]);
3208 memset(bde, 0, (sizeof(uint32_t) * 3));
3209 }
3210 }
3211
3212 /*
3213 * Finish initializing those IOCB fields that are dependent on the
3214 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
3215 * explicitly reinitialized since all iocb memory resources
3216 * are reused.
3217 */
3218 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
3219
3220 /*
3221 * Due to difference in data length between DIF/non-DIF paths,
3222 * we need to set word 4 of IOCB here
3223 */
3224 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
3225
3226 /*
3227 * If the OAS driver feature is enabled and the lun is enabled for
3228 * OAS, set the oas iocb related flags.
3229 */
3230 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3231 scsi_cmnd->device->hostdata)->oas_enabled) {
3232 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3233 lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
3234 scsi_cmnd->device->hostdata)->priority;
3235 }
3236
3237 return 0;
3238 }
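
/*
 * Note on the PBDE setup above: when FCoE performance hints or
 * cfg_enable_pbde are enabled, the first data SGE is mirrored into
 * words 13-15 of the IOCB/WQE as a single BUFF_TYPE_BDE_64 BDE,
 * presumably so the adapter can reach the first buffer without an
 * extra SGL fetch; for commands that carry no data those words are
 * zeroed instead.
 */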
3239
3240 /**
3241 * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
3242 * @phba: The Hba for which this call is being executed.
3243 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3244 *
3245 * This is the protection/DIF aware version of
3246 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
3247 * two functions eventually, but for now, it's here
3248 * Return codes:
3249 * 2 - Error - Do not retry
3250 * 1 - Error - Retry
3251 * 0 - Success
3252 **/
3253 static int
3254 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3255 struct lpfc_io_buf *lpfc_cmd)
3256 {
3257 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3258 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3259 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3260 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
3261 uint32_t num_sge = 0;
3262 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3263 int prot_group_type = 0;
3264 int fcpdl;
3265 int ret = 1;
3266 struct lpfc_vport *vport = phba->pport;
3267
3268 /*
3269 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
3270 * fcp_rsp regions to the first data sge entry
3271 */
3272 if (scsi_sg_count(scsi_cmnd)) {
3273 /*
3274 * The driver stores the segment count returned from pci_map_sg
3275 * because this is a count of dma-mappings used to map the use_sg
3276 * pages. They are not guaranteed to be the same for those
3277 * architectures that implement an IOMMU.
3278 */
3279 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3280 scsi_sglist(scsi_cmnd),
3281 scsi_sg_count(scsi_cmnd), datadir);
3282 if (unlikely(!datasegcnt))
3283 return 1;
3284
3285 sgl += 1;
3286 /* clear the last flag in the fcp_rsp map entry */
3287 sgl->word2 = le32_to_cpu(sgl->word2);
3288 bf_set(lpfc_sli4_sge_last, sgl, 0);
3289 sgl->word2 = cpu_to_le32(sgl->word2);
3290
3291 sgl += 1;
3292 lpfc_cmd->seg_cnt = datasegcnt;
3293
3294 /* First check if data segment count from SCSI Layer is good */
3295 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3296 !phba->cfg_xpsgl) {
3297 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3298 ret = 2;
3299 goto err;
3300 }
3301
3302 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3303
3304 switch (prot_group_type) {
3305 case LPFC_PG_TYPE_NO_DIF:
3306 /* Here we need to add a DISEED to the count */
3307 if (((lpfc_cmd->seg_cnt + 1) >
3308 phba->cfg_total_seg_cnt) &&
3309 !phba->cfg_xpsgl) {
3310 ret = 2;
3311 goto err;
3312 }
3313
3314 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3315 datasegcnt, lpfc_cmd);
3316
3317 /* we should have 2 or more entries in buffer list */
3318 if (num_sge < 2) {
3319 ret = 2;
3320 goto err;
3321 }
3322 break;
3323
3324 case LPFC_PG_TYPE_DIF_BUF:
3325 /*
3326 * This type indicates that protection buffers are
3327 * passed to the driver, so that needs to be prepared
3328 * for DMA
3329 */
3330 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3331 scsi_prot_sglist(scsi_cmnd),
3332 scsi_prot_sg_count(scsi_cmnd), datadir);
3333 if (unlikely(!protsegcnt)) {
3334 scsi_dma_unmap(scsi_cmnd);
3335 return 1;
3336 }
3337
3338 lpfc_cmd->prot_seg_cnt = protsegcnt;
3339 /*
3340 * There is a minimum of 3 SGEs used for every
3341 * protection data segment.
3342 */
3343 if (((lpfc_cmd->prot_seg_cnt * 3) >
3344 (phba->cfg_total_seg_cnt - 2)) &&
3345 !phba->cfg_xpsgl) {
3346 ret = 2;
3347 goto err;
3348 }
3349
3350 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3351 datasegcnt, protsegcnt, lpfc_cmd);
3352
3353 /* we should have 3 or more entries in buffer list */
3354 if (num_sge < 3 ||
3355 (num_sge > phba->cfg_total_seg_cnt &&
3356 !phba->cfg_xpsgl)) {
3357 ret = 2;
3358 goto err;
3359 }
3360 break;
3361
3362 case LPFC_PG_TYPE_INVALID:
3363 default:
3364 scsi_dma_unmap(scsi_cmnd);
3365 lpfc_cmd->seg_cnt = 0;
3366
3367 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3368 "9083 Unexpected protection group %i\n",
3369 prot_group_type);
3370 return 2;
3371 }
3372 }
3373
3374 switch (scsi_get_prot_op(scsi_cmnd)) {
3375 case SCSI_PROT_WRITE_STRIP:
3376 case SCSI_PROT_READ_STRIP:
3377 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3378 break;
3379 case SCSI_PROT_WRITE_INSERT:
3380 case SCSI_PROT_READ_INSERT:
3381 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3382 break;
3383 case SCSI_PROT_WRITE_PASS:
3384 case SCSI_PROT_READ_PASS:
3385 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3386 break;
3387 }
3388
3389 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3390 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3391
3392 /*
3393 * Due to difference in data length between DIF/non-DIF paths,
3394 * we need to set word 4 of IOCB here
3395 */
3396 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3397
3398 /*
3399 * For First burst, we may need to adjust the initial transfer
3400 * length for DIF
3401 */
3402 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
3403 (fcpdl < vport->cfg_first_burst_size))
3404 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
3405
3406 /*
3407 * If the OAS driver feature is enabled and the lun is enabled for
3408 * OAS, set the oas iocb related flags.
3409 */
3410 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3411 scsi_cmnd->device->hostdata)->oas_enabled)
3412 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3413
3414 return 0;
3415 err:
3416 if (lpfc_cmd->seg_cnt)
3417 scsi_dma_unmap(scsi_cmnd);
3418 if (lpfc_cmd->prot_seg_cnt)
3419 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3420 scsi_prot_sg_count(scsi_cmnd),
3421 scsi_cmnd->sc_data_direction);
3422
3423 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
3424 "9084 Cannot setup S/G List for HBA"
3425 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3426 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3427 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3428 prot_group_type, num_sge);
3429
3430 lpfc_cmd->seg_cnt = 0;
3431 lpfc_cmd->prot_seg_cnt = 0;
3432 return ret;
3433 }
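
/*
 * Illustrative first-burst interaction for the routine above: with a
 * hypothetical vport->cfg_first_burst_size of 65536 and a DIF-adjusted
 * fcpdl of 33280, a non-zero fcpi_XRdy is clamped down to 33280 so the
 * advertised first-burst length never exceeds the protected transfer
 * length actually placed on the wire.
 */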
3434
3435 /**
3436 * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3437 * @phba: The Hba for which this call is being executed.
3438 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3439 *
3440 * This routine wraps the actual DMA mapping function pointer from the
3441 * lpfc_hba struct.
3442 *
3443 * Return codes:
3444 * 1 - Error
3445 * 0 - Success
3446 **/
3447 static inline int
3448 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3449 {
3450 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3451 }
3452
3453 /**
3454 * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
3455 * using BlockGuard.
3456 * @phba: The Hba for which this call is being executed.
3457 * @lpfc_cmd: The scsi buffer which is going to be mapped.
3458 *
3459 * This routine wraps the actual DMA mapping function pointer from the
3460 * lpfc_hba struct.
3461 *
3462 * Return codes:
3463 * 1 - Error
3464 * 0 - Success
3465 **/
3466 static inline int
3467 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3468 {
3469 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3470 }
3471
3472 /**
3473 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
3474 * @phba: Pointer to hba context object.
3475 * @vport: Pointer to vport object.
3476 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
3477 * @rsp_iocb: Pointer to response iocb object which reported error.
3478 *
3479 * This function posts an event when there is a SCSI command reporting
3480 * error from the scsi device.
3481 **/
3482 static void
3483 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3484 struct lpfc_io_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
3485 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3486 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3487 uint32_t resp_info = fcprsp->rspStatus2;
3488 uint32_t scsi_status = fcprsp->rspStatus3;
3489 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3490 struct lpfc_fast_path_event *fast_path_evt = NULL;
3491 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3492 unsigned long flags;
3493
3494 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
3495 return;
3496
3497 /* If there is a queuefull or busy condition send a scsi event */
3498 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3499 (cmnd->result == SAM_STAT_BUSY)) {
3500 fast_path_evt = lpfc_alloc_fast_evt(phba);
3501 if (!fast_path_evt)
3502 return;
3503 fast_path_evt->un.scsi_evt.event_type =
3504 FC_REG_SCSI_EVENT;
3505 fast_path_evt->un.scsi_evt.subcategory =
3506 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3507 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3508 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3509 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3510 &pnode->nlp_portname, sizeof(struct lpfc_name));
3511 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3512 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3513 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3514 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3515 fast_path_evt = lpfc_alloc_fast_evt(phba);
3516 if (!fast_path_evt)
3517 return;
3518 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3519 FC_REG_SCSI_EVENT;
3520 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3521 LPFC_EVENT_CHECK_COND;
3522 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3523 cmnd->device->lun;
3524 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3525 &pnode->nlp_portname, sizeof(struct lpfc_name));
3526 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3527 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3528 fast_path_evt->un.check_cond_evt.sense_key =
3529 cmnd->sense_buffer[2] & 0xf;
3530 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3531 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3532 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3533 fcpi_parm &&
3534 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3535 ((scsi_status == SAM_STAT_GOOD) &&
3536 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3537 /*
3538 * If the status is good or the residual does not match fcpi_parm,
3539 * and fcpi_parm is valid, then there is a read_check error
3540 */
3541 fast_path_evt = lpfc_alloc_fast_evt(phba);
3542 if (!fast_path_evt)
3543 return;
3544 fast_path_evt->un.read_check_error.header.event_type =
3545 FC_REG_FABRIC_EVENT;
3546 fast_path_evt->un.read_check_error.header.subcategory =
3547 LPFC_EVENT_FCPRDCHKERR;
3548 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3549 &pnode->nlp_portname, sizeof(struct lpfc_name));
3550 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3551 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3552 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3553 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3554 fast_path_evt->un.read_check_error.fcpiparam =
3555 fcpi_parm;
3556 } else
3557 return;
3558
3559 fast_path_evt->vport = vport;
3560 spin_lock_irqsave(&phba->hbalock, flags);
3561 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3562 spin_unlock_irqrestore(&phba->hbalock, flags);
3563 lpfc_worker_wake_up(phba);
3564 return;
3565 }
3566
3567 /**
3568 * lpfc_scsi_unprep_dma_buf - Un-map DMA mapping of SG-list for dev
3569 * @phba: The HBA for which this call is being executed.
3570 * @psb: The scsi buffer which is going to be un-mapped.
3571 *
3572  * This routine does DMA un-mapping of the scatter-gather list of the scsi
3573  * command held in @psb for a device with the SLI-3 interface spec.
3574 **/
3575 static void
3576 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3577 {
3578 /*
3579 * There are only two special cases to consider. (1) the scsi command
3580 * requested scatter-gather usage or (2) the scsi command allocated
3581 * a request buffer, but did not request use_sg. There is a third
3582 * case, but it does not require resource deallocation.
3583 */
3584 if (psb->seg_cnt > 0)
3585 scsi_dma_unmap(psb->pCmd);
3586 if (psb->prot_seg_cnt > 0)
3587 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3588 scsi_prot_sg_count(psb->pCmd),
3589 psb->pCmd->sc_data_direction);
3590 }
3591
3592 /**
3593  * lpfc_handle_fcp_err - FCP response handler
3594 * @vport: The virtual port for which this call is being executed.
3595 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
3596 * @rsp_iocb: The response IOCB which contains FCP error.
3597 *
3598  * This routine is called to process a response IOCB with status field
3599  * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
3600  * based upon the SCSI and FCP error.
3601 **/
3602 static void
3603 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3604 struct lpfc_iocbq *rsp_iocb)
3605 {
3606 struct lpfc_hba *phba = vport->phba;
3607 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3608 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
3609 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3610 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
3611 uint32_t resp_info = fcprsp->rspStatus2;
3612 uint32_t scsi_status = fcprsp->rspStatus3;
3613 uint32_t *lp;
3614 uint32_t host_status = DID_OK;
3615 uint32_t rsplen = 0;
3616 uint32_t fcpDl;
3617 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
3618
3619
3620 /*
3621 * If this is a task management command, there is no
3622 * scsi packet associated with this lpfc_cmd. The driver
3623 * consumes it.
3624 */
3625 if (fcpcmd->fcpCntl2) {
3626 scsi_status = 0;
3627 goto out;
3628 }
3629
3630 if (resp_info & RSP_LEN_VALID) {
3631 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3632 if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
3633 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3634 "2719 Invalid response length: "
3635 "tgt x%x lun x%llx cmnd x%x rsplen x%x\n",
3636 cmnd->device->id,
3637 cmnd->device->lun, cmnd->cmnd[0],
3638 rsplen);
3639 host_status = DID_ERROR;
3640 goto out;
3641 }
3642 if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
3643 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
3644 "2757 Protocol failure detected during "
3645 "processing of FCP I/O op: "
3646 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
3647 cmnd->device->id,
3648 cmnd->device->lun, cmnd->cmnd[0],
3649 fcprsp->rspInfo3);
3650 host_status = DID_ERROR;
3651 goto out;
3652 }
3653 }
3654
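	/* Sense data follows the response-info area in the FCP_RSP payload; copy
	 * it into the midlayer sense buffer, truncated to SCSI_SENSE_BUFFERSIZE.
	 */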
3655 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
3656 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
3657 if (snslen > SCSI_SENSE_BUFFERSIZE)
3658 snslen = SCSI_SENSE_BUFFERSIZE;
3659
3660 if (resp_info & RSP_LEN_VALID)
3661 rsplen = be32_to_cpu(fcprsp->rspRspLen);
3662 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
3663 }
3664 lp = (uint32_t *)cmnd->sense_buffer;
3665
3666 /* special handling for under run conditions */
3667 if (!scsi_status && (resp_info & RESID_UNDER)) {
3668 /* don't log under runs if fcp set... */
3669 if (vport->cfg_log_verbose & LOG_FCP)
3670 logit = LOG_FCP_ERROR;
3671 /* unless operator says so */
3672 if (vport->cfg_log_verbose & LOG_FCP_UNDER)
3673 logit = LOG_FCP_UNDER;
3674 }
3675
3676 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3677 "9024 FCP command x%x failed: x%x SNS x%x x%x "
3678 "Data: x%x x%x x%x x%x x%x\n",
3679 cmnd->cmnd[0], scsi_status,
3680 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
3681 be32_to_cpu(fcprsp->rspResId),
3682 be32_to_cpu(fcprsp->rspSnsLen),
3683 be32_to_cpu(fcprsp->rspRspLen),
3684 fcprsp->rspInfo3);
3685
3686 scsi_set_resid(cmnd, 0);
3687 fcpDl = be32_to_cpu(fcpcmd->fcpDl);
3688 if (resp_info & RESID_UNDER) {
3689 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
3690
3691 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
3692 "9025 FCP Underrun, expected %d, "
3693 "residual %d Data: x%x x%x x%x\n",
3694 fcpDl,
3695 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
3696 cmnd->underflow);
3697
3698 /*
3699 * If there is an under run, check if under run reported by
3700 * storage array is same as the under run reported by HBA.
3701 * If this is not same, there is a dropped frame.
3702 */
3703 if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
3704 lpfc_printf_vlog(vport, KERN_WARNING,
3705 LOG_FCP | LOG_FCP_ERROR,
3706 "9026 FCP Read Check Error "
3707 "and Underrun Data: x%x x%x x%x x%x\n",
3708 fcpDl,
3709 scsi_get_resid(cmnd), fcpi_parm,
3710 cmnd->cmnd[0]);
3711 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3712 host_status = DID_ERROR;
3713 }
3714 /*
3715 * The cmnd->underflow is the minimum number of bytes that must
3716 * be transferred for this command. Provided a sense condition
3717 * is not present, make sure the actual amount transferred is at
3718 * least the underflow value or fail.
3719 */
3720 if (!(resp_info & SNS_LEN_VALID) &&
3721 (scsi_status == SAM_STAT_GOOD) &&
3722 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
3723 < cmnd->underflow)) {
3724 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3725 "9027 FCP command x%x residual "
3726 "underrun converted to error "
3727 "Data: x%x x%x x%x\n",
3728 cmnd->cmnd[0], scsi_bufflen(cmnd),
3729 scsi_get_resid(cmnd), cmnd->underflow);
3730 host_status = DID_ERROR;
3731 }
3732 } else if (resp_info & RESID_OVER) {
3733 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
3734 "9028 FCP command x%x residual overrun error. "
3735 "Data: x%x x%x\n", cmnd->cmnd[0],
3736 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
3737 host_status = DID_ERROR;
3738
3739 /*
3740 * Check SLI validation that all the transfer was actually done
3741 * (fcpi_parm should be zero). Apply check only to reads.
3742 */
3743 } else if (fcpi_parm) {
3744 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
3745 "9029 FCP %s Check Error xri x%x Data: "
3746 "x%x x%x x%x x%x x%x\n",
3747 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
3748 "Read" : "Write"),
3749 ((phba->sli_rev == LPFC_SLI_REV4) ?
3750 lpfc_cmd->cur_iocbq.sli4_xritag :
3751 rsp_iocb->iocb.ulpContext),
3752 fcpDl, be32_to_cpu(fcprsp->rspResId),
3753 fcpi_parm, cmnd->cmnd[0], scsi_status);
3754
3755 /* There is some issue with the LPe12000 that causes it
3756 * to miscalculate the fcpi_parm and falsely trip this
3757 * recovery logic. Detect this case and don't error when true.
3758 */
3759 if (fcpi_parm > fcpDl)
3760 goto out;
3761
3762 switch (scsi_status) {
3763 case SAM_STAT_GOOD:
3764 case SAM_STAT_CHECK_CONDITION:
3765 /* Fabric dropped a data frame. Fail any successful
3766 * command in which we detected dropped frames.
3767 * A status of good or some check conditions could
3768 * be considered a successful command.
3769 */
3770 host_status = DID_ERROR;
3771 break;
3772 }
3773 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
3774 }
3775
3776 out:
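	/* Compose the midlayer result: host byte in bits 23:16, SCSI status
	 * byte in bits 7:0.
	 */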
3777 cmnd->result = host_status << 16 | scsi_status;
3778 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
3779 }
3780
3781 /**
3782 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
3783 * @phba: The Hba for which this call is being executed.
3784 * @pIocbIn: The command IOCBQ for the scsi cmnd.
3785 * @pIocbOut: The response IOCBQ for the scsi cmnd.
3786 *
3787  * This routine assigns the scsi command result by examining the response
3788  * IOCB status field. It also handles the QUEUE FULL condition by ramping
3789  * down the device queue depth.
3790 **/
3791 static void
3792 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3793 struct lpfc_iocbq *pIocbOut)
3794 {
3795 struct lpfc_io_buf *lpfc_cmd =
3796 (struct lpfc_io_buf *) pIocbIn->context1;
3797 struct lpfc_vport *vport = pIocbIn->vport;
3798 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
3799 struct lpfc_nodelist *pnode = rdata->pnode;
3800 struct scsi_cmnd *cmd;
3801 unsigned long flags;
3802 struct lpfc_fast_path_event *fast_path_evt;
3803 struct Scsi_Host *shost;
3804 int idx;
3805 uint32_t logit = LOG_FCP;
3806 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3807 int cpu;
3808 #endif
3809
3810 /* Guard against abort handler being called at same time */
3811 spin_lock(&lpfc_cmd->buf_lock);
3812
3813 /* Sanity check on return of outstanding command */
3814 cmd = lpfc_cmd->pCmd;
3815 if (!cmd) {
3816 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3817 "2621 IO completion: Not an active IO\n");
3818 spin_unlock(&lpfc_cmd->buf_lock);
3819 return;
3820 }
3821
3822 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
3823 if (phba->sli4_hba.hdwq)
3824 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
3825
3826 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3827 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
3828 cpu = raw_smp_processor_id();
3829 if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
3830 phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
3831 }
3832 #endif
3833 shost = cmd->device->host;
3834
3835 lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
3836 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
3837 	/* pick up SLI4 exchange busy status from HBA */
3838 lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY;
3839
3840 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
3841 if (lpfc_cmd->prot_data_type) {
3842 struct scsi_dif_tuple *src = NULL;
3843
3844 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
3845 /*
3846 * Used to restore any changes to protection
3847 * data for error injection.
3848 */
3849 switch (lpfc_cmd->prot_data_type) {
3850 case LPFC_INJERR_REFTAG:
3851 src->ref_tag =
3852 lpfc_cmd->prot_data;
3853 break;
3854 case LPFC_INJERR_APPTAG:
3855 src->app_tag =
3856 (uint16_t)lpfc_cmd->prot_data;
3857 break;
3858 case LPFC_INJERR_GUARD:
3859 src->guard_tag =
3860 (uint16_t)lpfc_cmd->prot_data;
3861 break;
3862 default:
3863 break;
3864 }
3865
3866 lpfc_cmd->prot_data = 0;
3867 lpfc_cmd->prot_data_type = 0;
3868 lpfc_cmd->prot_data_segment = NULL;
3869 }
3870 #endif
3871
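	/* Normalize the completion status before logging and dispatch:
	 * driver-detected local rejects become IOSTAT_DRIVER_REJECT and
	 * out-of-range values are clamped to IOSTAT_DEFAULT.
	 */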
3872 if (lpfc_cmd->status) {
3873 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
3874 (lpfc_cmd->result & IOERR_DRVR_MASK))
3875 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
3876 else if (lpfc_cmd->status >= IOSTAT_CNT)
3877 lpfc_cmd->status = IOSTAT_DEFAULT;
3878 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
3879 !lpfc_cmd->fcp_rsp->rspStatus3 &&
3880 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
3881 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
3882 logit = 0;
3883 else
3884 logit = LOG_FCP | LOG_FCP_UNDER;
3885 lpfc_printf_vlog(vport, KERN_WARNING, logit,
3886 "9030 FCP cmd x%x failed <%d/%lld> "
3887 "status: x%x result: x%x "
3888 "sid: x%x did: x%x oxid: x%x "
3889 "Data: x%x x%x\n",
3890 cmd->cmnd[0],
3891 cmd->device ? cmd->device->id : 0xffff,
3892 cmd->device ? cmd->device->lun : 0xffff,
3893 lpfc_cmd->status, lpfc_cmd->result,
3894 vport->fc_myDID,
3895 (pnode) ? pnode->nlp_DID : 0,
3896 phba->sli_rev == LPFC_SLI_REV4 ?
3897 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
3898 pIocbOut->iocb.ulpContext,
3899 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
3900
3901 switch (lpfc_cmd->status) {
3902 case IOSTAT_FCP_RSP_ERROR:
3903 /* Call FCP RSP handler to determine result */
3904 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
3905 break;
3906 case IOSTAT_NPORT_BSY:
3907 case IOSTAT_FABRIC_BSY:
3908 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
3909 fast_path_evt = lpfc_alloc_fast_evt(phba);
3910 if (!fast_path_evt)
3911 break;
3912 fast_path_evt->un.fabric_evt.event_type =
3913 FC_REG_FABRIC_EVENT;
3914 fast_path_evt->un.fabric_evt.subcategory =
3915 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
3916 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
3917 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
3918 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
3919 &pnode->nlp_portname,
3920 sizeof(struct lpfc_name));
3921 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
3922 &pnode->nlp_nodename,
3923 sizeof(struct lpfc_name));
3924 }
3925 fast_path_evt->vport = vport;
3926 fast_path_evt->work_evt.evt =
3927 LPFC_EVT_FASTPATH_MGMT_EVT;
3928 spin_lock_irqsave(&phba->hbalock, flags);
3929 list_add_tail(&fast_path_evt->work_evt.evt_listp,
3930 &phba->work_list);
3931 spin_unlock_irqrestore(&phba->hbalock, flags);
3932 lpfc_worker_wake_up(phba);
3933 break;
3934 case IOSTAT_LOCAL_REJECT:
3935 case IOSTAT_REMOTE_STOP:
3936 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
3937 lpfc_cmd->result ==
3938 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
3939 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
3940 lpfc_cmd->result ==
3941 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
3942 cmd->result = DID_NO_CONNECT << 16;
3943 break;
3944 }
3945 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
3946 lpfc_cmd->result == IOERR_NO_RESOURCES ||
3947 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
3948 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
3949 cmd->result = DID_REQUEUE << 16;
3950 break;
3951 }
3952 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
3953 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
3954 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
3955 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
3956 /*
3957 * This is a response for a BG enabled
3958 * cmd. Parse BG error
3959 */
3960 lpfc_parse_bg_err(phba, lpfc_cmd,
3961 pIocbOut);
3962 break;
3963 } else {
3964 lpfc_printf_vlog(vport, KERN_WARNING,
3965 LOG_BG,
3966 "9031 non-zero BGSTAT "
3967 "on unprotected cmd\n");
3968 }
3969 }
3970 if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
3971 && (phba->sli_rev == LPFC_SLI_REV4)
3972 && (pnode && NLP_CHK_NODE_ACT(pnode))) {
3973 				/* This IO was aborted by the target; we don't
3974 				 * know the rxid, and because we did not send the
3975 				 * ABTS we cannot generate an RRQ.
3976 */
3977 lpfc_set_rrq_active(phba, pnode,
3978 lpfc_cmd->cur_iocbq.sli4_lxritag,
3979 0, 0);
3980 }
3981 /* fall through */
3982 default:
3983 cmd->result = DID_ERROR << 16;
3984 break;
3985 }
3986
3987 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
3988 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
3989 cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
3990 SAM_STAT_BUSY;
3991 } else
3992 cmd->result = DID_OK << 16;
3993
3994 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
3995 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
3996
3997 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
3998 "0710 Iodone <%d/%llu> cmd x%px, error "
3999 "x%x SNS x%x x%x Data: x%x x%x\n",
4000 cmd->device->id, cmd->device->lun, cmd,
4001 cmd->result, *lp, *(lp + 3), cmd->retries,
4002 scsi_get_resid(cmd));
4003 }
4004
4005 lpfc_update_stats(phba, lpfc_cmd);
4006 if (vport->cfg_max_scsicmpl_time &&
4007 time_after(jiffies, lpfc_cmd->start_time +
4008 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4009 spin_lock_irqsave(shost->host_lock, flags);
4010 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
4011 if (pnode->cmd_qdepth >
4012 atomic_read(&pnode->cmd_pending) &&
4013 (atomic_read(&pnode->cmd_pending) >
4014 LPFC_MIN_TGT_QDEPTH) &&
4015 ((cmd->cmnd[0] == READ_10) ||
4016 (cmd->cmnd[0] == WRITE_10)))
4017 pnode->cmd_qdepth =
4018 atomic_read(&pnode->cmd_pending);
4019
4020 pnode->last_change_time = jiffies;
4021 }
4022 spin_unlock_irqrestore(shost->host_lock, flags);
4023 }
4024 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4025
4026 lpfc_cmd->pCmd = NULL;
4027 spin_unlock(&lpfc_cmd->buf_lock);
4028
4029 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
4030 cmd->scsi_done(cmd);
4031
4032 /*
4033 * If there is an abort thread waiting for command completion
4034 * wake up the thread.
4035 */
4036 spin_lock(&lpfc_cmd->buf_lock);
4037 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4038 if (lpfc_cmd->waitq)
4039 wake_up(lpfc_cmd->waitq);
4040 spin_unlock(&lpfc_cmd->buf_lock);
4041
4042 lpfc_release_scsi_buf(phba, lpfc_cmd);
4043 }
4044
4045 /**
4046 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
4047 * @data: A pointer to the immediate command data portion of the IOCB.
4048 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
4049 *
4050 * The routine copies the entire FCP command from @fcp_cmnd to @data while
4051 * byte swapping the data to big endian format for transmission on the wire.
4052 **/
4053 static void
4054 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
4055 {
4056 int i, j;
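	/* Copy the FCP_CMND payload one 32-bit word at a time, converting each
	 * word to big-endian wire order.
	 */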
4057 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
4058 i += sizeof(uint32_t), j++) {
4059 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
4060 }
4061 }
4062
4063 /**
4064  * lpfc_scsi_prep_cmnd - Wrapper function to convert a scsi cmnd to an FCP info unit
4065 * @vport: The virtual port for which this call is being executed.
4066  * @lpfc_cmd: The scsi command which needs to be sent.
4067 * @pnode: Pointer to lpfc_nodelist.
4068 *
4069  * This routine initializes the fcp_cmnd and iocb data structures from the
4070  * scsi command for transfer to a device with the SLI-3 interface spec.
4071 **/
4072 static void
4073 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4074 struct lpfc_nodelist *pnode)
4075 {
4076 struct lpfc_hba *phba = vport->phba;
4077 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4078 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4079 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
4080 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
4081 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4082 int datadir = scsi_cmnd->sc_data_direction;
4083 int idx;
4084 uint8_t *ptr;
4085 bool sli4;
4086 uint32_t fcpdl;
4087
4088 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
4089 return;
4090
4091 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4092 /* clear task management bits */
4093 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4094
4095 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4096 &lpfc_cmd->fcp_cmnd->fcp_lun);
4097
4098 ptr = &fcp_cmnd->fcpCdb[0];
4099 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4100 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4101 ptr += scsi_cmnd->cmd_len;
4102 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4103 }
4104
4105 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4106
4107 sli4 = (phba->sli_rev == LPFC_SLI_REV4);
4108 piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
4109 idx = lpfc_cmd->hdwq_no;
4110 if (phba->sli4_hba.hdwq)
4111 hdwq = &phba->sli4_hba.hdwq[idx];
4112
4113 /*
4114 * There are three possibilities here - use scatter-gather segment, use
4115 * the single mapping, or neither. Start the lpfc command prep by
4116 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
4117 * data bde entry.
4118 */
4119 if (scsi_sg_count(scsi_cmnd)) {
4120 if (datadir == DMA_TO_DEVICE) {
4121 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
4122 iocb_cmd->ulpPU = PARM_READ_CHECK;
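			/* If the rport negotiated first burst, pre-load fcpi_XRdy
			 * so that up to cfg_first_burst_size bytes of write data
			 * may be sent before the target returns a transfer ready.
			 */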
4123 if (vport->cfg_first_burst_size &&
4124 (pnode->nlp_flag & NLP_FIRSTBURST)) {
4125 fcpdl = scsi_bufflen(scsi_cmnd);
4126 if (fcpdl < vport->cfg_first_burst_size)
4127 piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl;
4128 else
4129 piocbq->iocb.un.fcpi.fcpi_XRdy =
4130 vport->cfg_first_burst_size;
4131 }
4132 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4133 if (hdwq)
4134 hdwq->scsi_cstat.output_requests++;
4135 } else {
4136 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
4137 iocb_cmd->ulpPU = PARM_READ_CHECK;
4138 fcp_cmnd->fcpCntl3 = READ_DATA;
4139 if (hdwq)
4140 hdwq->scsi_cstat.input_requests++;
4141 }
4142 } else {
4143 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
4144 iocb_cmd->un.fcpi.fcpi_parm = 0;
4145 iocb_cmd->ulpPU = 0;
4146 fcp_cmnd->fcpCntl3 = 0;
4147 if (hdwq)
4148 hdwq->scsi_cstat.control_requests++;
4149 }
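	/* For SLI-3 without BlockGuard, embed the FCP_CMND directly in the
	 * IOCB's immediate command data area.
	 */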
4150 if (phba->sli_rev == 3 &&
4151 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4152 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
4153 /*
4154 * Finish initializing those IOCB fields that are independent
4155 * of the scsi_cmnd request_buffer
4156 */
4157 piocbq->iocb.ulpContext = pnode->nlp_rpi;
4158 if (sli4)
4159 piocbq->iocb.ulpContext =
4160 phba->sli4_hba.rpi_ids[pnode->nlp_rpi];
4161 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4162 piocbq->iocb.ulpFCP2Rcvy = 1;
4163 else
4164 piocbq->iocb.ulpFCP2Rcvy = 0;
4165
4166 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
4167 piocbq->context1 = lpfc_cmd;
4168 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4169 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
4170 piocbq->vport = vport;
4171 }
4172
4173 /**
4174 * lpfc_scsi_prep_task_mgmt_cmd - Convert SLI3 scsi TM cmd to FCP info unit
4175 * @vport: The virtual port for which this call is being executed.
4176 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4177 * @lun: Logical unit number.
4178 * @task_mgmt_cmd: SCSI task management command.
4179 *
4180 * This routine creates FCP information unit corresponding to @task_mgmt_cmd
4181 * for device with SLI-3 interface spec.
4182 *
4183 * Return codes:
4184 * 0 - Error
4185 * 1 - Success
4186 **/
4187 static int
4188 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
4189 struct lpfc_io_buf *lpfc_cmd,
4190 uint64_t lun,
4191 uint8_t task_mgmt_cmd)
4192 {
4193 struct lpfc_iocbq *piocbq;
4194 IOCB_t *piocb;
4195 struct fcp_cmnd *fcp_cmnd;
4196 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
4197 struct lpfc_nodelist *ndlp = rdata->pnode;
4198
4199 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
4200 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
4201 return 0;
4202
4203 piocbq = &(lpfc_cmd->cur_iocbq);
4204 piocbq->vport = vport;
4205
4206 piocb = &piocbq->iocb;
4207
4208 fcp_cmnd = lpfc_cmd->fcp_cmnd;
4209 /* Clear out any old data in the FCP command area */
4210 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
4211 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
4212 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
4213 if (vport->phba->sli_rev == 3 &&
4214 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4215 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
4216 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
4217 piocb->ulpContext = ndlp->nlp_rpi;
4218 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
4219 piocb->ulpContext =
4220 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
4221 }
4222 piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
4223 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
4224 piocb->ulpPU = 0;
4225 piocb->un.fcpi.fcpi_parm = 0;
4226
4227 /* ulpTimeout is only one byte */
4228 if (lpfc_cmd->timeout > 0xff) {
4229 /*
4230 * Do not timeout the command at the firmware level.
4231 * The driver will provide the timeout mechanism.
4232 */
4233 piocb->ulpTimeout = 0;
4234 } else
4235 piocb->ulpTimeout = lpfc_cmd->timeout;
4236
4237 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4238 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4239
4240 return 1;
4241 }
4242
4243 /**
4244 * lpfc_scsi_api_table_setup - Set up scsi api function jump table
4245 * @phba: The hba struct for which this call is being executed.
4246 * @dev_grp: The HBA PCI-Device group number.
4247 *
4248 * This routine sets up the SCSI interface API function jump table in @phba
4249 * struct.
4250 * Returns: 0 - success, -ENODEV - failure.
4251 **/
4252 int
4253 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4254 {
4255
4256 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4257 phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd;
4258
4259 switch (dev_grp) {
4260 case LPFC_PCI_DEV_LP:
4261 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4262 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4263 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4264 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4265 break;
4266 case LPFC_PCI_DEV_OC:
4267 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4268 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4269 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4270 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4271 break;
4272 default:
4273 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4274 "1418 Invalid HBA PCI-device group: 0x%x\n",
4275 dev_grp);
4276 return -ENODEV;
4277 break;
4278 }
4279 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4280 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4281 return 0;
4282 }
4283
4284 /**
4285 * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
4286 * @phba: The Hba for which this call is being executed.
4287 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
4288 * @rspiocbq: Pointer to lpfc_iocbq data structure.
4289 *
4290  * This routine is the IOCB completion routine for the device reset and target
4291  * reset routines. It releases the scsi buffer associated with lpfc_cmd.
4292 **/
4293 static void
4294 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4295 struct lpfc_iocbq *cmdiocbq,
4296 struct lpfc_iocbq *rspiocbq)
4297 {
4298 struct lpfc_io_buf *lpfc_cmd =
4299 (struct lpfc_io_buf *) cmdiocbq->context1;
4300 if (lpfc_cmd)
4301 lpfc_release_scsi_buf(phba, lpfc_cmd);
4302 return;
4303 }
4304
4305 /**
4306 * lpfc_check_pci_resettable - Walks list of devices on pci_dev's bus to check
4307 * if issuing a pci_bus_reset is possibly unsafe
4308 * @phba: lpfc_hba pointer.
4309 *
4310 * Description:
4311  * Walks the bus_list to ensure it contains only PCI devices with the Emulex
4312  * vendor ID, device IDs that support hot reset, and at most one occurrence
4313  * of function 0.
4314 *
4315 * Returns:
4316 * -EBADSLT, detected invalid device
4317 * 0, successful
4318 */
4319 int
4320 lpfc_check_pci_resettable(const struct lpfc_hba *phba)
4321 {
4322 const struct pci_dev *pdev = phba->pcidev;
4323 struct pci_dev *ptr = NULL;
4324 u8 counter = 0;
4325
4326 /* Walk the list of devices on the pci_dev's bus */
4327 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
4328 /* Check for Emulex Vendor ID */
4329 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
4330 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4331 "8346 Non-Emulex vendor found: "
4332 "0x%04x\n", ptr->vendor);
4333 return -EBADSLT;
4334 }
4335
4336 /* Check for valid Emulex Device ID */
4337 switch (ptr->device) {
4338 case PCI_DEVICE_ID_LANCER_FC:
4339 case PCI_DEVICE_ID_LANCER_G6_FC:
4340 case PCI_DEVICE_ID_LANCER_G7_FC:
4341 break;
4342 default:
4343 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4344 "8347 Invalid device found: "
4345 "0x%04x\n", ptr->device);
4346 return -EBADSLT;
4347 }
4348
4349 /* Check for only one function 0 ID to ensure only one HBA on
4350 * secondary bus
4351 */
4352 if (ptr->devfn == 0) {
4353 if (++counter > 1) {
4354 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4355 "8348 More than one device on "
4356 "secondary bus found\n");
4357 return -EBADSLT;
4358 }
4359 }
4360 }
4361
4362 return 0;
4363 }
4364
4365 /**
4366 * lpfc_info - Info entry point of scsi_host_template data structure
4367 * @host: The scsi host for which this call is being executed.
4368 *
4369  * This routine provides module information about the hba.
4370 *
4371  * Return code:
4372 * Pointer to char - Success.
4373 **/
4374 const char *
4375 lpfc_info(struct Scsi_Host *host)
4376 {
4377 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
4378 struct lpfc_hba *phba = vport->phba;
4379 int link_speed = 0;
4380 static char lpfcinfobuf[384];
4381 char tmp[384] = {0};
4382
4383 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
4384 if (phba && phba->pcidev){
4385 /* Model Description */
4386 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
4387 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4388 sizeof(lpfcinfobuf))
4389 goto buffer_done;
4390
4391 /* PCI Info */
4392 scnprintf(tmp, sizeof(tmp),
4393 " on PCI bus %02x device %02x irq %d",
4394 phba->pcidev->bus->number, phba->pcidev->devfn,
4395 phba->pcidev->irq);
4396 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4397 sizeof(lpfcinfobuf))
4398 goto buffer_done;
4399
4400 /* Port Number */
4401 if (phba->Port[0]) {
4402 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
4403 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4404 sizeof(lpfcinfobuf))
4405 goto buffer_done;
4406 }
4407
4408 /* Link Speed */
4409 link_speed = lpfc_sli_port_speed_get(phba);
4410 if (link_speed != 0) {
4411 scnprintf(tmp, sizeof(tmp),
4412 " Logical Link Speed: %d Mbps", link_speed);
4413 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
4414 sizeof(lpfcinfobuf))
4415 goto buffer_done;
4416 }
4417
4418 /* PCI resettable */
4419 if (!lpfc_check_pci_resettable(phba)) {
4420 scnprintf(tmp, sizeof(tmp), " PCI resettable");
4421 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
4422 }
4423 }
4424
4425 buffer_done:
4426 return lpfcinfobuf;
4427 }
4428
4429 /**
4430  * lpfc_poll_rearm_timer - Routine to re-arm the fcp_poll timer of the hba
4431 * @phba: The Hba for which this call is being executed.
4432 *
4433  * This routine re-arms the fcp_poll_timer of @phba to expire cfg_poll_tmo
4434  * milliseconds from now. The default value of cfg_poll_tmo is 10 milliseconds.
4435 **/
4436 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
4437 {
4438 unsigned long poll_tmo_expires =
4439 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
4440
4441 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
4442 mod_timer(&phba->fcp_poll_timer,
4443 poll_tmo_expires);
4444 }
4445
4446 /**
4447 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
4448 * @phba: The Hba for which this call is being executed.
4449 *
4450 * This routine starts the fcp_poll_timer of @phba.
4451 **/
4452 void lpfc_poll_start_timer(struct lpfc_hba * phba)
4453 {
4454 lpfc_poll_rearm_timer(phba);
4455 }
4456
4457 /**
4458 * lpfc_poll_timeout - Restart polling timer
4459  * @t: Pointer to the timer_list embedded in the lpfc_hba data structure.
4460  *
4461  * This routine restarts the fcp_poll timer when FCP ring polling is enabled
4462  * and the FCP ring interrupt is disabled.
4463 **/
4464
4465 void lpfc_poll_timeout(struct timer_list *t)
4466 {
4467 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
4468
4469 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4470 lpfc_sli_handle_fast_ring_event(phba,
4471 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4472
4473 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4474 lpfc_poll_rearm_timer(phba);
4475 }
4476 }
4477
4478 /**
4479 * lpfc_queuecommand - scsi_host_template queuecommand entry point
4480  * @shost: Pointer to the Scsi_Host to which the command was issued.
4481  * @cmnd: Pointer to scsi_cmnd data structure.
4482 *
4483  * The driver registers this routine with the scsi midlayer to submit a @cmnd
4484  * for processing. It prepares an IOCB from the scsi command, provides it to
4485  * the firmware, and completes the command back to the midlayer when done.
4486 *
4487 * Return value :
4488 * 0 - Success
4489 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
4490 **/
4491 static int
4492 lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
4493 {
4494 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4495 struct lpfc_hba *phba = vport->phba;
4496 struct lpfc_rport_data *rdata;
4497 struct lpfc_nodelist *ndlp;
4498 struct lpfc_io_buf *lpfc_cmd;
4499 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
4500 int err, idx;
4501 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4502 int cpu;
4503 #endif
4504
4505 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
4506
4507 /* sanity check on references */
4508 if (unlikely(!rdata) || unlikely(!rport))
4509 goto out_fail_command;
4510
4511 err = fc_remote_port_chkready(rport);
4512 if (err) {
4513 cmnd->result = err;
4514 goto out_fail_command;
4515 }
4516 ndlp = rdata->pnode;
4517
4518 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
4519 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
4520
4521 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
4522 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
4523 " op:%02x str=%s without registering for"
4524 " BlockGuard - Rejecting command\n",
4525 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
4526 dif_op_str[scsi_get_prot_op(cmnd)]);
4527 goto out_fail_command;
4528 }
4529
4530 /*
4531 * Catch race where our node has transitioned, but the
4532 * transport is still transitioning.
4533 */
4534 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
4535 goto out_tgt_busy;
4536 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
4537 if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
4538 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4539 "3377 Target Queue Full, scsi Id:%d "
4540 "Qdepth:%d Pending command:%d"
4541 " WWNN:%02x:%02x:%02x:%02x:"
4542 "%02x:%02x:%02x:%02x, "
4543 " WWPN:%02x:%02x:%02x:%02x:"
4544 "%02x:%02x:%02x:%02x",
4545 ndlp->nlp_sid, ndlp->cmd_qdepth,
4546 atomic_read(&ndlp->cmd_pending),
4547 ndlp->nlp_nodename.u.wwn[0],
4548 ndlp->nlp_nodename.u.wwn[1],
4549 ndlp->nlp_nodename.u.wwn[2],
4550 ndlp->nlp_nodename.u.wwn[3],
4551 ndlp->nlp_nodename.u.wwn[4],
4552 ndlp->nlp_nodename.u.wwn[5],
4553 ndlp->nlp_nodename.u.wwn[6],
4554 ndlp->nlp_nodename.u.wwn[7],
4555 ndlp->nlp_portname.u.wwn[0],
4556 ndlp->nlp_portname.u.wwn[1],
4557 ndlp->nlp_portname.u.wwn[2],
4558 ndlp->nlp_portname.u.wwn[3],
4559 ndlp->nlp_portname.u.wwn[4],
4560 ndlp->nlp_portname.u.wwn[5],
4561 ndlp->nlp_portname.u.wwn[6],
4562 ndlp->nlp_portname.u.wwn[7]);
4563 goto out_tgt_busy;
4564 }
4565 }
4566
4567 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
4568 if (lpfc_cmd == NULL) {
4569 lpfc_rampdown_queue_depth(phba);
4570
4571 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
4572 "0707 driver's buffer pool is empty, "
4573 "IO busied\n");
4574 goto out_host_busy;
4575 }
4576
4577 /*
4578 * Store the midlayer's command structure for the completion phase
4579 * and complete the command initialization.
4580 */
4581 lpfc_cmd->pCmd = cmnd;
4582 lpfc_cmd->rdata = rdata;
4583 lpfc_cmd->ndlp = ndlp;
4584 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
4585
4586 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
4587 if (vport->phba->cfg_enable_bg) {
4588 lpfc_printf_vlog(vport,
4589 KERN_INFO, LOG_SCSI_CMD,
4590 "9033 BLKGRD: rcvd %s cmd:x%x "
4591 "sector x%llx cnt %u pt %x\n",
4592 dif_op_str[scsi_get_prot_op(cmnd)],
4593 cmnd->cmnd[0],
4594 (unsigned long long)scsi_get_lba(cmnd),
4595 blk_rq_sectors(cmnd->request),
4596 (cmnd->cmnd[1]>>5));
4597 }
4598 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
4599 } else {
4600 if (vport->phba->cfg_enable_bg) {
4601 lpfc_printf_vlog(vport,
4602 KERN_INFO, LOG_SCSI_CMD,
4603 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
4604 "x%x sector x%llx cnt %u pt %x\n",
4605 cmnd->cmnd[0],
4606 (unsigned long long)scsi_get_lba(cmnd),
4607 blk_rq_sectors(cmnd->request),
4608 (cmnd->cmnd[1]>>5));
4609 }
4610 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
4611 }
4612
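	/* A return of 2 from the DMA prep routine fails the command back to the
	 * midlayer with DID_ERROR; any other non-zero return busies the host so
	 * the midlayer retries later.
	 */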
4613 if (err == 2) {
4614 cmnd->result = DID_ERROR << 16;
4615 goto out_fail_command_release_buf;
4616 } else if (err) {
4617 goto out_host_busy_free_buf;
4618 }
4619
4620 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
4621
4622 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4623 if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) {
4624 cpu = raw_smp_processor_id();
4625 if (cpu < LPFC_CHECK_CPU_CNT) {
4626 struct lpfc_sli4_hdw_queue *hdwq =
4627 &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
4628 hdwq->cpucheck_xmt_io[cpu]++;
4629 }
4630 }
4631 #endif
4632 err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4633 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
4634 if (err) {
4635 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4636 				   "3376 FCP could not issue IOCB err %x "
4637 "FCP cmd x%x <%d/%llu> "
4638 "sid: x%x did: x%x oxid: x%x "
4639 "Data: x%x x%x x%x x%x\n",
4640 err, cmnd->cmnd[0],
4641 cmnd->device ? cmnd->device->id : 0xffff,
4642 cmnd->device ? cmnd->device->lun : (u64) -1,
4643 vport->fc_myDID, ndlp->nlp_DID,
4644 phba->sli_rev == LPFC_SLI_REV4 ?
4645 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
4646 lpfc_cmd->cur_iocbq.iocb.ulpContext,
4647 lpfc_cmd->cur_iocbq.iocb.ulpIoTag,
4648 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
4649 (uint32_t)
4650 (cmnd->request->timeout / 1000));
4651
4652 goto out_host_busy_free_buf;
4653 }
4654 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
4655 lpfc_sli_handle_fast_ring_event(phba,
4656 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4657
4658 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4659 lpfc_poll_rearm_timer(phba);
4660 }
4661
4662 if (phba->cfg_xri_rebalancing)
4663 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
4664
4665 return 0;
4666
4667 out_host_busy_free_buf:
4668 idx = lpfc_cmd->hdwq_no;
4669 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4670 if (phba->sli4_hba.hdwq) {
4671 switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
4672 case WRITE_DATA:
4673 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
4674 break;
4675 case READ_DATA:
4676 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
4677 break;
4678 default:
4679 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
4680 }
4681 }
4682 lpfc_release_scsi_buf(phba, lpfc_cmd);
4683 out_host_busy:
4684 return SCSI_MLQUEUE_HOST_BUSY;
4685
4686 out_tgt_busy:
4687 return SCSI_MLQUEUE_TARGET_BUSY;
4688
4689 out_fail_command_release_buf:
4690 lpfc_release_scsi_buf(phba, lpfc_cmd);
4691
4692 out_fail_command:
4693 cmnd->scsi_done(cmnd);
4694 return 0;
4695 }
4696
4697
4698 /**
4699 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
4700 * @cmnd: Pointer to scsi_cmnd data structure.
4701 *
4702 * This routine aborts @cmnd pending in base driver.
4703 *
4704 * Return code :
4705 * 0x2003 - Error
4706 * 0x2002 - Success
4707 **/
4708 static int
4709 lpfc_abort_handler(struct scsi_cmnd *cmnd)
4710 {
4711 struct Scsi_Host *shost = cmnd->device->host;
4712 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4713 struct lpfc_hba *phba = vport->phba;
4714 struct lpfc_iocbq *iocb;
4715 struct lpfc_iocbq *abtsiocb;
4716 struct lpfc_io_buf *lpfc_cmd;
4717 IOCB_t *cmd, *icmd;
4718 int ret = SUCCESS, status = 0;
4719 struct lpfc_sli_ring *pring_s4 = NULL;
4720 int ret_val;
4721 unsigned long flags;
4722 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
4723
4724 status = fc_block_scsi_eh(cmnd);
4725 if (status != 0 && status != SUCCESS)
4726 return status;
4727
4728 lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
4729 if (!lpfc_cmd)
4730 return ret;
4731
4732 spin_lock_irqsave(&phba->hbalock, flags);
4733 /* driver queued commands are in process of being flushed */
4734 if (phba->hba_flag & HBA_IOQ_FLUSH) {
4735 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4736 "3168 SCSI Layer abort requested I/O has been "
4737 "flushed by LLD.\n");
4738 ret = FAILED;
4739 goto out_unlock;
4740 }
4741
4742 /* Guard against IO completion being called at same time */
4743 spin_lock(&lpfc_cmd->buf_lock);
4744
4745 if (!lpfc_cmd->pCmd) {
4746 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4747 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
4748 "x%x ID %d LUN %llu\n",
4749 SUCCESS, cmnd->device->id, cmnd->device->lun);
4750 goto out_unlock_buf;
4751 }
4752
4753 iocb = &lpfc_cmd->cur_iocbq;
4754 if (phba->sli_rev == LPFC_SLI_REV4) {
4755 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
4756 if (!pring_s4) {
4757 ret = FAILED;
4758 goto out_unlock_buf;
4759 }
4760 spin_lock(&pring_s4->ring_lock);
4761 }
4762 /* the command is in process of being cancelled */
4763 if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
4764 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4765 "3169 SCSI Layer abort requested I/O has been "
4766 "cancelled by LLD.\n");
4767 ret = FAILED;
4768 goto out_unlock_ring;
4769 }
4770 /*
4771 * If pCmd field of the corresponding lpfc_io_buf structure
4772 * points to a different SCSI command, then the driver has
4773 * already completed this command, but the midlayer did not
4774 * see the completion before the eh fired. Just return SUCCESS.
4775 */
4776 if (lpfc_cmd->pCmd != cmnd) {
4777 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4778 "3170 SCSI Layer abort requested I/O has been "
4779 "completed by LLD.\n");
4780 goto out_unlock_ring;
4781 }
4782
4783 BUG_ON(iocb->context1 != lpfc_cmd);
4784
4785 /* abort issued in recovery is still in progress */
4786 if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
4787 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4788 "3389 SCSI Layer I/O Abort Request is pending\n");
4789 if (phba->sli_rev == LPFC_SLI_REV4)
4790 spin_unlock(&pring_s4->ring_lock);
4791 spin_unlock(&lpfc_cmd->buf_lock);
4792 spin_unlock_irqrestore(&phba->hbalock, flags);
4793 goto wait_for_cmpl;
4794 }
4795
4796 abtsiocb = __lpfc_sli_get_iocbq(phba);
4797 if (abtsiocb == NULL) {
4798 ret = FAILED;
4799 goto out_unlock_ring;
4800 }
4801
4802 /* Indicate the IO is being aborted by the driver. */
4803 iocb->iocb_flag |= LPFC_DRIVER_ABORTED;
4804
4805 /*
4806 	 * The scsi command cannot be in the txq and it is in flight because the
4807 	 * pCmd is still pointing at the SCSI command we have to abort. There
4808 * is no need to search the txcmplq. Just send an abort to the FW.
4809 */
4810
4811 cmd = &iocb->iocb;
4812 icmd = &abtsiocb->iocb;
4813 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
4814 icmd->un.acxri.abortContextTag = cmd->ulpContext;
4815 if (phba->sli_rev == LPFC_SLI_REV4)
4816 icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
4817 else
4818 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
4819
4820 icmd->ulpLe = 1;
4821 icmd->ulpClass = cmd->ulpClass;
4822
4823 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4824 abtsiocb->hba_wqidx = iocb->hba_wqidx;
4825 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4826 if (iocb->iocb_flag & LPFC_IO_FOF)
4827 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4828
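	/* With the link up, send a real ABTS on the wire; otherwise just close
	 * the exchange locally.
	 */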
4829 if (lpfc_is_link_up(phba))
4830 icmd->ulpCommand = CMD_ABORT_XRI_CN;
4831 else
4832 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
4833
4834 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
4835 abtsiocb->vport = vport;
4836 lpfc_cmd->waitq = &waitq;
4837 if (phba->sli_rev == LPFC_SLI_REV4) {
4838 /* Note: both hbalock and ring_lock must be set here */
4839 ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
4840 abtsiocb, 0);
4841 spin_unlock(&pring_s4->ring_lock);
4842 } else {
4843 ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
4844 abtsiocb, 0);
4845 }
4846 /* no longer need the lock after this point */
4847 spin_unlock_irqrestore(&phba->hbalock, flags);
4848
4849 if (ret_val == IOCB_ERROR) {
4850 /* Indicate the IO is not being aborted by the driver. */
4851 iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
4852 lpfc_cmd->waitq = NULL;
4853 spin_unlock(&lpfc_cmd->buf_lock);
4854 lpfc_sli_release_iocbq(phba, abtsiocb);
4855 ret = FAILED;
4856 goto out;
4857 }
4858
4859 spin_unlock(&lpfc_cmd->buf_lock);
4860
4861 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
4862 lpfc_sli_handle_fast_ring_event(phba,
4863 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
4864
4865 wait_for_cmpl:
4866 /* Wait for abort to complete */
4867 wait_event_timeout(waitq,
4868 (lpfc_cmd->pCmd != cmnd),
4869 msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));
4870
4871 spin_lock(&lpfc_cmd->buf_lock);
4872
4873 if (lpfc_cmd->pCmd == cmnd) {
4874 ret = FAILED;
4875 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
4876 "0748 abort handler timed out waiting "
4877 "for aborting I/O (xri:x%x) to complete: "
4878 "ret %#x, ID %d, LUN %llu\n",
4879 iocb->sli4_xritag, ret,
4880 cmnd->device->id, cmnd->device->lun);
4881 }
4882
4883 lpfc_cmd->waitq = NULL;
4884
4885 spin_unlock(&lpfc_cmd->buf_lock);
4886 goto out;
4887
4888 out_unlock_ring:
4889 if (phba->sli_rev == LPFC_SLI_REV4)
4890 spin_unlock(&pring_s4->ring_lock);
4891 out_unlock_buf:
4892 spin_unlock(&lpfc_cmd->buf_lock);
4893 out_unlock:
4894 spin_unlock_irqrestore(&phba->hbalock, flags);
4895 out:
4896 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
4897 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
4898 "LUN %llu\n", ret, cmnd->device->id,
4899 cmnd->device->lun);
4900 return ret;
4901 }
4902
4903 static char *
4904 lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
4905 {
4906 switch (task_mgmt_cmd) {
4907 case FCP_ABORT_TASK_SET:
4908 return "ABORT_TASK_SET";
4909 case FCP_CLEAR_TASK_SET:
4910 return "FCP_CLEAR_TASK_SET";
4911 case FCP_BUS_RESET:
4912 return "FCP_BUS_RESET";
4913 case FCP_LUN_RESET:
4914 return "FCP_LUN_RESET";
4915 case FCP_TARGET_RESET:
4916 return "FCP_TARGET_RESET";
4917 case FCP_CLEAR_ACA:
4918 return "FCP_CLEAR_ACA";
4919 case FCP_TERMINATE_TASK:
4920 return "FCP_TERMINATE_TASK";
4921 default:
4922 return "unknown";
4923 }
4924 }
4925
4926
4927 /**
4928 * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed
4929 * @vport: The virtual port for which this call is being executed.
4930 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
4931 *
4932  * This routine checks the FCP RSP INFO to see if the task mgmt command succeeded.
4933 *
4934 * Return code :
4935 * 0x2003 - Error
4936 * 0x2002 - Success
4937 **/
4938 static int
4939 lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
4940 {
4941 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
4942 uint32_t rsp_info;
4943 uint32_t rsp_len;
4944 uint8_t rsp_info_code;
4945 int ret = FAILED;
4946
4947
4948 if (fcprsp == NULL)
4949 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4950 "0703 fcp_rsp is missing\n");
4951 else {
4952 rsp_info = fcprsp->rspStatus2;
4953 rsp_len = be32_to_cpu(fcprsp->rspRspLen);
4954 rsp_info_code = fcprsp->rspInfo3;
4955
4956
4957 lpfc_printf_vlog(vport, KERN_INFO,
4958 LOG_FCP,
4959 "0706 fcp_rsp valid 0x%x,"
4960 " rsp len=%d code 0x%x\n",
4961 rsp_info,
4962 rsp_len, rsp_info_code);
4963
4964 /* If FCP_RSP_LEN_VALID bit is one, then the FCP_RSP_LEN
4965 * field specifies the number of valid bytes of FCP_RSP_INFO.
4966 * The FCP_RSP_LEN field shall be set to 0x04 or 0x08
4967 */
4968 if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
4969 ((rsp_len == 8) || (rsp_len == 4))) {
4970 switch (rsp_info_code) {
4971 case RSP_NO_FAILURE:
4972 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4973 "0715 Task Mgmt No Failure\n");
4974 ret = SUCCESS;
4975 break;
4976 case RSP_TM_NOT_SUPPORTED: /* TM rejected */
4977 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4978 "0716 Task Mgmt Target "
4979 "reject\n");
4980 break;
4981 case RSP_TM_NOT_COMPLETED: /* TM failed */
4982 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4983 "0717 Task Mgmt Target "
4984 "failed TM\n");
4985 break;
4986 case RSP_TM_INVALID_LU: /* TM to invalid LU! */
4987 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4988 "0718 Task Mgmt to invalid "
4989 "LUN\n");
4990 break;
4991 }
4992 }
4993 }
4994 return ret;
4995 }
4996
4997
4998 /**
4999 * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler
5000 * @vport: The virtual port for which this call is being executed.
5001 * @rdata: Pointer to remote port local data
5002 * @tgt_id: Target ID of remote device.
5003 * @lun_id: Lun number for the TMF
5004 * @task_mgmt_cmd: type of TMF to send
5005 *
5006 * This routine builds and sends a TMF (SCSI Task Mgmt Function) to
5007 * a remote port.
5008 *
5009 * Return Code:
5010 * 0x2003 - Error
5011 * 0x2002 - Success.
5012 **/
5013 static int
5014 lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
5015 unsigned int tgt_id, uint64_t lun_id,
5016 uint8_t task_mgmt_cmd)
5017 {
5018 struct lpfc_hba *phba = vport->phba;
5019 struct lpfc_io_buf *lpfc_cmd;
5020 struct lpfc_iocbq *iocbq;
5021 struct lpfc_iocbq *iocbqrsp;
5022 struct lpfc_rport_data *rdata;
5023 struct lpfc_nodelist *pnode;
5024 int ret;
5025 int status;
5026
5027 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5028 if (!rdata || !rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
5029 return FAILED;
5030 pnode = rdata->pnode;
5031
5032 lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
5033 if (lpfc_cmd == NULL)
5034 return FAILED;
5035 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5036 lpfc_cmd->rdata = rdata;
5037 lpfc_cmd->pCmd = cmnd;
5038 lpfc_cmd->ndlp = pnode;
5039
5040 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5041 task_mgmt_cmd);
5042 if (!status) {
5043 lpfc_release_scsi_buf(phba, lpfc_cmd);
5044 return FAILED;
5045 }
5046
5047 iocbq = &lpfc_cmd->cur_iocbq;
5048 iocbqrsp = lpfc_sli_get_iocbq(phba);
5049 if (iocbqrsp == NULL) {
5050 lpfc_release_scsi_buf(phba, lpfc_cmd);
5051 return FAILED;
5052 }
5053 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
5054
5055 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5056 "0702 Issue %s to TGT %d LUN %llu "
5057 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
5058 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
5059 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
5060 iocbq->iocb_flag);
5061
5062 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5063 iocbq, iocbqrsp, lpfc_cmd->timeout);
5064 if ((status != IOCB_SUCCESS) ||
5065 (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
5066 if (status != IOCB_SUCCESS ||
5067 iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
5068 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5069 "0727 TMF %s to TGT %d LUN %llu "
5070 "failed (%d, %d) iocb_flag x%x\n",
5071 lpfc_taskmgmt_name(task_mgmt_cmd),
5072 tgt_id, lun_id,
5073 iocbqrsp->iocb.ulpStatus,
5074 iocbqrsp->iocb.un.ulpWord[4],
5075 iocbq->iocb_flag);
5076 /* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */
5077 if (status == IOCB_SUCCESS) {
5078 if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
5079 /* Something in the FCP_RSP was invalid.
5080 * Check conditions */
5081 ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
5082 else
5083 ret = FAILED;
5084 } else if (status == IOCB_TIMEDOUT) {
5085 ret = TIMEOUT_ERROR;
5086 } else {
5087 ret = FAILED;
5088 }
5089 } else
5090 ret = SUCCESS;
5091
5092 lpfc_sli_release_iocbq(phba, iocbqrsp);
5093
5094 if (ret != TIMEOUT_ERROR)
5095 lpfc_release_scsi_buf(phba, lpfc_cmd);
5096
5097 return ret;
5098 }
5099
5100 /**
5101  * lpfc_chk_tgt_mapped - Wait for the scsi target (rport) to become mapped
5102 * @vport: The virtual port to check on
5103 * @cmnd: Pointer to scsi_cmnd data structure.
5104 *
5105 * This routine delays until the scsi target (aka rport) for the
5106 * command exists (is present and logged in) or we declare it non-existent.
5107 *
5108 * Return code :
5109 * 0x2003 - Error
5110 * 0x2002 - Success
5111 **/
5112 static int
5113 lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
5114 {
5115 struct lpfc_rport_data *rdata;
5116 struct lpfc_nodelist *pnode;
5117 unsigned long later;
5118
5119 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5120 if (!rdata) {
5121 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
5122 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
5123 return FAILED;
5124 }
5125 pnode = rdata->pnode;
5126 /*
5127 * If target is not in a MAPPED state, delay until
5128 * target is rediscovered or devloss timeout expires.
5129 */
5130 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
5131 while (time_after(later, jiffies)) {
5132 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
5133 return FAILED;
5134 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
5135 return SUCCESS;
5136 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
5137 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5138 if (!rdata)
5139 return FAILED;
5140 pnode = rdata->pnode;
5141 }
5142 if (!pnode || !NLP_CHK_NODE_ACT(pnode) ||
5143 (pnode->nlp_state != NLP_STE_MAPPED_NODE))
5144 return FAILED;
5145 return SUCCESS;
5146 }
5147
5148 /**
5149  * lpfc_reset_flush_io_context - Flush outstanding I/O contexts after a reset TMF
5150 * @vport: The virtual port (scsi_host) for the flush context
5151  * @tgt_id: If aborting by Target context - specifies the target id
5152 * @lun_id: If aborting by Lun context - specifies the lun id
5153 * @context: specifies the context level to flush at.
5154 *
5155 * After a reset condition via TMF, we need to flush orphaned i/o
5156 * contexts from the adapter. This routine aborts any contexts
5157 * outstanding, then waits for their completions. The wait is
5158 * bounded by devloss_tmo though.
5159 *
5160 * Return code :
5161 * 0x2003 - Error
5162 * 0x2002 - Success
5163 **/
5164 static int
5165 lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
5166 uint64_t lun_id, lpfc_ctx_cmd context)
5167 {
5168 struct lpfc_hba *phba = vport->phba;
5169 unsigned long later;
5170 int cnt;
5171
5172 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5173 if (cnt)
5174 lpfc_sli_abort_taskmgmt(vport,
5175 &phba->sli.sli3_ring[LPFC_FCP_RING],
5176 tgt_id, lun_id, context);
5177 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
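	/* Poll every 20 ms until all outstanding I/Os for this context complete,
	 * bounded by twice the devloss timeout.
	 */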
5178 while (time_after(later, jiffies) && cnt) {
5179 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
5180 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
5181 }
5182 if (cnt) {
5183 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5184 "0724 I/O flush failure for context %s : cnt x%x\n",
5185 ((context == LPFC_CTX_LUN) ? "LUN" :
5186 ((context == LPFC_CTX_TGT) ? "TGT" :
5187 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
5188 cnt);
5189 return FAILED;
5190 }
5191 return SUCCESS;
5192 }
5193
5194 /**
5195 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
5196 * @cmnd: Pointer to scsi_cmnd data structure.
5197 *
5198 * This routine does a device reset by sending a LUN_RESET task management
5199 * command.
5200 *
5201 * Return code :
5202 * 0x2003 - Error
5203 * 0x2002 - Success
5204 **/
5205 static int
5206 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
5207 {
5208 struct Scsi_Host *shost = cmnd->device->host;
5209 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5210 struct lpfc_rport_data *rdata;
5211 struct lpfc_nodelist *pnode;
5212 unsigned tgt_id = cmnd->device->id;
5213 uint64_t lun_id = cmnd->device->lun;
5214 struct lpfc_scsi_event_header scsi_event;
5215 int status;
5216
5217 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5218 if (!rdata || !rdata->pnode) {
5219 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5220 "0798 Device Reset rdata failure: rdata x%px\n",
5221 rdata);
5222 return FAILED;
5223 }
5224 pnode = rdata->pnode;
5225 status = fc_block_scsi_eh(cmnd);
5226 if (status != 0 && status != SUCCESS)
5227 return status;
5228
5229 status = lpfc_chk_tgt_mapped(vport, cmnd);
5230 if (status == FAILED) {
5231 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5232 "0721 Device Reset rport failure: rdata x%px\n", rdata);
5233 return FAILED;
5234 }
5235
5236 scsi_event.event_type = FC_REG_SCSI_EVENT;
5237 scsi_event.subcategory = LPFC_EVENT_LUNRESET;
5238 scsi_event.lun = lun_id;
5239 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5240 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5241
5242 fc_host_post_vendor_event(shost, fc_get_event_number(),
5243 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5244
5245 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5246 FCP_LUN_RESET);
5247
5248 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5249 "0713 SCSI layer issued Device Reset (%d, %llu) "
5250 "return x%x\n", tgt_id, lun_id, status);
5251
5252 /*
5253 * We have to clean up the I/O, as it may have been orphaned by the TMF,
5254 * or, if the TMF failed, it may be in an indeterminate state.
5255 * So, continue on.
5256 * We will report success if all of the I/O aborts successfully.
5257 */
5258 if (status == SUCCESS)
5259 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5260 LPFC_CTX_LUN);
5261
5262 return status;
5263 }
5264
5265 /**
5266 * lpfc_target_reset_handler - scsi_host_template eh_target_reset entry point
5267 * @cmnd: Pointer to scsi_cmnd data structure.
5268 *
5269 * This routine does a target reset by sending a TARGET_RESET task management
5270 * command.
5271 *
5272 * Return code :
5273 * 0x2003 - Error
5274 * 0x2002 - Success
5275 **/
5276 static int
5277 lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
5278 {
5279 struct Scsi_Host *shost = cmnd->device->host;
5280 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5281 struct lpfc_rport_data *rdata;
5282 struct lpfc_nodelist *pnode;
5283 unsigned tgt_id = cmnd->device->id;
5284 uint64_t lun_id = cmnd->device->lun;
5285 struct lpfc_scsi_event_header scsi_event;
5286 int status;
5287
5288 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
5289 if (!rdata || !rdata->pnode) {
5290 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5291 "0799 Target Reset rdata failure: rdata x%px\n",
5292 rdata);
5293 return FAILED;
5294 }
5295 pnode = rdata->pnode;
5296 status = fc_block_scsi_eh(cmnd);
5297 if (status != 0 && status != SUCCESS)
5298 return status;
5299
5300 status = lpfc_chk_tgt_mapped(vport, cmnd);
5301 if (status == FAILED) {
5302 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5303 "0722 Target Reset rport failure: rdata x%px\n", rdata);
5304 if (pnode) {
5305 spin_lock_irq(shost->host_lock);
5306 pnode->nlp_flag &= ~NLP_NPR_ADISC;
5307 pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
5308 spin_unlock_irq(shost->host_lock);
5309 }
5310 lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5311 LPFC_CTX_TGT);
5312 return FAST_IO_FAIL;
5313 }
5314
5315 scsi_event.event_type = FC_REG_SCSI_EVENT;
5316 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
5317 scsi_event.lun = 0;
5318 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
5319 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
5320
5321 fc_host_post_vendor_event(shost, fc_get_event_number(),
5322 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5323
5324 status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
5325 FCP_TARGET_RESET);
5326
5327 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5328 "0723 SCSI layer issued Target Reset (%d, %llu) "
5329 "return x%x\n", tgt_id, lun_id, status);
5330
5331 /*
5332 * We have to clean up the I/O, as it may have been orphaned by the TMF,
5333 * or, if the TMF failed, it may be in an indeterminate state.
5334 * So, continue on.
5335 * We will report success if all of the I/O aborts successfully.
5336 */
5337 if (status == SUCCESS)
5338 status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
5339 LPFC_CTX_TGT);
5340 return status;
5341 }
5342
5343 /**
5344 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
5345 * @cmnd: Pointer to scsi_cmnd data structure.
5346 *
5347 * This routine does target reset to all targets on @cmnd->device->host.
5348 * This emulates Parallel SCSI Bus Reset Semantics.
5349 *
5350 * Return code :
5351 * 0x2003 - Error
5352 * 0x2002 - Success
5353 **/
5354 static int
5355 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
5356 {
5357 struct Scsi_Host *shost = cmnd->device->host;
5358 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5359 struct lpfc_nodelist *ndlp = NULL;
5360 struct lpfc_scsi_event_header scsi_event;
5361 int match;
5362 int ret = SUCCESS, status, i;
5363
5364 scsi_event.event_type = FC_REG_SCSI_EVENT;
5365 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
5366 scsi_event.lun = 0;
5367 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
5368 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
5369
5370 fc_host_post_vendor_event(shost, fc_get_event_number(),
5371 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
5372
5373 status = fc_block_scsi_eh(cmnd);
5374 if (status != 0 && status != SUCCESS)
5375 return status;
5376
5377 /*
5378 * Since the driver manages a single bus device, reset all
5379 * targets known to the driver. Should any target reset
5380 * fail, this routine returns failure to the midlayer.
5381 */
5382 for (i = 0; i < LPFC_MAX_TARGET; i++) {
5383 /* Search for mapped node by target ID */
5384 match = 0;
5385 spin_lock_irq(shost->host_lock);
5386 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5387 if (!NLP_CHK_NODE_ACT(ndlp))
5388 continue;
5389 if (vport->phba->cfg_fcp2_no_tgt_reset &&
5390 (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
5391 continue;
5392 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
5393 ndlp->nlp_sid == i &&
5394 ndlp->rport &&
5395 ndlp->nlp_type & NLP_FCP_TARGET) {
5396 match = 1;
5397 break;
5398 }
5399 }
5400 spin_unlock_irq(shost->host_lock);
5401 if (!match)
5402 continue;
5403
5404 status = lpfc_send_taskmgmt(vport, cmnd,
5405 i, 0, FCP_TARGET_RESET);
5406
5407 if (status != SUCCESS) {
5408 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5409 "0700 Bus Reset on target %d failed\n",
5410 i);
5411 ret = FAILED;
5412 }
5413 }
5414 /*
5415 * We have to clean up the I/O, as it may have been orphaned by the TMFs
5416 * above, or, if any of the TMFs failed, it may be in an
5417 * indeterminate state.
5418 * We will report success if all of the I/O aborts successfully.
5419 */
5420
5421 status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
5422 if (status != SUCCESS)
5423 ret = FAILED;
5424
5425 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5426 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
5427 return ret;
5428 }
5429
5430 /**
5431 * lpfc_host_reset_handler - scsi_host_template eh_host_reset_handler entry pt
5432 * @cmnd: Pointer to scsi_cmnd data structure.
5433 *
5434 * This routine does a host reset of the adapter port. It brings the HBA
5435 * offline, performs a board restart, and then brings the board back online.
5436 * The lpfc_offline call invokes lpfc_sli_hba_down, which aborts and locally
5437 * rejects all outstanding SCSI commands to the host, and the errors are
5438 * returned back to the SCSI mid-layer. As this is the SCSI mid-layer's last
5439 * resort for error handling, it returns an error only if resetting the
5440 * adapter is unsuccessful; in all other cases, it returns success.
5441 *
5442 * Return code :
5443 * 0x2003 - Error
5444 * 0x2002 - Success
5445 **/
5446 static int
5447 lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
5448 {
5449 struct Scsi_Host *shost = cmnd->device->host;
5450 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
5451 struct lpfc_hba *phba = vport->phba;
5452 int rc, ret = SUCCESS;
5453
5454 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5455 "3172 SCSI layer issued Host Reset Data:\n");
5456
5457 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
5458 lpfc_offline(phba);
5459 rc = lpfc_sli_brdrestart(phba);
5460 if (rc)
5461 goto error;
5462
5463 rc = lpfc_online(phba);
5464 if (rc)
5465 goto error;
5466
5467 lpfc_unblock_mgmt_io(phba);
5468
5469 return ret;
5470 error:
5471 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5472 "3323 Failed host reset\n");
5473 lpfc_unblock_mgmt_io(phba);
5474 return FAILED;
5475 }
5476
5477 /**
5478 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
5479 * @sdev: Pointer to scsi_device.
5480 *
5481 * This routine populates cmds_per_lun + 2 scsi_bufs into this host's
5482 * globally available list of scsi buffers. It also makes sure no more scsi
5483 * buffers are allocated than the HBA limit conveyed to the midlayer. This
5484 * list of scsi buffers exists for the lifetime of the driver.
5485 *
5486 * Return codes:
5487 * non-0 - Error
5488 * 0 - Success
5489 **/
5490 static int
5491 lpfc_slave_alloc(struct scsi_device *sdev)
5492 {
5493 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5494 struct lpfc_hba *phba = vport->phba;
5495 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
5496 uint32_t total = 0;
5497 uint32_t num_to_alloc = 0;
5498 int num_allocated = 0;
5499 uint32_t sdev_cnt;
5500 struct lpfc_device_data *device_data;
5501 unsigned long flags;
5502 struct lpfc_name target_wwpn;
5503
5504 if (!rport || fc_remote_port_chkready(rport))
5505 return -ENXIO;
5506
5507 if (phba->cfg_fof) {
5508
5509 /*
5510 * Check to see if the device data structure for the lun
5511 * exists. If not, create one.
5512 */
5513
5514 u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
5515 spin_lock_irqsave(&phba->devicelock, flags);
5516 device_data = __lpfc_get_device_data(phba,
5517 &phba->luns,
5518 &vport->fc_portname,
5519 &target_wwpn,
5520 sdev->lun);
5521 if (!device_data) {
5522 spin_unlock_irqrestore(&phba->devicelock, flags);
5523 device_data = lpfc_create_device_data(phba,
5524 &vport->fc_portname,
5525 &target_wwpn,
5526 sdev->lun,
5527 phba->cfg_XLanePriority,
5528 true);
5529 if (!device_data)
5530 return -ENOMEM;
5531 spin_lock_irqsave(&phba->devicelock, flags);
5532 list_add_tail(&device_data->listentry, &phba->luns);
5533 }
5534 device_data->rport_data = rport->dd_data;
5535 device_data->available = true;
5536 spin_unlock_irqrestore(&phba->devicelock, flags);
5537 sdev->hostdata = device_data;
5538 } else {
5539 sdev->hostdata = rport->dd_data;
5540 }
5541 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
5542
5543 /* For SLI4, all IO buffers are pre-allocated */
5544 if (phba->sli_rev == LPFC_SLI_REV4)
5545 return 0;
5546
5547 /* This code path is now ONLY for SLI3 adapters */
5548
5549 /*
5550 * Populate the cmds_per_lun count scsi_bufs into this host's globally
5551 * available list of scsi buffers. Don't allocate more than the
5552 * HBA limit conveyed to the midlayer via the host structure. The
5553 * formula accounts for the lun_queue_depth + error handlers + 1
5554 * extra. This list of scsi bufs exists for the lifetime of the driver.
5555 */
5556 total = phba->total_scsi_bufs;
5557 num_to_alloc = vport->cfg_lun_queue_depth + 2;
5558
5559 /* If allocated buffers are enough do nothing */
5560 if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
5561 return 0;
5562
5563 /* Allow some exchanges to be available always to complete discovery */
5564 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5565 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5566 "0704 At limitation of %d preallocated "
5567 "command buffers\n", total);
5568 return 0;
5569 /* Allow some exchanges to be available always to complete discovery */
5570 } else if (total + num_to_alloc >
5571 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
5572 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
5573 "0705 Allocation request of %d "
5574 "command buffers will exceed max of %d. "
5575 "Reducing allocation request to %d.\n",
5576 num_to_alloc, phba->cfg_hba_queue_depth,
5577 (phba->cfg_hba_queue_depth - total));
5578 num_to_alloc = phba->cfg_hba_queue_depth - total;
5579 }
5580 num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
5581 if (num_to_alloc != num_allocated) {
5582 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
5583 "0708 Allocation request of %d "
5584 "command buffers did not succeed. "
5585 "Allocated %d buffers.\n",
5586 num_to_alloc, num_allocated);
5587 }
5588 if (num_allocated > 0)
5589 phba->total_scsi_bufs += num_allocated;
5590 return 0;
5591 }
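
/*
 * Illustrative arithmetic for the SLI3 path above (a sketch with
 * hypothetical values, not driver code): with a lun_queue_depth of 30 and
 * an hba_queue_depth of 8192, each new scsi_device asks for 30 + 2 = 32
 * buffers, and the request is trimmed once the running total would cross
 * hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT, so a few exchanges always
 * remain free for discovery:
 *
 *	num_to_alloc = vport->cfg_lun_queue_depth + 2;		// 32
 *	limit = phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT;
 *	if (total >= limit)
 *		return 0;					// already at cap
 *	if (total + num_to_alloc > limit)
 *		num_to_alloc = phba->cfg_hba_queue_depth - total;
 */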
5592
5593 /**
5594 * lpfc_slave_configure - scsi_host_template slave_configure entry point
5595 * @sdev: Pointer to scsi_device.
5596 *
5597 * This routine configures the following items:
5598 * - Tag command queuing support for @sdev if supported.
5599 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
5600 *
5601 * Return codes:
5602 * 0 - Success
5603 **/
5604 static int
5605 lpfc_slave_configure(struct scsi_device *sdev)
5606 {
5607 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5608 struct lpfc_hba *phba = vport->phba;
5609
5610 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
5611
5612 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5613 lpfc_sli_handle_fast_ring_event(phba,
5614 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5615 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5616 lpfc_poll_rearm_timer(phba);
5617 }
5618
5619 return 0;
5620 }
5621
5622 /**
5623 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
5624 * @sdev: Pointer to scsi_device.
5625 *
5626 * This routine sets the @sdev hostdata field to null.
5627 **/
5628 static void
5629 lpfc_slave_destroy(struct scsi_device *sdev)
5630 {
5631 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
5632 struct lpfc_hba *phba = vport->phba;
5633 unsigned long flags;
5634 struct lpfc_device_data *device_data = sdev->hostdata;
5635
5636 atomic_dec(&phba->sdev_cnt);
5637 if ((phba->cfg_fof) && (device_data)) {
5638 spin_lock_irqsave(&phba->devicelock, flags);
5639 device_data->available = false;
5640 if (!device_data->oas_enabled)
5641 lpfc_delete_device_data(phba, device_data);
5642 spin_unlock_irqrestore(&phba->devicelock, flags);
5643 }
5644 sdev->hostdata = NULL;
5645 return;
5646 }
5647
5648 /**
5649 * lpfc_create_device_data - creates and initializes device data structure for OAS
5650 * @phba: Pointer to host bus adapter structure.
5651 * @vport_wwpn: Pointer to vport's wwpn information
5652 * @target_wwpn: Pointer to target's wwpn information
5653 * @lun: Lun on target
5654 * @atomic_create: Flag to indicate if memory should be allocated using the
5655 * GFP_ATOMIC flag or not.
5656 *
5657 * This routine creates a device data structure which will contain identifying
5658 * information for the device (host wwpn, target wwpn, lun), state of OAS,
5659 * whether or not the corresponding lun is available to the system,
5660 * and pointer to the rport data.
5661 *
5662 * Return codes:
5663 * NULL - Error
5664 * Pointer to lpfc_device_data - Success
5665 **/
5666 struct lpfc_device_data*
5667 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5668 struct lpfc_name *target_wwpn, uint64_t lun,
5669 uint32_t pri, bool atomic_create)
5670 {
5671
5672 struct lpfc_device_data *lun_info;
5673 int memory_flags;
5674
5675 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5676 !(phba->cfg_fof))
5677 return NULL;
5678
5679 /* Attempt to create the device data to contain lun info */
5680
5681 if (atomic_create)
5682 memory_flags = GFP_ATOMIC;
5683 else
5684 memory_flags = GFP_KERNEL;
5685 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
5686 if (!lun_info)
5687 return NULL;
5688 INIT_LIST_HEAD(&lun_info->listentry);
5689 lun_info->rport_data = NULL;
5690 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
5691 sizeof(struct lpfc_name));
5692 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
5693 sizeof(struct lpfc_name));
5694 lun_info->device_id.lun = lun;
5695 lun_info->oas_enabled = false;
5696 lun_info->priority = pri;
5697 lun_info->available = false;
5698 return lun_info;
5699 }
5700
5701 /**
5702 * lpfc_delete_device_data - frees a device data structure for OAS
5703 * @phba: Pointer to host bus adapter structure.
5704 * @lun_info: Pointer to device data structure to free.
5705 *
5706 * This routine frees the previously allocated device data structure passed.
5707 *
5708 **/
5709 void
5710 lpfc_delete_device_data(struct lpfc_hba *phba,
5711 struct lpfc_device_data *lun_info)
5712 {
5713
5714 if (unlikely(!phba) || !lun_info ||
5715 !(phba->cfg_fof))
5716 return;
5717
5718 if (!list_empty(&lun_info->listentry))
5719 list_del(&lun_info->listentry);
5720 mempool_free(lun_info, phba->device_data_mem_pool);
5721 return;
5722 }
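
/*
 * Usage sketch for the two helpers above (hypothetical caller, not part of
 * the driver): a device data entry is created with GFP_KERNEL when the
 * caller may sleep, or atomically when created under lock pressure, and is
 * later released with lpfc_delete_device_data():
 *
 *	struct lpfc_device_data *ldd;
 *
 *	ldd = lpfc_create_device_data(phba, &vport->fc_portname,
 *				      &target_wwpn, lun,
 *				      phba->cfg_XLanePriority, false);
 *	if (ldd) {
 *		// ... use ldd, typically after adding it to phba->luns ...
 *		lpfc_delete_device_data(phba, ldd);
 *	}
 */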
5723
5724 /**
5725 * __lpfc_get_device_data - returns the device data for the specified lun
5726 * @phba: Pointer to host bus adapter structure.
5727 * @list: Point to list to search.
5728 * @vport_wwpn: Pointer to vport's wwpn information
5729 * @target_wwpn: Pointer to target's wwpn information
5730 * @lun: Lun on target
5731 *
5732 * This routine searches the list passed for the specified lun's device data.
5733 * This function does not take any locks; it is the responsibility of the caller
5734 * to ensure the proper lock is held before calling the function.
5735 *
5736 * Return codes:
5737 * NULL - Error
5738 * Pointer to lpfc_device_data - Success
5739 **/
5740 struct lpfc_device_data*
5741 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
5742 struct lpfc_name *vport_wwpn,
5743 struct lpfc_name *target_wwpn, uint64_t lun)
5744 {
5745
5746 struct lpfc_device_data *lun_info;
5747
5748 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
5749 !phba->cfg_fof)
5750 return NULL;
5751
5752 /* Check to see if the lun is already enabled for OAS. */
5753
5754 list_for_each_entry(lun_info, list, listentry) {
5755 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5756 sizeof(struct lpfc_name)) == 0) &&
5757 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5758 sizeof(struct lpfc_name)) == 0) &&
5759 (lun_info->device_id.lun == lun))
5760 return lun_info;
5761 }
5762
5763 return NULL;
5764 }
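
/*
 * Locking sketch for __lpfc_get_device_data() (illustrative, mirroring the
 * callers in this file): the lookup itself takes no locks, so callers wrap
 * it in phba->devicelock before touching the returned entry:
 *
 *	unsigned long flags;
 *	struct lpfc_device_data *ldd;
 *
 *	spin_lock_irqsave(&phba->devicelock, flags);
 *	ldd = __lpfc_get_device_data(phba, &phba->luns, &vport_wwpn,
 *				     &target_wwpn, lun);
 *	if (ldd)
 *		ldd->available = true;		// example field update
 *	spin_unlock_irqrestore(&phba->devicelock, flags);
 */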
5765
5766 /**
5767 * lpfc_find_next_oas_lun - searches for the next oas lun
5768 * @phba: Pointer to host bus adapter structure.
5769 * @vport_wwpn: Pointer to vport's wwpn information
5770 * @target_wwpn: Pointer to target's wwpn information
5771 * @starting_lun: Pointer to the lun to start searching for
5772 * @found_vport_wwpn: Pointer to the found lun's vport wwpn information
5773 * @found_target_wwpn: Pointer to the found lun's target wwpn information
5774 * @found_lun: Pointer to the found lun.
5775 * @found_lun_status: Pointer to status of the found lun.
5776 *
5777 * This routine searches the luns list for the specified lun
5778 * or the first lun for the vport/target. If the vport wwpn contains
5779 * a zero value then a specific vport is not specified. In this case
5780 * any vport which contains the lun will be considered a match. If the
5781 * target wwpn contains a zero value then a specific target is not specified.
5782 * In this case any target which contains the lun will be considered a
5783 * match. If the lun is found, the lun, vport wwpn, target wwpn and lun status
5784 * are returned. The function will also return the next lun if available.
5785 * If the next lun is not found, starting_lun parameter will be set to
5786 * NO_MORE_OAS_LUN.
5787 *
5788 * Return codes:
5789 * false - Error or no matching lun found
5790 * true - Success, a matching lun was found
5791 **/
5792 bool
5793 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5794 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
5795 struct lpfc_name *found_vport_wwpn,
5796 struct lpfc_name *found_target_wwpn,
5797 uint64_t *found_lun,
5798 uint32_t *found_lun_status,
5799 uint32_t *found_lun_pri)
5800 {
5801
5802 unsigned long flags;
5803 struct lpfc_device_data *lun_info;
5804 struct lpfc_device_id *device_id;
5805 uint64_t lun;
5806 bool found = false;
5807
5808 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5809 !starting_lun || !found_vport_wwpn ||
5810 !found_target_wwpn || !found_lun || !found_lun_status ||
5811 (*starting_lun == NO_MORE_OAS_LUN) ||
5812 !phba->cfg_fof)
5813 return false;
5814
5815 lun = *starting_lun;
5816 *found_lun = NO_MORE_OAS_LUN;
5817 *starting_lun = NO_MORE_OAS_LUN;
5818
5819 /* Search for the lun, or the lun closest in value */
5820
5821 spin_lock_irqsave(&phba->devicelock, flags);
5822 list_for_each_entry(lun_info, &phba->luns, listentry) {
5823 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
5824 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
5825 sizeof(struct lpfc_name)) == 0)) &&
5826 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
5827 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
5828 sizeof(struct lpfc_name)) == 0)) &&
5829 (lun_info->oas_enabled)) {
5830 device_id = &lun_info->device_id;
5831 if ((!found) &&
5832 ((lun == FIND_FIRST_OAS_LUN) ||
5833 (device_id->lun == lun))) {
5834 *found_lun = device_id->lun;
5835 memcpy(found_vport_wwpn,
5836 &device_id->vport_wwpn,
5837 sizeof(struct lpfc_name));
5838 memcpy(found_target_wwpn,
5839 &device_id->target_wwpn,
5840 sizeof(struct lpfc_name));
5841 if (lun_info->available)
5842 *found_lun_status =
5843 OAS_LUN_STATUS_EXISTS;
5844 else
5845 *found_lun_status = 0;
5846 *found_lun_pri = lun_info->priority;
5847 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
5848 memset(vport_wwpn, 0x0,
5849 sizeof(struct lpfc_name));
5850 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
5851 memset(target_wwpn, 0x0,
5852 sizeof(struct lpfc_name));
5853 found = true;
5854 } else if (found) {
5855 *starting_lun = device_id->lun;
5856 memcpy(vport_wwpn, &device_id->vport_wwpn,
5857 sizeof(struct lpfc_name));
5858 memcpy(target_wwpn, &device_id->target_wwpn,
5859 sizeof(struct lpfc_name));
5860 break;
5861 }
5862 }
5863 }
5864 spin_unlock_irqrestore(&phba->devicelock, flags);
5865 return found;
5866 }
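
/*
 * Enumeration sketch for lpfc_find_next_oas_lun() (hypothetical caller):
 * starting from FIND_FIRST_OAS_LUN with zeroed wwpns ("match any"), each
 * successful call reports one OAS-enabled lun and primes the next starting
 * point; the walk ends once the starting lun comes back as NO_MORE_OAS_LUN:
 *
 *	struct lpfc_name vwwpn, twwpn, fvwwpn, ftwwpn;
 *	uint64_t lun = FIND_FIRST_OAS_LUN, flun;
 *	uint32_t fstatus, fpri;
 *
 *	memset(&vwwpn, 0, sizeof(vwwpn));
 *	memset(&twwpn, 0, sizeof(twwpn));
 *	while (lpfc_find_next_oas_lun(phba, &vwwpn, &twwpn, &lun,
 *				      &fvwwpn, &ftwwpn, &flun,
 *				      &fstatus, &fpri))
 *		;	// flun/fstatus/fpri describe one OAS lun
 */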
5867
5868 /**
5869 * lpfc_enable_oas_lun - enables a lun for OAS operations
5870 * @phba: Pointer to host bus adapter structure.
5871 * @vport_wwpn: Pointer to vport's wwpn information
5872 * @target_wwpn: Pointer to target's wwpn information
5873 * @lun: Lun
5874 *
5875 * This routine enables a lun for OAS operations. The routine does so by
5876 * doing the following:
5877 *
5878 * 1) Checks to see if the device data for the lun has been created.
5879 * 2) If found, sets the OAS enabled flag if not set and returns.
5880 * 3) Otherwise, creates a device data structure.
5881 * 4) If successfully created, indicates the device data is for an OAS lun,
5882 * indicates the lun is not available, and adds it to the list of luns.
5883 *
5884 * Return codes:
5885 * false - Error
5886 * true - Success
5887 **/
5888 bool
5889 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5890 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5891 {
5892
5893 struct lpfc_device_data *lun_info;
5894 unsigned long flags;
5895
5896 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5897 !phba->cfg_fof)
5898 return false;
5899
5900 spin_lock_irqsave(&phba->devicelock, flags);
5901
5902 /* Check to see if the device data for the lun has been created */
5903 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
5904 target_wwpn, lun);
5905 if (lun_info) {
5906 if (!lun_info->oas_enabled)
5907 lun_info->oas_enabled = true;
5908 lun_info->priority = pri;
5909 spin_unlock_irqrestore(&phba->devicelock, flags);
5910 return true;
5911 }
5912
5913 /* Create an lun info structure and add to list of luns */
5914 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
5915 pri, true);
5916 if (lun_info) {
5917 lun_info->oas_enabled = true;
5918 lun_info->priority = pri;
5919 lun_info->available = false;
5920 list_add_tail(&lun_info->listentry, &phba->luns);
5921 spin_unlock_irqrestore(&phba->devicelock, flags);
5922 return true;
5923 }
5924 spin_unlock_irqrestore(&phba->devicelock, flags);
5925 return false;
5926 }
5927
5928 /**
5929 * lpfc_disable_oas_lun - disables a lun for OAS operations
5930 * @phba: Pointer to host bus adapter structure.
5931 * @vport_wwpn: Pointer to vport's wwpn information
5932 * @target_wwpn: Pointer to target's wwpn information
5933 * @lun: Lun
5934 *
5935 * This routine disables a lun for OAS operations. The routine does so by
5936 * doing the following:
5937 *
5938 * 1) Checks to see if the device data for the lun is created.
5939 * 2) If present, clears the flag indicating this lun is for OAS.
5940 * 3) If the lun is not available to the system, the device data is
5941 * freed.
5942 *
5943 * Return codes:
5944 * false - Error
5945 * true - Success
5946 **/
5947 bool
5948 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
5949 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
5950 {
5951
5952 struct lpfc_device_data *lun_info;
5953 unsigned long flags;
5954
5955 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
5956 !phba->cfg_fof)
5957 return false;
5958
5959 spin_lock_irqsave(&phba->devicelock, flags);
5960
5961 /* Check to see if the lun is available. */
5962 lun_info = __lpfc_get_device_data(phba,
5963 &phba->luns, vport_wwpn,
5964 target_wwpn, lun);
5965 if (lun_info) {
5966 lun_info->oas_enabled = false;
5967 lun_info->priority = pri;
5968 if (!lun_info->available)
5969 lpfc_delete_device_data(phba, lun_info);
5970 spin_unlock_irqrestore(&phba->devicelock, flags);
5971 return true;
5972 }
5973
5974 spin_unlock_irqrestore(&phba->devicelock, flags);
5975 return false;
5976 }
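
/*
 * Pairing sketch for the OAS enable/disable helpers above (hypothetical
 * values): a lun is switched on at a given priority and later switched
 * off; on disable the entry is only freed if the lun is no longer
 * available to the system:
 *
 *	if (lpfc_enable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun, pri))
 *		;	// OAS now enabled for (vport_wwpn, target_wwpn, lun)
 *	// ... later ...
 *	lpfc_disable_oas_lun(phba, &vport_wwpn, &target_wwpn, lun, pri);
 */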
5977
5978 static int
5979 lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
5980 {
5981 return SCSI_MLQUEUE_HOST_BUSY;
5982 }
5983
5984 static int
5985 lpfc_no_handler(struct scsi_cmnd *cmnd)
5986 {
5987 return FAILED;
5988 }
5989
5990 static int
5991 lpfc_no_slave(struct scsi_device *sdev)
5992 {
5993 return -ENODEV;
5994 }
5995
5996 struct scsi_host_template lpfc_template_nvme = {
5997 .module = THIS_MODULE,
5998 .name = LPFC_DRIVER_NAME,
5999 .proc_name = LPFC_DRIVER_NAME,
6000 .info = lpfc_info,
6001 .queuecommand = lpfc_no_command,
6002 .eh_abort_handler = lpfc_no_handler,
6003 .eh_device_reset_handler = lpfc_no_handler,
6004 .eh_target_reset_handler = lpfc_no_handler,
6005 .eh_bus_reset_handler = lpfc_no_handler,
6006 .eh_host_reset_handler = lpfc_no_handler,
6007 .slave_alloc = lpfc_no_slave,
6008 .slave_configure = lpfc_no_slave,
6009 .scan_finished = lpfc_scan_finished,
6010 .this_id = -1,
6011 .sg_tablesize = 1,
6012 .cmd_per_lun = 1,
6013 .shost_attrs = lpfc_hba_attrs,
6014 .max_sectors = 0xFFFF,
6015 .vendor_id = LPFC_NL_VENDOR_ID,
6016 .track_queue_depth = 0,
6017 };
6018
6019 struct scsi_host_template lpfc_template_no_hr = {
6020 .module = THIS_MODULE,
6021 .name = LPFC_DRIVER_NAME,
6022 .proc_name = LPFC_DRIVER_NAME,
6023 .info = lpfc_info,
6024 .queuecommand = lpfc_queuecommand,
6025 .eh_timed_out = fc_eh_timed_out,
6026 .eh_abort_handler = lpfc_abort_handler,
6027 .eh_device_reset_handler = lpfc_device_reset_handler,
6028 .eh_target_reset_handler = lpfc_target_reset_handler,
6029 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6030 .slave_alloc = lpfc_slave_alloc,
6031 .slave_configure = lpfc_slave_configure,
6032 .slave_destroy = lpfc_slave_destroy,
6033 .scan_finished = lpfc_scan_finished,
6034 .this_id = -1,
6035 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6036 .cmd_per_lun = LPFC_CMD_PER_LUN,
6037 .shost_attrs = lpfc_hba_attrs,
6038 .max_sectors = 0xFFFFFFFF,
6039 .vendor_id = LPFC_NL_VENDOR_ID,
6040 .change_queue_depth = scsi_change_queue_depth,
6041 .track_queue_depth = 1,
6042 };
6043
6044 struct scsi_host_template lpfc_template = {
6045 .module = THIS_MODULE,
6046 .name = LPFC_DRIVER_NAME,
6047 .proc_name = LPFC_DRIVER_NAME,
6048 .info = lpfc_info,
6049 .queuecommand = lpfc_queuecommand,
6050 .eh_timed_out = fc_eh_timed_out,
6051 .eh_abort_handler = lpfc_abort_handler,
6052 .eh_device_reset_handler = lpfc_device_reset_handler,
6053 .eh_target_reset_handler = lpfc_target_reset_handler,
6054 .eh_bus_reset_handler = lpfc_bus_reset_handler,
6055 .eh_host_reset_handler = lpfc_host_reset_handler,
6056 .slave_alloc = lpfc_slave_alloc,
6057 .slave_configure = lpfc_slave_configure,
6058 .slave_destroy = lpfc_slave_destroy,
6059 .scan_finished = lpfc_scan_finished,
6060 .this_id = -1,
6061 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6062 .cmd_per_lun = LPFC_CMD_PER_LUN,
6063 .shost_attrs = lpfc_hba_attrs,
6064 .max_sectors = 0xFFFF,
6065 .vendor_id = LPFC_NL_VENDOR_ID,
6066 .change_queue_depth = scsi_change_queue_depth,
6067 .track_queue_depth = 1,
6068 };
6069
6070 struct scsi_host_template lpfc_vport_template = {
6071 .module = THIS_MODULE,
6072 .name = LPFC_DRIVER_NAME,
6073 .proc_name = LPFC_DRIVER_NAME,
6074 .info = lpfc_info,
6075 .queuecommand = lpfc_queuecommand,
6076 .eh_timed_out = fc_eh_timed_out,
6077 .eh_abort_handler = lpfc_abort_handler,
6078 .eh_device_reset_handler = lpfc_device_reset_handler,
6079 .eh_target_reset_handler = lpfc_target_reset_handler,
6080 .slave_alloc = lpfc_slave_alloc,
6081 .slave_configure = lpfc_slave_configure,
6082 .slave_destroy = lpfc_slave_destroy,
6083 .scan_finished = lpfc_scan_finished,
6084 .this_id = -1,
6085 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
6086 .cmd_per_lun = LPFC_CMD_PER_LUN,
6087 .shost_attrs = lpfc_vport_attrs,
6088 .max_sectors = 0xFFFF,
6089 .change_queue_depth = scsi_change_queue_depth,
6090 .track_queue_depth = 1,
6091 };
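
/*
 * Template selection sketch (an assumption about the surrounding driver
 * setup code, which lives outside this file): during port creation the
 * driver hands one of the templates above to the SCSI midlayer, e.g.
 *
 *	struct Scsi_Host *shost;
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *	if (!shost)
 *		return NULL;	// allocation failed
 *
 * Presumably lpfc_vport_template is used for NPIV vports, lpfc_template_no_hr
 * when host reset is not supported, and lpfc_template_nvme when the port is
 * configured for NVMe-only operation.
 */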
6092