1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
9
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12
13 #include <scsi/scsi_tcq.h>
14
15 /**
16 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
17 * @sp: SCSI command
18 *
19 * Returns the proper CF_* direction based on CDB.
20 */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 struct scsi_qla_host *vha = sp->vha;
27
28 cflags = 0;
29
30 /* Set transfer direction */
31 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 cflags = CF_WRITE;
33 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 vha->qla_stats.output_requests++;
35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 cflags = CF_READ;
37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 vha->qla_stats.input_requests++;
39 }
40 return (cflags);
41 }
42
43 /**
44 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45 * Continuation Type 0 IOCBs to allocate.
46 *
47 * @dsds: number of data segment descriptors needed
48 *
49 * Returns the number of IOCB entries needed to store @dsds.
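 *
 * The first IOCB carries 3 DSDs; each Continuation Type 0 IOCB carries 7
 * more, so e.g. 12 DSDs need 1 + ceil((12 - 3) / 7) = 3 entries.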
50 */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54 uint16_t iocbs;
55
56 iocbs = 1;
57 if (dsds > 3) {
58 iocbs += (dsds - 3) / 7;
59 if ((dsds - 3) % 7)
60 iocbs++;
61 }
62 return (iocbs);
63 }
64
65 /**
66 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67 * Continuation Type 1 IOCBs to allocate.
68 *
69 * @dsds: number of data segment descriptors needed
70 *
71 * Returns the number of IOCB entries needed to store @dsds.
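 *
 * The first IOCB carries 2 DSDs; each Continuation Type 1 IOCB carries 5
 * more, so e.g. 12 DSDs need 1 + ceil((12 - 2) / 5) = 3 entries.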
72 */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76 uint16_t iocbs;
77
78 iocbs = 1;
79 if (dsds > 2) {
80 iocbs += (dsds - 2) / 5;
81 if ((dsds - 2) % 5)
82 iocbs++;
83 }
84 return (iocbs);
85 }
86
87 /**
88 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89 * @vha: HA context
90 *
91 * Returns a pointer to the Continuation Type 0 IOCB packet.
92 */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96 cont_entry_t *cont_pkt;
97 struct req_que *req = vha->req;
98 /* Adjust ring index. */
99 req->ring_index++;
100 if (req->ring_index == req->length) {
101 req->ring_index = 0;
102 req->ring_ptr = req->ring;
103 } else {
104 req->ring_ptr++;
105 }
106
107 cont_pkt = (cont_entry_t *)req->ring_ptr;
108
109 /* Load packet defaults. */
110 *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
111
112 return (cont_pkt);
113 }
114
115 /**
116 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117 * @vha: HA context
118 * @req: request queue
119 *
120 * Returns a pointer to the continuation type 1 IOCB packet.
121 */
122 static inline cont_a64_entry_t *
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
124 {
125 cont_a64_entry_t *cont_pkt;
126
127 /* Adjust ring index. */
128 req->ring_index++;
129 if (req->ring_index == req->length) {
130 req->ring_index = 0;
131 req->ring_ptr = req->ring;
132 } else {
133 req->ring_ptr++;
134 }
135
136 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
137
138 /* Load packet defaults. */
139 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
140 cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
141 cpu_to_le32(CONTINUE_A64_TYPE);
142
143 return (cont_pkt);
144 }
145
146 inline int
147 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
148 {
149 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
150 uint8_t guard = scsi_host_get_guard(cmd->device->host);
151
152 /* We always use DIF Bundling for best performance */
153 *fw_prot_opts = 0;
154
155 /* Translate SCSI opcode to a protection opcode */
156 switch (scsi_get_prot_op(cmd)) {
157 case SCSI_PROT_READ_STRIP:
158 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
159 break;
160 case SCSI_PROT_WRITE_INSERT:
161 *fw_prot_opts |= PO_MODE_DIF_INSERT;
162 break;
163 case SCSI_PROT_READ_INSERT:
164 *fw_prot_opts |= PO_MODE_DIF_INSERT;
165 break;
166 case SCSI_PROT_WRITE_STRIP:
167 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
168 break;
169 case SCSI_PROT_READ_PASS:
170 case SCSI_PROT_WRITE_PASS:
171 if (guard & SHOST_DIX_GUARD_IP)
172 *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
173 else
174 *fw_prot_opts |= PO_MODE_DIF_PASS;
175 break;
176 default: /* Normal Request */
177 *fw_prot_opts |= PO_MODE_DIF_PASS;
178 break;
179 }
180
181 return scsi_prot_sg_count(cmd);
182 }
183
184 /*
185 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
186 * capable IOCB types.
187 *
188 * @sp: SRB command to process
189 * @cmd_pkt: Command type 2 IOCB
190 * @tot_dsds: Total number of segments to transfer
191 */
192 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
193 uint16_t tot_dsds)
194 {
195 uint16_t avail_dsds;
196 uint32_t *cur_dsd;
197 scsi_qla_host_t *vha;
198 struct scsi_cmnd *cmd;
199 struct scatterlist *sg;
200 int i;
201
202 cmd = GET_CMD_SP(sp);
203
204 /* Update entry type to indicate Command Type 2 IOCB */
205 *((uint32_t *)(&cmd_pkt->entry_type)) =
206 cpu_to_le32(COMMAND_TYPE);
207
208 /* No data transfer */
209 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
210 cmd_pkt->byte_count = cpu_to_le32(0);
211 return;
212 }
213
214 vha = sp->vha;
215 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
216
217 /* Three DSDs are available in the Command Type 2 IOCB */
218 avail_dsds = 3;
219 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
220
221 /* Load data segments */
222 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
223 cont_entry_t *cont_pkt;
224
225 /* Allocate additional continuation packets? */
226 if (avail_dsds == 0) {
227 /*
228 * Seven DSDs are available in the Continuation
229 * Type 0 IOCB.
230 */
231 cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
232 cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
233 avail_dsds = 7;
234 }
235
236 *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
237 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
238 avail_dsds--;
239 }
240 }
241
242 /**
243 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
244 * capable IOCB types.
245 *
246 * @sp: SRB command to process
247 * @cmd_pkt: Command type 3 IOCB
248 * @tot_dsds: Total number of segments to transfer
249 */
250 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
251 uint16_t tot_dsds)
252 {
253 uint16_t avail_dsds;
254 uint32_t *cur_dsd;
255 scsi_qla_host_t *vha;
256 struct scsi_cmnd *cmd;
257 struct scatterlist *sg;
258 int i;
259
260 cmd = GET_CMD_SP(sp);
261
262 /* Update entry type to indicate Command Type 3 IOCB */
263 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
264
265 /* No data transfer */
266 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
267 cmd_pkt->byte_count = cpu_to_le32(0);
268 return;
269 }
270
271 vha = sp->vha;
272 cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
273
274 /* Two DSDs are available in the Command Type 3 IOCB */
275 avail_dsds = 2;
276 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
277
278 /* Load data segments */
279 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
280 dma_addr_t sle_dma;
281 cont_a64_entry_t *cont_pkt;
282
283 /* Allocate additional continuation packets? */
284 if (avail_dsds == 0) {
285 /*
286 * Five DSDs are available in the Continuation
287 * Type 1 IOCB.
288 */
289 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
290 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
291 avail_dsds = 5;
292 }
293
294 sle_dma = sg_dma_address(sg);
295 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
296 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
297 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
298 avail_dsds--;
299 }
300 }
301
302 /**
303 * qla2x00_start_scsi() - Send a SCSI command to the ISP
304 * @sp: command to send to the ISP
305 *
306 * Returns non-zero if a failure occurred, else zero.
307 */
308 int
309 qla2x00_start_scsi(srb_t *sp)
310 {
311 int nseg;
312 unsigned long flags;
313 scsi_qla_host_t *vha;
314 struct scsi_cmnd *cmd;
315 uint32_t *clr_ptr;
316 uint32_t index;
317 uint32_t handle;
318 cmd_entry_t *cmd_pkt;
319 uint16_t cnt;
320 uint16_t req_cnt;
321 uint16_t tot_dsds;
322 struct device_reg_2xxx __iomem *reg;
323 struct qla_hw_data *ha;
324 struct req_que *req;
325 struct rsp_que *rsp;
326
327 /* Setup device pointers. */
328 vha = sp->vha;
329 ha = vha->hw;
330 reg = &ha->iobase->isp;
331 cmd = GET_CMD_SP(sp);
332 req = ha->req_q_map[0];
333 rsp = ha->rsp_q_map[0];
334 /* So we know we haven't pci_map'ed anything yet */
335 tot_dsds = 0;
336
337 /* Send marker if required */
338 if (vha->marker_needed != 0) {
339 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
340 QLA_SUCCESS) {
341 return (QLA_FUNCTION_FAILED);
342 }
343 vha->marker_needed = 0;
344 }
345
346 /* Acquire ring specific lock */
347 spin_lock_irqsave(&ha->hardware_lock, flags);
348
349 /* Check for room in outstanding command list. */
350 handle = req->current_outstanding_cmd;
351 for (index = 1; index < req->num_outstanding_cmds; index++) {
352 handle++;
353 if (handle == req->num_outstanding_cmds)
354 handle = 1;
355 if (!req->outstanding_cmds[handle])
356 break;
357 }
358 if (index == req->num_outstanding_cmds)
359 goto queuing_error;
360
361 /* Map the sg table so we have an accurate count of sg entries needed */
362 if (scsi_sg_count(cmd)) {
363 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
364 scsi_sg_count(cmd), cmd->sc_data_direction);
365 if (unlikely(!nseg))
366 goto queuing_error;
367 } else
368 nseg = 0;
369
370 tot_dsds = nseg;
371
372 /* Calculate the number of request entries needed. */
373 req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
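/*
 * If the cached free-entry count looks too small, re-read the firmware's
 * request-queue out pointer and recompute the remaining ring space
 * (keeping a two-entry cushion) before giving up.
 */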
374 if (req->cnt < (req_cnt + 2)) {
375 cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
376 if (req->ring_index < cnt)
377 req->cnt = cnt - req->ring_index;
378 else
379 req->cnt = req->length -
380 (req->ring_index - cnt);
381 /* If still no head room then bail out */
382 if (req->cnt < (req_cnt + 2))
383 goto queuing_error;
384 }
385
386 /* Build command packet */
387 req->current_outstanding_cmd = handle;
388 req->outstanding_cmds[handle] = sp;
389 sp->handle = handle;
390 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
391 req->cnt -= req_cnt;
392
393 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
394 cmd_pkt->handle = handle;
395 /* Zero out remaining portion of packet. */
396 clr_ptr = (uint32_t *)cmd_pkt + 2;
397 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
398 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
399
400 /* Set target ID and LUN number*/
401 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
402 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
403 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
404
405 /* Load SCSI command packet. */
406 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
407 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
408
409 /* Build IOCB segments */
410 ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
411
412 /* Set total data segment count. */
413 cmd_pkt->entry_count = (uint8_t)req_cnt;
414 wmb();
415
416 /* Adjust ring index. */
417 req->ring_index++;
418 if (req->ring_index == req->length) {
419 req->ring_index = 0;
420 req->ring_ptr = req->ring;
421 } else
422 req->ring_ptr++;
423
424 sp->flags |= SRB_DMA_VALID;
425
426 /* Set chip new ring index. */
427 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
428 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */
429
430 /* Manage unprocessed RIO/ZIO commands in response queue. */
431 if (vha->flags.process_response_queue &&
432 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
433 qla2x00_process_response_queue(rsp);
434
435 spin_unlock_irqrestore(&ha->hardware_lock, flags);
436 return (QLA_SUCCESS);
437
438 queuing_error:
439 if (tot_dsds)
440 scsi_dma_unmap(cmd);
441
442 spin_unlock_irqrestore(&ha->hardware_lock, flags);
443
444 return (QLA_FUNCTION_FAILED);
445 }
446
447 /**
448 * qla2x00_start_iocbs() - Execute the IOCB command
449 * @vha: HA context
450 * @req: request queue
451 */
452 void
453 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
454 {
455 struct qla_hw_data *ha = vha->hw;
456 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
457
458 if (IS_P3P_TYPE(ha)) {
459 qla82xx_start_iocbs(vha);
460 } else {
461 /* Adjust ring index. */
462 req->ring_index++;
463 if (req->ring_index == req->length) {
464 req->ring_index = 0;
465 req->ring_ptr = req->ring;
466 } else
467 req->ring_ptr++;
468
469 /* Set chip new ring index. */
470 if (ha->mqenable || IS_QLA27XX(ha)) {
471 WRT_REG_DWORD(req->req_q_in, req->ring_index);
472 } else if (IS_QLA83XX(ha)) {
473 WRT_REG_DWORD(req->req_q_in, req->ring_index);
474 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
475 } else if (IS_QLAFX00(ha)) {
476 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
477 RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
478 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
479 } else if (IS_FWI2_CAPABLE(ha)) {
480 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
481 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
482 } else {
483 WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
484 req->ring_index);
485 RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
486 }
487 }
488 }
489
490 /**
491 * qla2x00_marker() - Send a marker IOCB to the firmware.
492 * @vha: HA context
493 * @req: request queue
494 * @rsp: response queue
495 * @loop_id: loop ID
496 * @lun: LUN
497 * @type: marker modifier
498 *
499 * Can be called from both normal and interrupt context.
500 *
501 * Returns non-zero if a failure occurred, else zero.
502 */
503 static int
504 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
505 struct rsp_que *rsp, uint16_t loop_id,
506 uint64_t lun, uint8_t type)
507 {
508 mrk_entry_t *mrk;
509 struct mrk_entry_24xx *mrk24 = NULL;
510
511 struct qla_hw_data *ha = vha->hw;
512 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
513
514 req = ha->req_q_map[0];
515 mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
516 if (mrk == NULL) {
517 ql_log(ql_log_warn, base_vha, 0x3026,
518 "Failed to allocate Marker IOCB.\n");
519
520 return (QLA_FUNCTION_FAILED);
521 }
522
523 mrk->entry_type = MARKER_TYPE;
524 mrk->modifier = type;
525 if (type != MK_SYNC_ALL) {
526 if (IS_FWI2_CAPABLE(ha)) {
527 mrk24 = (struct mrk_entry_24xx *) mrk;
528 mrk24->nport_handle = cpu_to_le16(loop_id);
529 int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
530 host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
531 mrk24->vp_index = vha->vp_idx;
532 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
533 } else {
534 SET_TARGET_ID(ha, mrk->target, loop_id);
535 mrk->lun = cpu_to_le16((uint16_t)lun);
536 }
537 }
538 wmb();
539
540 qla2x00_start_iocbs(vha, req);
541
542 return (QLA_SUCCESS);
543 }
544
545 int
546 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
547 struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
548 uint8_t type)
549 {
550 int ret;
551 unsigned long flags = 0;
552
553 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
554 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
555 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
556
557 return (ret);
558 }
559
560 /*
561 * qla2x00_issue_marker
562 *
563 * Issue marker
564 * Caller CAN have hardware lock held as specified by ha_locked parameter.
565 * Might release it, then reacquire.
566 */
567 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
568 {
569 if (ha_locked) {
570 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
571 MK_SYNC_ALL) != QLA_SUCCESS)
572 return QLA_FUNCTION_FAILED;
573 } else {
574 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
575 MK_SYNC_ALL) != QLA_SUCCESS)
576 return QLA_FUNCTION_FAILED;
577 }
578 vha->marker_needed = 0;
579
580 return QLA_SUCCESS;
581 }
582
583 static inline int
584 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
585 uint16_t tot_dsds)
586 {
587 uint32_t *cur_dsd = NULL;
588 scsi_qla_host_t *vha;
589 struct qla_hw_data *ha;
590 struct scsi_cmnd *cmd;
591 struct scatterlist *cur_seg;
592 uint32_t *dsd_seg;
593 void *next_dsd;
594 uint8_t avail_dsds;
595 uint8_t first_iocb = 1;
596 uint32_t dsd_list_len;
597 struct dsd_dma *dsd_ptr;
598 struct ct6_dsd *ctx;
599
600 cmd = GET_CMD_SP(sp);
601
602 /* Update entry type to indicate Command Type 6 IOCB */
603 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
604
605 /* No data transfer */
606 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
607 cmd_pkt->byte_count = cpu_to_le32(0);
608 return 0;
609 }
610
611 vha = sp->vha;
612 ha = vha->hw;
613
614 /* Set transfer direction */
615 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
616 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
617 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
618 vha->qla_stats.output_requests++;
619 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
620 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
621 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
622 vha->qla_stats.input_requests++;
623 }
624
625 cur_seg = scsi_sglist(cmd);
626 ctx = GET_CMD_CTX_SP(sp);
627
628 while (tot_dsds) {
629 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
630 QLA_DSDS_PER_IOCB : tot_dsds;
631 tot_dsds -= avail_dsds;
632 dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
633
634 dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
635 struct dsd_dma, list);
636 next_dsd = dsd_ptr->dsd_addr;
637 list_del(&dsd_ptr->list);
638 ha->gbl_dsd_avail--;
639 list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
640 ctx->dsd_use_cnt++;
641 ha->gbl_dsd_inuse++;
642
643 if (first_iocb) {
644 first_iocb = 0;
645 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
646 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
647 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
648 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
649 } else {
650 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
651 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
652 *cur_dsd++ = cpu_to_le32(dsd_list_len);
653 }
654 cur_dsd = (uint32_t *)next_dsd;
655 while (avail_dsds) {
656 dma_addr_t sle_dma;
657
658 sle_dma = sg_dma_address(cur_seg);
659 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
660 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
661 *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
662 cur_seg = sg_next(cur_seg);
663 avail_dsds--;
664 }
665 }
666
667 /* Null termination */
668 *cur_dsd++ = 0;
669 *cur_dsd++ = 0;
670 *cur_dsd++ = 0;
671 cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
672 return 0;
673 }
674
675 /*
676 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
677 * for Command Type 6.
678 *
679 * @dsds: number of data segment descriptors needed
680 *
681 * Returns the number of DSD lists needed to store @dsds.
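 *
 * Each DSD list holds up to QLA_DSDS_PER_IOCB descriptors, so for example
 * QLA_DSDS_PER_IOCB + 1 descriptors require two lists.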
682 */
683 static inline uint16_t
684 qla24xx_calc_dsd_lists(uint16_t dsds)
685 {
686 uint16_t dsd_lists = 0;
687
688 dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
689 if (dsds % QLA_DSDS_PER_IOCB)
690 dsd_lists++;
691 return dsd_lists;
692 }
693
694
695 /**
696 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
697 * IOCB types.
698 *
699 * @sp: SRB command to process
700 * @cmd_pkt: Command type 7 IOCB
701 * @tot_dsds: Total number of segments to transfer
702 * @req: pointer to request queue
703 */
704 inline void
705 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
706 uint16_t tot_dsds, struct req_que *req)
707 {
708 uint16_t avail_dsds;
709 uint32_t *cur_dsd;
710 scsi_qla_host_t *vha;
711 struct scsi_cmnd *cmd;
712 struct scatterlist *sg;
713 int i;
714
715 cmd = GET_CMD_SP(sp);
716
717 /* Update entry type to indicate Command Type 7 IOCB */
718 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
719
720 /* No data transfer */
721 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
722 cmd_pkt->byte_count = cpu_to_le32(0);
723 return;
724 }
725
726 vha = sp->vha;
727
728 /* Set transfer direction */
729 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
730 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
731 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
732 vha->qla_stats.output_requests++;
733 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
734 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
735 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
736 vha->qla_stats.input_requests++;
737 }
738
739 /* One DSD is available in the Command Type 7 IOCB */
740 avail_dsds = 1;
741 cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
742
743 /* Load data segments */
744
745 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
746 dma_addr_t sle_dma;
747 cont_a64_entry_t *cont_pkt;
748
749 /* Allocate additional continuation packets? */
750 if (avail_dsds == 0) {
751 /*
752 * Five DSDs are available in the Continuation
753 * Type 1 IOCB.
754 */
755 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
756 cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
757 avail_dsds = 5;
758 }
759
760 sle_dma = sg_dma_address(sg);
761 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
762 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
763 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
764 avail_dsds--;
765 }
766 }
767
768 struct fw_dif_context {
769 uint32_t ref_tag;
770 uint16_t app_tag;
771 uint8_t ref_tag_mask[4]; /* Validation/Replacement Mask*/
772 uint8_t app_tag_mask[2]; /* Validation/Replacement Mask*/
773 };
774
775 /*
776 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
777 *
778 */
779 static inline void
780 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
781 unsigned int protcnt)
782 {
783 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
784
785 switch (scsi_get_prot_type(cmd)) {
786 case SCSI_PROT_DIF_TYPE0:
787 /*
788 * No check for ql2xenablehba_err_chk, as it would be an
789 * I/O error if hba tag generation is not done.
790 */
791 pkt->ref_tag = cpu_to_le32((uint32_t)
792 (0xffffffff & scsi_get_lba(cmd)));
793
794 if (!qla2x00_hba_err_chk_enabled(sp))
795 break;
796
797 pkt->ref_tag_mask[0] = 0xff;
798 pkt->ref_tag_mask[1] = 0xff;
799 pkt->ref_tag_mask[2] = 0xff;
800 pkt->ref_tag_mask[3] = 0xff;
801 break;
802
803 /*
804 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
805 * match LBA in CDB + N
806 */
807 case SCSI_PROT_DIF_TYPE2:
808 pkt->app_tag = cpu_to_le16(0);
809 pkt->app_tag_mask[0] = 0x0;
810 pkt->app_tag_mask[1] = 0x0;
811
812 pkt->ref_tag = cpu_to_le32((uint32_t)
813 (0xffffffff & scsi_get_lba(cmd)));
814
815 if (!qla2x00_hba_err_chk_enabled(sp))
816 break;
817
818 /* enable ALL bytes of the ref tag */
819 pkt->ref_tag_mask[0] = 0xff;
820 pkt->ref_tag_mask[1] = 0xff;
821 pkt->ref_tag_mask[2] = 0xff;
822 pkt->ref_tag_mask[3] = 0xff;
823 break;
824
825 /* For Type 3 protection: 16 bit GUARD only */
826 case SCSI_PROT_DIF_TYPE3:
827 pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
828 pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
829 0x00;
830 break;
831
832 /*
833 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
834 * 16 bit app tag.
835 */
836 case SCSI_PROT_DIF_TYPE1:
837 pkt->ref_tag = cpu_to_le32((uint32_t)
838 (0xffffffff & scsi_get_lba(cmd)));
839 pkt->app_tag = cpu_to_le16(0);
840 pkt->app_tag_mask[0] = 0x0;
841 pkt->app_tag_mask[1] = 0x0;
842
843 if (!qla2x00_hba_err_chk_enabled(sp))
844 break;
845
846 /* enable ALL bytes of the ref tag */
847 pkt->ref_tag_mask[0] = 0xff;
848 pkt->ref_tag_mask[1] = 0xff;
849 pkt->ref_tag_mask[2] = 0xff;
850 pkt->ref_tag_mask[3] = 0xff;
851 break;
852 }
853 }
854
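/*
 * qla24xx_get_one_block_sg() - Advance through the data scatterlist in
 * protection-interval (blk_sz) sized pieces.
 *
 * Fills sgx->dma_addr/dma_len with the next piece, sets *partial when the
 * current SG element ends before a full block is consumed, and returns 1
 * while data remains (0 once sgx->num_bytes reaches sgx->tot_bytes).
 */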
855 int
856 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
857 uint32_t *partial)
858 {
859 struct scatterlist *sg;
860 uint32_t cumulative_partial, sg_len;
861 dma_addr_t sg_dma_addr;
862
863 if (sgx->num_bytes == sgx->tot_bytes)
864 return 0;
865
866 sg = sgx->cur_sg;
867 cumulative_partial = sgx->tot_partial;
868
869 sg_dma_addr = sg_dma_address(sg);
870 sg_len = sg_dma_len(sg);
871
872 sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
873
874 if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
875 sgx->dma_len = (blk_sz - cumulative_partial);
876 sgx->tot_partial = 0;
877 sgx->num_bytes += blk_sz;
878 *partial = 0;
879 } else {
880 sgx->dma_len = sg_len - sgx->bytes_consumed;
881 sgx->tot_partial += sgx->dma_len;
882 *partial = 1;
883 }
884
885 sgx->bytes_consumed += sgx->dma_len;
886
887 if (sg_len == sgx->bytes_consumed) {
888 sg = sg_next(sg);
889 sgx->num_sg++;
890 sgx->cur_sg = sg;
891 sgx->bytes_consumed = 0;
892 }
893
894 return 1;
895 }
896
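/*
 * Build the DSD lists for a DIF command without bundling: data is carved
 * into protection-interval sized DSDs and, after each full interval, an
 * 8-byte DSD pointing at the matching protection data is interleaved.
 * DSD lists are allocated from ha->dl_dma_pool and tracked so they can be
 * freed later by sp_free_dma() (or via the tc context in target mode).
 */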
897 int
898 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
899 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
900 {
901 void *next_dsd;
902 uint8_t avail_dsds = 0;
903 uint32_t dsd_list_len;
904 struct dsd_dma *dsd_ptr;
905 struct scatterlist *sg_prot;
906 uint32_t *cur_dsd = dsd;
907 uint16_t used_dsds = tot_dsds;
908 uint32_t prot_int; /* protection interval */
909 uint32_t partial;
910 struct qla2_sgx sgx;
911 dma_addr_t sle_dma;
912 uint32_t sle_dma_len, tot_prot_dma_len = 0;
913 struct scsi_cmnd *cmd;
914
915 memset(&sgx, 0, sizeof(struct qla2_sgx));
916 if (sp) {
917 cmd = GET_CMD_SP(sp);
918 prot_int = cmd->device->sector_size;
919
920 sgx.tot_bytes = scsi_bufflen(cmd);
921 sgx.cur_sg = scsi_sglist(cmd);
922 sgx.sp = sp;
923
924 sg_prot = scsi_prot_sglist(cmd);
925 } else if (tc) {
926 prot_int = tc->blk_sz;
927 sgx.tot_bytes = tc->bufflen;
928 sgx.cur_sg = tc->sg;
929 sg_prot = tc->prot_sg;
930 } else {
931 BUG();
932 return 1;
933 }
934
935 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
936
937 sle_dma = sgx.dma_addr;
938 sle_dma_len = sgx.dma_len;
939 alloc_and_fill:
940 /* Allocate additional continuation packets? */
941 if (avail_dsds == 0) {
942 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
943 QLA_DSDS_PER_IOCB : used_dsds;
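/*
 * Each DSD entry is 12 bytes (address low/high + length); the extra
 * entry reserves room for the chaining/null-terminator descriptor.
 */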
944 dsd_list_len = (avail_dsds + 1) * 12;
945 used_dsds -= avail_dsds;
946
947 /* allocate tracking DS */
948 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
949 if (!dsd_ptr)
950 return 1;
951
952 /* allocate new list */
953 dsd_ptr->dsd_addr = next_dsd =
954 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
955 &dsd_ptr->dsd_list_dma);
956
957 if (!next_dsd) {
958 /*
959 * Need to cleanup only this dsd_ptr, rest
960 * will be done by sp_free_dma()
961 */
962 kfree(dsd_ptr);
963 return 1;
964 }
965
966 if (sp) {
967 list_add_tail(&dsd_ptr->list,
968 &((struct crc_context *)
969 sp->u.scmd.ctx)->dsd_list);
970
971 sp->flags |= SRB_CRC_CTX_DSD_VALID;
972 } else {
973 list_add_tail(&dsd_ptr->list,
974 &(tc->ctx->dsd_list));
975 *tc->ctx_dsd_alloced = 1;
976 }
977
978
979 /* add new list to cmd iocb or last list */
980 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
981 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
982 *cur_dsd++ = dsd_list_len;
983 cur_dsd = (uint32_t *)next_dsd;
984 }
985 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
986 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
987 *cur_dsd++ = cpu_to_le32(sle_dma_len);
988 avail_dsds--;
989
990 if (partial == 0) {
991 /* Got a full protection interval */
992 sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
993 sle_dma_len = 8;
994
995 tot_prot_dma_len += sle_dma_len;
996 if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
997 tot_prot_dma_len = 0;
998 sg_prot = sg_next(sg_prot);
999 }
1000
1001 partial = 1; /* So as to not re-enter this block */
1002 goto alloc_and_fill;
1003 }
1004 }
1005 /* Null termination */
1006 *cur_dsd++ = 0;
1007 *cur_dsd++ = 0;
1008 *cur_dsd++ = 0;
1009 return 0;
1010 }
1011
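/*
 * Build the data DSD lists for a DIF command with bundling enabled: walk
 * the data scatterlist one SG element per DSD, allocating chained DSD
 * lists from ha->dl_dma_pool as needed and null-terminating the last one.
 */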
1012 int
1013 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1014 uint16_t tot_dsds, struct qla_tc_param *tc)
1015 {
1016 void *next_dsd;
1017 uint8_t avail_dsds = 0;
1018 uint32_t dsd_list_len;
1019 struct dsd_dma *dsd_ptr;
1020 struct scatterlist *sg, *sgl;
1021 uint32_t *cur_dsd = dsd;
1022 int i;
1023 uint16_t used_dsds = tot_dsds;
1024 struct scsi_cmnd *cmd;
1025
1026 if (sp) {
1027 cmd = GET_CMD_SP(sp);
1028 sgl = scsi_sglist(cmd);
1029 } else if (tc) {
1030 sgl = tc->sg;
1031 } else {
1032 BUG();
1033 return 1;
1034 }
1035
1036
1037 for_each_sg(sgl, sg, tot_dsds, i) {
1038 dma_addr_t sle_dma;
1039
1040 /* Allocate additional continuation packets? */
1041 if (avail_dsds == 0) {
1042 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1043 QLA_DSDS_PER_IOCB : used_dsds;
1044 dsd_list_len = (avail_dsds + 1) * 12;
1045 used_dsds -= avail_dsds;
1046
1047 /* allocate tracking DS */
1048 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1049 if (!dsd_ptr)
1050 return 1;
1051
1052 /* allocate new list */
1053 dsd_ptr->dsd_addr = next_dsd =
1054 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1055 &dsd_ptr->dsd_list_dma);
1056
1057 if (!next_dsd) {
1058 /*
1059 * Need to cleanup only this dsd_ptr, rest
1060 * will be done by sp_free_dma()
1061 */
1062 kfree(dsd_ptr);
1063 return 1;
1064 }
1065
1066 if (sp) {
1067 list_add_tail(&dsd_ptr->list,
1068 &((struct crc_context *)
1069 sp->u.scmd.ctx)->dsd_list);
1070
1071 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1072 } else {
1073 list_add_tail(&dsd_ptr->list,
1074 &(tc->ctx->dsd_list));
1075 *tc->ctx_dsd_alloced = 1;
1076 }
1077
1078 /* add new list to cmd iocb or last list */
1079 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1080 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1081 *cur_dsd++ = dsd_list_len;
1082 cur_dsd = (uint32_t *)next_dsd;
1083 }
1084 sle_dma = sg_dma_address(sg);
1085
1086 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1087 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1088 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1089 avail_dsds--;
1090
1091 }
1092 /* Null termination */
1093 *cur_dsd++ = 0;
1094 *cur_dsd++ = 0;
1095 *cur_dsd++ = 0;
1096 return 0;
1097 }
1098
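/*
 * Same as qla24xx_walk_and_build_sglist(), but walks the protection (DIF)
 * scatterlist and chains its DSD lists for the bundled case.
 */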
1099 int
1100 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1101 uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
1102 {
1103 void *next_dsd;
1104 uint8_t avail_dsds = 0;
1105 uint32_t dsd_list_len;
1106 struct dsd_dma *dsd_ptr;
1107 struct scatterlist *sg, *sgl;
1108 int i;
1109 struct scsi_cmnd *cmd;
1110 uint32_t *cur_dsd = dsd;
1111 uint16_t used_dsds = tot_dsds;
1112 struct scsi_qla_host *vha;
1113
1114 if (sp) {
1115 cmd = GET_CMD_SP(sp);
1116 sgl = scsi_prot_sglist(cmd);
1117 vha = sp->vha;
1118 } else if (tc) {
1119 vha = tc->vha;
1120 sgl = tc->prot_sg;
1121 } else {
1122 BUG();
1123 return 1;
1124 }
1125
1126 ql_dbg(ql_dbg_tgt, vha, 0xe021,
1127 "%s: enter\n", __func__);
1128
1129 for_each_sg(sgl, sg, tot_dsds, i) {
1130 dma_addr_t sle_dma;
1131
1132 /* Allocate additional continuation packets? */
1133 if (avail_dsds == 0) {
1134 avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1135 QLA_DSDS_PER_IOCB : used_dsds;
1136 dsd_list_len = (avail_dsds + 1) * 12;
1137 used_dsds -= avail_dsds;
1138
1139 /* allocate tracking DS */
1140 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1141 if (!dsd_ptr)
1142 return 1;
1143
1144 /* allocate new list */
1145 dsd_ptr->dsd_addr = next_dsd =
1146 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1147 &dsd_ptr->dsd_list_dma);
1148
1149 if (!next_dsd) {
1150 /*
1151 * Need to cleanup only this dsd_ptr, rest
1152 * will be done by sp_free_dma()
1153 */
1154 kfree(dsd_ptr);
1155 return 1;
1156 }
1157
1158 if (sp) {
1159 list_add_tail(&dsd_ptr->list,
1160 &((struct crc_context *)
1161 sp->u.scmd.ctx)->dsd_list);
1162
1163 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1164 } else {
1165 list_add_tail(&dsd_ptr->list,
1166 &(tc->ctx->dsd_list));
1167 *tc->ctx_dsd_alloced = 1;
1168 }
1169
1170 /* add new list to cmd iocb or last list */
1171 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1172 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1173 *cur_dsd++ = dsd_list_len;
1174 cur_dsd = (uint32_t *)next_dsd;
1175 }
1176 sle_dma = sg_dma_address(sg);
1177
1178 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1179 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1180 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1181
1182 avail_dsds--;
1183 }
1184 /* Null termination */
1185 *cur_dsd++ = 0;
1186 *cur_dsd++ = 0;
1187 *cur_dsd++ = 0;
1188 return 0;
1189 }
1190
1191 /**
1192 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1193 * Type 6 IOCB types.
1194 *
1195 * @sp: SRB command to process
1196 * @cmd_pkt: Command type CRC_2 IOCB
1197 * @tot_dsds: Total number of segments to transfer
1198 * @tot_prot_dsds: Total number of protection segments to transfer
1199 * @fw_prot_opts: Firmware protection options for the DIF/DIX operation
1200 */
1201 inline int
1202 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1203 uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1204 {
1205 uint32_t *cur_dsd, *fcp_dl;
1206 scsi_qla_host_t *vha;
1207 struct scsi_cmnd *cmd;
1208 uint32_t total_bytes = 0;
1209 uint32_t data_bytes;
1210 uint32_t dif_bytes;
1211 uint8_t bundling = 1;
1212 uint16_t blk_size;
1213 struct crc_context *crc_ctx_pkt = NULL;
1214 struct qla_hw_data *ha;
1215 uint8_t additional_fcpcdb_len;
1216 uint16_t fcp_cmnd_len;
1217 struct fcp_cmnd *fcp_cmnd;
1218 dma_addr_t crc_ctx_dma;
1219
1220 cmd = GET_CMD_SP(sp);
1221
1222 /* Update entry type to indicate Command Type CRC_2 IOCB */
1223 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1224
1225 vha = sp->vha;
1226 ha = vha->hw;
1227
1228 /* No data transfer */
1229 data_bytes = scsi_bufflen(cmd);
1230 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1231 cmd_pkt->byte_count = cpu_to_le32(0);
1232 return QLA_SUCCESS;
1233 }
1234
1235 cmd_pkt->vp_index = sp->vha->vp_idx;
1236
1237 /* Set transfer direction */
1238 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1239 cmd_pkt->control_flags =
1240 cpu_to_le16(CF_WRITE_DATA);
1241 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1242 cmd_pkt->control_flags =
1243 cpu_to_le16(CF_READ_DATA);
1244 }
1245
1246 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1247 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1248 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1249 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1250 bundling = 0;
1251
1252 /* Allocate CRC context from global pool */
1253 crc_ctx_pkt = sp->u.scmd.ctx =
1254 dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1255
1256 if (!crc_ctx_pkt)
1257 goto crc_queuing_error;
1258
1259 crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1260
1261 sp->flags |= SRB_CRC_CTX_DMA_VALID;
1262
1263 /* Set handle */
1264 crc_ctx_pkt->handle = cmd_pkt->handle;
1265
1266 INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1267
1268 qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1269 &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1270
1271 cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1272 cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1273 cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1274
1275 /* Determine SCSI command length -- align to 4 byte boundary */
1276 if (cmd->cmd_len > 16) {
1277 additional_fcpcdb_len = cmd->cmd_len - 16;
1278 if ((cmd->cmd_len % 4) != 0) {
1279 /* SCSI cmd > 16 bytes must be multiple of 4 */
1280 goto crc_queuing_error;
1281 }
1282 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1283 } else {
1284 additional_fcpcdb_len = 0;
1285 fcp_cmnd_len = 12 + 16 + 4;
1286 }
1287
1288 fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1289
1290 fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1291 if (cmd->sc_data_direction == DMA_TO_DEVICE)
1292 fcp_cmnd->additional_cdb_len |= 1;
1293 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1294 fcp_cmnd->additional_cdb_len |= 2;
1295
1296 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1297 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1298 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1299 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1300 LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1301 cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1302 MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1303 fcp_cmnd->task_management = 0;
1304 fcp_cmnd->task_attribute = TSK_SIMPLE;
1305
1306 cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1307
1308 /* Compute dif len and adjust data len to include protection */
1309 dif_bytes = 0;
1310 blk_size = cmd->device->sector_size;
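/* T10 DIF adds 8 bytes of protection information per logical block. */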
1311 dif_bytes = (data_bytes / blk_size) * 8;
1312
1313 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1314 case SCSI_PROT_READ_INSERT:
1315 case SCSI_PROT_WRITE_STRIP:
1316 total_bytes = data_bytes;
1317 data_bytes += dif_bytes;
1318 break;
1319
1320 case SCSI_PROT_READ_STRIP:
1321 case SCSI_PROT_WRITE_INSERT:
1322 case SCSI_PROT_READ_PASS:
1323 case SCSI_PROT_WRITE_PASS:
1324 total_bytes = data_bytes + dif_bytes;
1325 break;
1326 default:
1327 BUG();
1328 }
1329
1330 if (!qla2x00_hba_err_chk_enabled(sp))
1331 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1332 /* HBA error checking enabled */
1333 else if (IS_PI_UNINIT_CAPABLE(ha)) {
1334 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1335 || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1336 SCSI_PROT_DIF_TYPE2))
1337 fw_prot_opts |= BIT_10;
1338 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1339 SCSI_PROT_DIF_TYPE3)
1340 fw_prot_opts |= BIT_11;
1341 }
1342
1343 if (!bundling) {
1344 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1345 } else {
1346 /*
1347 * Configure bundling if the protection data must be fetched
1348 * with interleaving PCI accesses.
1349 */
1350 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1351 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1352 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1353 tot_prot_dsds);
1354 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1355 }
1356
1357 /* Finish the common fields of CRC pkt */
1358 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1359 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1360 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1361 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1362 /* Fibre channel byte count */
1363 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1364 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1365 additional_fcpcdb_len);
1366 *fcp_dl = htonl(total_bytes);
1367
1368 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1369 cmd_pkt->byte_count = cpu_to_le32(0);
1370 return QLA_SUCCESS;
1371 }
1372 /* Walks data segments */
1373
1374 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1375
1376 if (!bundling && tot_prot_dsds) {
1377 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1378 cur_dsd, tot_dsds, NULL))
1379 goto crc_queuing_error;
1380 } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1381 (tot_dsds - tot_prot_dsds), NULL))
1382 goto crc_queuing_error;
1383
1384 if (bundling && tot_prot_dsds) {
1385 /* Walks dif segments */
1386 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1387 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1388 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1389 tot_prot_dsds, NULL))
1390 goto crc_queuing_error;
1391 }
1392 return QLA_SUCCESS;
1393
1394 crc_queuing_error:
1395 /* Cleanup will be performed by the caller */
1396
1397 return QLA_FUNCTION_FAILED;
1398 }
1399
1400 /**
1401 * qla24xx_start_scsi() - Send a SCSI command to the ISP
1402 * @sp: command to send to the ISP
1403 *
1404 * Returns non-zero if a failure occurred, else zero.
1405 */
1406 int
1407 qla24xx_start_scsi(srb_t *sp)
1408 {
1409 int nseg;
1410 unsigned long flags;
1411 uint32_t *clr_ptr;
1412 uint32_t index;
1413 uint32_t handle;
1414 struct cmd_type_7 *cmd_pkt;
1415 uint16_t cnt;
1416 uint16_t req_cnt;
1417 uint16_t tot_dsds;
1418 struct req_que *req = NULL;
1419 struct rsp_que *rsp = NULL;
1420 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1421 struct scsi_qla_host *vha = sp->vha;
1422 struct qla_hw_data *ha = vha->hw;
1423
1424 /* Setup device pointers. */
1425 req = vha->req;
1426 rsp = req->rsp;
1427
1428 /* So we know we haven't pci_map'ed anything yet */
1429 tot_dsds = 0;
1430
1431 /* Send marker if required */
1432 if (vha->marker_needed != 0) {
1433 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1434 QLA_SUCCESS)
1435 return QLA_FUNCTION_FAILED;
1436 vha->marker_needed = 0;
1437 }
1438
1439 /* Acquire ring specific lock */
1440 spin_lock_irqsave(&ha->hardware_lock, flags);
1441
1442 /* Check for room in outstanding command list. */
1443 handle = req->current_outstanding_cmd;
1444 for (index = 1; index < req->num_outstanding_cmds; index++) {
1445 handle++;
1446 if (handle == req->num_outstanding_cmds)
1447 handle = 1;
1448 if (!req->outstanding_cmds[handle])
1449 break;
1450 }
1451 if (index == req->num_outstanding_cmds)
1452 goto queuing_error;
1453
1454 /* Map the sg table so we have an accurate count of sg entries needed */
1455 if (scsi_sg_count(cmd)) {
1456 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1457 scsi_sg_count(cmd), cmd->sc_data_direction);
1458 if (unlikely(!nseg))
1459 goto queuing_error;
1460 } else
1461 nseg = 0;
1462
1463 tot_dsds = nseg;
1464 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1465 if (req->cnt < (req_cnt + 2)) {
1466 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1467 RD_REG_DWORD_RELAXED(req->req_q_out);
1468 if (req->ring_index < cnt)
1469 req->cnt = cnt - req->ring_index;
1470 else
1471 req->cnt = req->length -
1472 (req->ring_index - cnt);
1473 if (req->cnt < (req_cnt + 2))
1474 goto queuing_error;
1475 }
1476
1477 /* Build command packet. */
1478 req->current_outstanding_cmd = handle;
1479 req->outstanding_cmds[handle] = sp;
1480 sp->handle = handle;
1481 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1482 req->cnt -= req_cnt;
1483
1484 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1485 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1486
1487 /* Zero out remaining portion of packet. */
1488 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1489 clr_ptr = (uint32_t *)cmd_pkt + 2;
1490 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1491 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1492
1493 /* Set NPORT-ID and LUN number*/
1494 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1495 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1496 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1497 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1498 cmd_pkt->vp_index = sp->vha->vp_idx;
1499
1500 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1501 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1502
1503 cmd_pkt->task = TSK_SIMPLE;
1504
1505 /* Load SCSI command packet. */
1506 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1507 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1508
1509 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1510
1511 /* Build IOCB segments */
1512 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1513
1514 /* Set total data segment count. */
1515 cmd_pkt->entry_count = (uint8_t)req_cnt;
1516 wmb();
1517 /* Adjust ring index. */
1518 req->ring_index++;
1519 if (req->ring_index == req->length) {
1520 req->ring_index = 0;
1521 req->ring_ptr = req->ring;
1522 } else
1523 req->ring_ptr++;
1524
1525 sp->flags |= SRB_DMA_VALID;
1526
1527 /* Set chip new ring index. */
1528 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1529 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1530
1531 /* Manage unprocessed RIO/ZIO commands in response queue. */
1532 if (vha->flags.process_response_queue &&
1533 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1534 qla24xx_process_response_queue(vha, rsp);
1535
1536 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1537 return QLA_SUCCESS;
1538
1539 queuing_error:
1540 if (tot_dsds)
1541 scsi_dma_unmap(cmd);
1542
1543 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1544
1545 return QLA_FUNCTION_FAILED;
1546 }
1547
1548 /**
1549 * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1550 * @sp: command to send to the ISP
1551 *
1552 * Returns non-zero if a failure occurred, else zero.
1553 */
1554 int
1555 qla24xx_dif_start_scsi(srb_t *sp)
1556 {
1557 int nseg;
1558 unsigned long flags;
1559 uint32_t *clr_ptr;
1560 uint32_t index;
1561 uint32_t handle;
1562 uint16_t cnt;
1563 uint16_t req_cnt = 0;
1564 uint16_t tot_dsds;
1565 uint16_t tot_prot_dsds;
1566 uint16_t fw_prot_opts = 0;
1567 struct req_que *req = NULL;
1568 struct rsp_que *rsp = NULL;
1569 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1570 struct scsi_qla_host *vha = sp->vha;
1571 struct qla_hw_data *ha = vha->hw;
1572 struct cmd_type_crc_2 *cmd_pkt;
1573 uint32_t status = 0;
1574
1575 #define QDSS_GOT_Q_SPACE BIT_0
1576
1577 /* Only process protection or >16 cdb in this routine */
1578 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1579 if (cmd->cmd_len <= 16)
1580 return qla24xx_start_scsi(sp);
1581 }
1582
1583 /* Setup device pointers. */
1584 req = vha->req;
1585 rsp = req->rsp;
1586
1587 /* So we know we haven't pci_map'ed anything yet */
1588 tot_dsds = 0;
1589
1590 /* Send marker if required */
1591 if (vha->marker_needed != 0) {
1592 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1593 QLA_SUCCESS)
1594 return QLA_FUNCTION_FAILED;
1595 vha->marker_needed = 0;
1596 }
1597
1598 /* Acquire ring specific lock */
1599 spin_lock_irqsave(&ha->hardware_lock, flags);
1600
1601 /* Check for room in outstanding command list. */
1602 handle = req->current_outstanding_cmd;
1603 for (index = 1; index < req->num_outstanding_cmds; index++) {
1604 handle++;
1605 if (handle == req->num_outstanding_cmds)
1606 handle = 1;
1607 if (!req->outstanding_cmds[handle])
1608 break;
1609 }
1610
1611 if (index == req->num_outstanding_cmds)
1612 goto queuing_error;
1613
1614 /* Compute number of required data segments */
1615 /* Map the sg table so we have an accurate count of sg entries needed */
1616 if (scsi_sg_count(cmd)) {
1617 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1618 scsi_sg_count(cmd), cmd->sc_data_direction);
1619 if (unlikely(!nseg))
1620 goto queuing_error;
1621 else
1622 sp->flags |= SRB_DMA_VALID;
1623
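/*
 * When the HBA inserts or strips the protection data itself, each
 * protection interval becomes its own DSD, so re-count the data
 * segments block by block rather than per SG element.
 */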
1624 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1625 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1626 struct qla2_sgx sgx;
1627 uint32_t partial;
1628
1629 memset(&sgx, 0, sizeof(struct qla2_sgx));
1630 sgx.tot_bytes = scsi_bufflen(cmd);
1631 sgx.cur_sg = scsi_sglist(cmd);
1632 sgx.sp = sp;
1633
1634 nseg = 0;
1635 while (qla24xx_get_one_block_sg(
1636 cmd->device->sector_size, &sgx, &partial))
1637 nseg++;
1638 }
1639 } else
1640 nseg = 0;
1641
1642 /* number of required data segments */
1643 tot_dsds = nseg;
1644
1645 /* Compute number of required protection segments */
1646 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1647 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1648 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1649 if (unlikely(!nseg))
1650 goto queuing_error;
1651 else
1652 sp->flags |= SRB_CRC_PROT_DMA_VALID;
1653
1654 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1655 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1656 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1657 }
1658 } else {
1659 nseg = 0;
1660 }
1661
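/*
 * A CRC_2 command always fits in a single request-queue entry; its data
 * and protection DSDs live in the external CRC context rather than in
 * continuation IOCBs.
 */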
1662 req_cnt = 1;
1663 /* Total Data and protection sg segment(s) */
1664 tot_prot_dsds = nseg;
1665 tot_dsds += nseg;
1666 if (req->cnt < (req_cnt + 2)) {
1667 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1668 RD_REG_DWORD_RELAXED(req->req_q_out);
1669 if (req->ring_index < cnt)
1670 req->cnt = cnt - req->ring_index;
1671 else
1672 req->cnt = req->length -
1673 (req->ring_index - cnt);
1674 if (req->cnt < (req_cnt + 2))
1675 goto queuing_error;
1676 }
1677
1678 status |= QDSS_GOT_Q_SPACE;
1679
1680 /* Build header part of command packet (excluding the OPCODE). */
1681 req->current_outstanding_cmd = handle;
1682 req->outstanding_cmds[handle] = sp;
1683 sp->handle = handle;
1684 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1685 req->cnt -= req_cnt;
1686
1687 /* Fill-in common area */
1688 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1689 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1690
1691 clr_ptr = (uint32_t *)cmd_pkt + 2;
1692 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1693
1694 /* Set NPORT-ID and LUN number*/
1695 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1696 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1697 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1698 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1699
1700 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1701 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1702
1703 /* Total Data and protection segment(s) */
1704 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1705
1706 /* Build IOCB segments and adjust for data protection segments */
1707 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1708 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1709 QLA_SUCCESS)
1710 goto queuing_error;
1711
1712 cmd_pkt->entry_count = (uint8_t)req_cnt;
1713 /* Specify response queue number where completion should happen */
1714 cmd_pkt->entry_status = (uint8_t) rsp->id;
1715 cmd_pkt->timeout = cpu_to_le16(0);
1716 wmb();
1717
1718 /* Adjust ring index. */
1719 req->ring_index++;
1720 if (req->ring_index == req->length) {
1721 req->ring_index = 0;
1722 req->ring_ptr = req->ring;
1723 } else
1724 req->ring_ptr++;
1725
1726 /* Set chip new ring index. */
1727 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1728 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1729
1730 /* Manage unprocessed RIO/ZIO commands in response queue. */
1731 if (vha->flags.process_response_queue &&
1732 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1733 qla24xx_process_response_queue(vha, rsp);
1734
1735 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1736
1737 return QLA_SUCCESS;
1738
1739 queuing_error:
1740 if (status & QDSS_GOT_Q_SPACE) {
1741 req->outstanding_cmds[handle] = NULL;
1742 req->cnt += req_cnt;
1743 }
1744 /* Cleanup will be performed by the caller (queuecommand) */
1745
1746 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1747 return QLA_FUNCTION_FAILED;
1748 }
1749
1750 /**
1751 * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1752 * @sp: command to send to the ISP
1753 *
1754 * Returns non-zero if a failure occurred, else zero.
1755 */
1756 static int
1757 qla2xxx_start_scsi_mq(srb_t *sp)
1758 {
1759 int nseg;
1760 unsigned long flags;
1761 uint32_t *clr_ptr;
1762 uint32_t index;
1763 uint32_t handle;
1764 struct cmd_type_7 *cmd_pkt;
1765 uint16_t cnt;
1766 uint16_t req_cnt;
1767 uint16_t tot_dsds;
1768 struct req_que *req = NULL;
1769 struct rsp_que *rsp = NULL;
1770 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1771 struct scsi_qla_host *vha = sp->fcport->vha;
1772 struct qla_hw_data *ha = vha->hw;
1773 struct qla_qpair *qpair = sp->qpair;
1774
1775 /* Acquire qpair specific lock */
1776 spin_lock_irqsave(&qpair->qp_lock, flags);
1777
1778 /* Setup qpair pointers */
1779 rsp = qpair->rsp;
1780 req = qpair->req;
1781
1782 /* So we know we haven't pci_map'ed anything yet */
1783 tot_dsds = 0;
1784
1785 /* Send marker if required */
1786 if (vha->marker_needed != 0) {
1787 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1788 QLA_SUCCESS) {
1789 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1790 return QLA_FUNCTION_FAILED;
1791 }
1792 vha->marker_needed = 0;
1793 }
1794
1795 /* Check for room in outstanding command list. */
1796 handle = req->current_outstanding_cmd;
1797 for (index = 1; index < req->num_outstanding_cmds; index++) {
1798 handle++;
1799 if (handle == req->num_outstanding_cmds)
1800 handle = 1;
1801 if (!req->outstanding_cmds[handle])
1802 break;
1803 }
1804 if (index == req->num_outstanding_cmds)
1805 goto queuing_error;
1806
1807 /* Map the sg table so we have an accurate count of sg entries needed */
1808 if (scsi_sg_count(cmd)) {
1809 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1810 scsi_sg_count(cmd), cmd->sc_data_direction);
1811 if (unlikely(!nseg))
1812 goto queuing_error;
1813 } else
1814 nseg = 0;
1815
1816 tot_dsds = nseg;
1817 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1818 if (req->cnt < (req_cnt + 2)) {
1819 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1820 RD_REG_DWORD_RELAXED(req->req_q_out);
1821 if (req->ring_index < cnt)
1822 req->cnt = cnt - req->ring_index;
1823 else
1824 req->cnt = req->length -
1825 (req->ring_index - cnt);
1826 if (req->cnt < (req_cnt + 2))
1827 goto queuing_error;
1828 }
1829
1830 /* Build command packet. */
1831 req->current_outstanding_cmd = handle;
1832 req->outstanding_cmds[handle] = sp;
1833 sp->handle = handle;
1834 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1835 req->cnt -= req_cnt;
1836
1837 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1838 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1839
1840 /* Zero out remaining portion of packet. */
1841 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
1842 clr_ptr = (uint32_t *)cmd_pkt + 2;
1843 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1844 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1845
1846 /* Set NPORT-ID and LUN number*/
1847 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1848 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1849 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1850 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1851 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1852
1853 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1854 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1855
1856 cmd_pkt->task = TSK_SIMPLE;
1857
1858 /* Load SCSI command packet. */
1859 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1860 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1861
1862 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1863
1864 /* Build IOCB segments */
1865 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1866
1867 /* Set total data segment count. */
1868 cmd_pkt->entry_count = (uint8_t)req_cnt;
1869 wmb();
1870 /* Adjust ring index. */
1871 req->ring_index++;
1872 if (req->ring_index == req->length) {
1873 req->ring_index = 0;
1874 req->ring_ptr = req->ring;
1875 } else
1876 req->ring_ptr++;
1877
1878 sp->flags |= SRB_DMA_VALID;
1879
1880 /* Set chip new ring index. */
1881 WRT_REG_DWORD(req->req_q_in, req->ring_index);
1882
1883 /* Manage unprocessed RIO/ZIO commands in response queue. */
1884 if (vha->flags.process_response_queue &&
1885 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1886 qla24xx_process_response_queue(vha, rsp);
1887
1888 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1889 return QLA_SUCCESS;
1890
1891 queuing_error:
1892 if (tot_dsds)
1893 scsi_dma_unmap(cmd);
1894
1895 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1896
1897 return QLA_FUNCTION_FAILED;
1898 }
1899
1900
1901 /**
1902 * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1903 * @sp: command to send to the ISP
1904 *
1905 * Returns non-zero if a failure occurred, else zero.
1906 */
1907 int
1908 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1909 {
1910 int nseg;
1911 unsigned long flags;
1912 uint32_t *clr_ptr;
1913 uint32_t index;
1914 uint32_t handle;
1915 uint16_t cnt;
1916 uint16_t req_cnt = 0;
1917 uint16_t tot_dsds;
1918 uint16_t tot_prot_dsds;
1919 uint16_t fw_prot_opts = 0;
1920 struct req_que *req = NULL;
1921 struct rsp_que *rsp = NULL;
1922 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1923 struct scsi_qla_host *vha = sp->fcport->vha;
1924 struct qla_hw_data *ha = vha->hw;
1925 struct cmd_type_crc_2 *cmd_pkt;
1926 uint32_t status = 0;
1927 struct qla_qpair *qpair = sp->qpair;
1928
1929 #define QDSS_GOT_Q_SPACE BIT_0
1930
1931 /* Check for host side state */
1932 if (!qpair->online) {
1933 cmd->result = DID_NO_CONNECT << 16;
1934 return QLA_INTERFACE_ERROR;
1935 }
1936
1937 if (!qpair->difdix_supported &&
1938 scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1939 cmd->result = DID_NO_CONNECT << 16;
1940 return QLA_INTERFACE_ERROR;
1941 }
1942
1943 /* Only process protection or >16 cdb in this routine */
1944 if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1945 if (cmd->cmd_len <= 16)
1946 return qla2xxx_start_scsi_mq(sp);
1947 }
1948
1949 spin_lock_irqsave(&qpair->qp_lock, flags);
1950
1951 /* Setup qpair pointers */
1952 rsp = qpair->rsp;
1953 req = qpair->req;
1954
1955 /* So we know we haven't pci_map'ed anything yet */
1956 tot_dsds = 0;
1957
1958 /* Send marker if required */
1959 if (vha->marker_needed != 0) {
1960 if (__qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1961 QLA_SUCCESS) {
1962 spin_unlock_irqrestore(&qpair->qp_lock, flags);
1963 return QLA_FUNCTION_FAILED;
1964 }
1965 vha->marker_needed = 0;
1966 }
1967
1968 /* Check for room in outstanding command list. */
1969 handle = req->current_outstanding_cmd;
1970 for (index = 1; index < req->num_outstanding_cmds; index++) {
1971 handle++;
1972 if (handle == req->num_outstanding_cmds)
1973 handle = 1;
1974 if (!req->outstanding_cmds[handle])
1975 break;
1976 }
1977
1978 if (index == req->num_outstanding_cmds)
1979 goto queuing_error;
1980
1981 /* Compute number of required data segments */
1982 /* Map the sg table so we have an accurate count of sg entries needed */
1983 if (scsi_sg_count(cmd)) {
1984 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1985 scsi_sg_count(cmd), cmd->sc_data_direction);
1986 if (unlikely(!nseg))
1987 goto queuing_error;
1988 else
1989 sp->flags |= SRB_DMA_VALID;
1990
1991 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1992 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1993 struct qla2_sgx sgx;
1994 uint32_t partial;
1995
1996 memset(&sgx, 0, sizeof(struct qla2_sgx));
1997 sgx.tot_bytes = scsi_bufflen(cmd);
1998 sgx.cur_sg = scsi_sglist(cmd);
1999 sgx.sp = sp;
2000
2001 nseg = 0;
2002 while (qla24xx_get_one_block_sg(
2003 cmd->device->sector_size, &sgx, &partial))
2004 nseg++;
2005 }
2006 } else
2007 nseg = 0;
2008
2009 /* number of required data segments */
2010 tot_dsds = nseg;
2011
2012 /* Compute number of required protection segments */
2013 if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2014 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2015 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2016 if (unlikely(!nseg))
2017 goto queuing_error;
2018 else
2019 sp->flags |= SRB_CRC_PROT_DMA_VALID;
2020
2021 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2022 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2023 nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2024 }
2025 } else {
2026 nseg = 0;
2027 }
2028
2029 req_cnt = 1;
2030 /* Total Data and protection sg segment(s) */
2031 tot_prot_dsds = nseg;
2032 tot_dsds += nseg;
2033 if (req->cnt < (req_cnt + 2)) {
2034 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2035 RD_REG_DWORD_RELAXED(req->req_q_out);
2036 if (req->ring_index < cnt)
2037 req->cnt = cnt - req->ring_index;
2038 else
2039 req->cnt = req->length -
2040 (req->ring_index - cnt);
2041 if (req->cnt < (req_cnt + 2))
2042 goto queuing_error;
2043 }
2044
2045 status |= QDSS_GOT_Q_SPACE;
2046
2047 /* Build header part of command packet (excluding the OPCODE). */
2048 req->current_outstanding_cmd = handle;
2049 req->outstanding_cmds[handle] = sp;
2050 sp->handle = handle;
2051 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2052 req->cnt -= req_cnt;
2053
2054 /* Fill-in common area */
2055 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2056 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2057
2058 clr_ptr = (uint32_t *)cmd_pkt + 2;
2059 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2060
2061 /* Set NPORT-ID and LUN number*/
2062 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2063 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2064 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2065 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2066
2067 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2068 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2069
2070 /* Total Data and protection segment(s) */
2071 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2072
2073 /* Build IOCB segments and adjust for data protection segments */
2074 if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2075 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2076 QLA_SUCCESS)
2077 goto queuing_error;
2078
2079 cmd_pkt->entry_count = (uint8_t)req_cnt;
2080 cmd_pkt->timeout = cpu_to_le16(0);
2081 wmb();
2082
2083 /* Adjust ring index. */
2084 req->ring_index++;
2085 if (req->ring_index == req->length) {
2086 req->ring_index = 0;
2087 req->ring_ptr = req->ring;
2088 } else
2089 req->ring_ptr++;
2090
2091 /* Set chip new ring index. */
2092 WRT_REG_DWORD(req->req_q_in, req->ring_index);
2093
2094 /* Manage unprocessed RIO/ZIO commands in response queue. */
2095 if (vha->flags.process_response_queue &&
2096 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2097 qla24xx_process_response_queue(vha, rsp);
2098
2099 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2100
2101 return QLA_SUCCESS;
2102
2103 queuing_error:
2104 if (status & QDSS_GOT_Q_SPACE) {
2105 req->outstanding_cmds[handle] = NULL;
2106 req->cnt += req_cnt;
2107 }
2108 /* Cleanup will be performed by the caller (queuecommand) */
2109
2110 spin_unlock_irqrestore(&qpair->qp_lock, flags);
2111 return QLA_FUNCTION_FAILED;
2112 }
2113
2114 /* Generic Control-SRB manipulation functions. */
2115
2116 /* hardware_lock assumed to be held. */
2117
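/**
 * __qla2x00_alloc_iocbs() - Allocate an IOCB packet on the request queue.
 * @qpair: queue pair the packet is allocated from
 * @sp: SRB to associate with the packet, or NULL for a plain allocation
 *
 * Reserves request-queue space (and an outstanding-command slot when @sp
 * is supplied) and returns a zeroed request packet, or NULL on failure.
 */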
2118 void *
2119 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2120 {
2121 scsi_qla_host_t *vha = qpair->vha;
2122 struct qla_hw_data *ha = vha->hw;
2123 struct req_que *req = qpair->req;
2124 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2125 uint32_t index, handle;
2126 request_t *pkt;
2127 uint16_t cnt, req_cnt;
2128
2129 pkt = NULL;
2130 req_cnt = 1;
2131 handle = 0;
2132
2133 if (sp && (sp->type != SRB_SCSI_CMD)) {
2134 /* Adjust entry-counts as needed. */
2135 req_cnt = sp->iocbs;
2136 }
2137
2138 /* Check for room on request queue. */
2139 if (req->cnt < req_cnt + 2) {
2140 if (qpair->use_shadow_reg)
2141 cnt = *req->out_ptr;
2142 else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2143 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2144 else if (IS_P3P_TYPE(ha))
2145 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2146 else if (IS_FWI2_CAPABLE(ha))
2147 cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2148 else if (IS_QLAFX00(ha))
2149 cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2150 else
2151 cnt = qla2x00_debounce_register(
2152 ISP_REQ_Q_OUT(ha, &reg->isp));
2153
2154 if (req->ring_index < cnt)
2155 req->cnt = cnt - req->ring_index;
2156 else
2157 req->cnt = req->length -
2158 (req->ring_index - cnt);
2159 }
2160 if (req->cnt < req_cnt + 2)
2161 goto queuing_error;
2162
2163 if (sp) {
2164 /* Check for room in outstanding command list. */
2165 handle = req->current_outstanding_cmd;
2166 for (index = 1; index < req->num_outstanding_cmds; index++) {
2167 handle++;
2168 if (handle == req->num_outstanding_cmds)
2169 handle = 1;
2170 if (!req->outstanding_cmds[handle])
2171 break;
2172 }
2173 if (index == req->num_outstanding_cmds) {
2174 ql_log(ql_log_warn, vha, 0x700b,
2175 "No room on outstanding cmd array.\n");
2176 goto queuing_error;
2177 }
2178
2179 /* Prep command array. */
2180 req->current_outstanding_cmd = handle;
2181 req->outstanding_cmds[handle] = sp;
2182 sp->handle = handle;
2183 }
2184
2185 /* Prep packet */
2186 req->cnt -= req_cnt;
2187 pkt = req->ring_ptr;
2188 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2189 if (IS_QLAFX00(ha)) {
2190 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2191 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2192 } else {
2193 pkt->entry_count = req_cnt;
2194 pkt->handle = handle;
2195 }
2196
2197 return pkt;
2198
2199 queuing_error:
2200 qpair->tgt_counters.num_alloc_iocb_failed++;
2201 return pkt;
2202 }
2203
2204 void *
2205 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2206 {
2207 scsi_qla_host_t *vha = qpair->vha;
2208
2209 if (qla2x00_reset_active(vha))
2210 return NULL;
2211
2212 return __qla2x00_alloc_iocbs(qpair, sp);
2213 }
2214
2215 void *
2216 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2217 {
2218 return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2219 }
2220
2221 static void
2222 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2223 {
2224 struct srb_iocb *lio = &sp->u.iocb_cmd;
2225
2226 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2227 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2228 if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI)
2229 logio->control_flags |= LCF_NVME_PRLI;
2230
2231 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2232 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2233 logio->port_id[1] = sp->fcport->d_id.b.area;
2234 logio->port_id[2] = sp->fcport->d_id.b.domain;
2235 logio->vp_index = sp->vha->vp_idx;
2236 }
2237
2238 static void
2239 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2240 {
2241 struct srb_iocb *lio = &sp->u.iocb_cmd;
2242
2243 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2244 if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2245 logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2246 } else {
2247 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2248 if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2249 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2250 if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2251 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2252 }
2253 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2254 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2255 logio->port_id[1] = sp->fcport->d_id.b.area;
2256 logio->port_id[2] = sp->fcport->d_id.b.domain;
2257 logio->vp_index = sp->vha->vp_idx;
2258 }
2259
2260 static void
2261 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2262 {
2263 struct qla_hw_data *ha = sp->vha->hw;
2264 struct srb_iocb *lio = &sp->u.iocb_cmd;
2265 uint16_t opts;
2266
2267 mbx->entry_type = MBX_IOCB_TYPE;
2268 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2269 mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2270 opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2271 opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2272 if (HAS_EXTENDED_IDS(ha)) {
2273 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2274 mbx->mb10 = cpu_to_le16(opts);
2275 } else {
2276 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2277 }
2278 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2279 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2280 sp->fcport->d_id.b.al_pa);
2281 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2282 }
2283
2284 static void
2285 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2286 {
2287 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2288 logio->control_flags =
2289 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2290 if (!sp->fcport->se_sess ||
2291 !sp->fcport->keep_nport_handle)
2292 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2293 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2294 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2295 logio->port_id[1] = sp->fcport->d_id.b.area;
2296 logio->port_id[2] = sp->fcport->d_id.b.domain;
2297 logio->vp_index = sp->vha->vp_idx;
2298 }
2299
2300 static void
2301 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2302 {
2303 struct qla_hw_data *ha = sp->vha->hw;
2304
2305 mbx->entry_type = MBX_IOCB_TYPE;
2306 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2307 mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2308 mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2309 cpu_to_le16(sp->fcport->loop_id):
2310 cpu_to_le16(sp->fcport->loop_id << 8);
2311 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2312 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2313 sp->fcport->d_id.b.al_pa);
2314 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2315 /* Implicit: mbx->mbx10 = 0. */
2316 }
2317
2318 static void
2319 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2320 {
2321 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2322 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2323 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2324 logio->vp_index = sp->vha->vp_idx;
2325 }
2326
2327 static void
2328 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2329 {
2330 struct qla_hw_data *ha = sp->vha->hw;
2331
2332 mbx->entry_type = MBX_IOCB_TYPE;
2333 SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2334 mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2335 if (HAS_EXTENDED_IDS(ha)) {
2336 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2337 mbx->mb10 = cpu_to_le16(BIT_0);
2338 } else {
2339 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2340 }
2341 mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2342 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2343 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2344 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2345 mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2346 }
2347
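/**
 * qla24xx_tm_iocb() - Build a task management IOCB.
 * @sp: SRB carrying the task management request
 * @tsk: task management entry to populate
 *
 * Fills in the TM flags, N_Port handle, port ID and timeout; for a LUN
 * reset the LUN field is also populated and byte-swapped for FCP.
 */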
2348 static void
2349 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2350 {
2351 uint32_t flags;
2352 uint64_t lun;
2353 struct fc_port *fcport = sp->fcport;
2354 scsi_qla_host_t *vha = fcport->vha;
2355 struct qla_hw_data *ha = vha->hw;
2356 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2357 struct req_que *req = vha->req;
2358
2359 flags = iocb->u.tmf.flags;
2360 lun = iocb->u.tmf.lun;
2361
2362 tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2363 tsk->entry_count = 1;
2364 tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2365 tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2366 tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2367 tsk->control_flags = cpu_to_le32(flags);
2368 tsk->port_id[0] = fcport->d_id.b.al_pa;
2369 tsk->port_id[1] = fcport->d_id.b.area;
2370 tsk->port_id[2] = fcport->d_id.b.domain;
2371 tsk->vp_index = fcport->vha->vp_idx;
2372
2373 if (flags == TCF_LUN_RESET) {
2374 int_to_scsilun(lun, &tsk->lun);
2375 host_to_fcp_swap((uint8_t *)&tsk->lun,
2376 sizeof(tsk->lun));
2377 }
2378 }
2379
2380 static void
2381 qla2x00_els_dcmd_sp_free(void *data)
2382 {
2383 srb_t *sp = data;
2384 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2385
2386 kfree(sp->fcport);
2387
2388 if (elsio->u.els_logo.els_logo_pyld)
2389 dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2390 elsio->u.els_logo.els_logo_pyld,
2391 elsio->u.els_logo.els_logo_pyld_dma);
2392
2393 del_timer(&elsio->timer);
2394 qla2x00_rel_sp(sp);
2395 }
2396
2397 static void
2398 qla2x00_els_dcmd_iocb_timeout(void *data)
2399 {
2400 srb_t *sp = data;
2401 fc_port_t *fcport = sp->fcport;
2402 struct scsi_qla_host *vha = sp->vha;
2403 struct srb_iocb *lio = &sp->u.iocb_cmd;
2404
2405 ql_dbg(ql_dbg_io, vha, 0x3069,
2406 "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2407 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2408 fcport->d_id.b.al_pa);
2409
2410 complete(&lio->u.els_logo.comp);
2411 }
2412
2413 static void
2414 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2415 {
2416 srb_t *sp = ptr;
2417 fc_port_t *fcport = sp->fcport;
2418 struct srb_iocb *lio = &sp->u.iocb_cmd;
2419 struct scsi_qla_host *vha = sp->vha;
2420
2421 ql_dbg(ql_dbg_io, vha, 0x3072,
2422 "%s hdl=%x, portid=%02x%02x%02x done\n",
2423 sp->name, sp->handle, fcport->d_id.b.domain,
2424 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2425
2426 complete(&lio->u.els_logo.comp);
2427 }
2428
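/**
 * qla24xx_els_dcmd_iocb() - Issue a driver-generated ELS command.
 * @vha: HA context
 * @els_opcode: ELS opcode to send
 * @remote_did: port ID of the remote N_Port
 *
 * Allocates a temporary fcport and SRB, builds the ELS (LOGO-style)
 * payload in coherent DMA memory, starts the IOCB and waits for its
 * completion.
 *
 * Returns QLA_SUCCESS on success or a failure/errno code otherwise.
 */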
2429 int
2430 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2431 port_id_t remote_did)
2432 {
2433 srb_t *sp;
2434 fc_port_t *fcport = NULL;
2435 struct srb_iocb *elsio = NULL;
2436 struct qla_hw_data *ha = vha->hw;
2437 struct els_logo_payload logo_pyld;
2438 int rval = QLA_SUCCESS;
2439
2440 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2441 if (!fcport) {
2442 ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2443 return -ENOMEM;
2444 }
2445
2446 /* Alloc SRB structure */
2447 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2448 if (!sp) {
2449 kfree(fcport);
2450 ql_log(ql_log_info, vha, 0x70e6,
2451 "SRB allocation failed\n");
2452 return -ENOMEM;
2453 }
2454
2455 elsio = &sp->u.iocb_cmd;
2456 fcport->loop_id = 0xFFFF;
2457 fcport->d_id.b.domain = remote_did.b.domain;
2458 fcport->d_id.b.area = remote_did.b.area;
2459 fcport->d_id.b.al_pa = remote_did.b.al_pa;
2460
2461 ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2462 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2463
2464 sp->type = SRB_ELS_DCMD;
2465 sp->name = "ELS_DCMD";
2466 sp->fcport = fcport;
2467 elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2468 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2469 init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2470 sp->done = qla2x00_els_dcmd_sp_done;
2471 sp->free = qla2x00_els_dcmd_sp_free;
2472
2473 elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2474 DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2475 GFP_KERNEL);
2476
2477 if (!elsio->u.els_logo.els_logo_pyld) {
2478 sp->free(sp);
2479 return QLA_FUNCTION_FAILED;
2480 }
2481
2482 memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2483
2484 elsio->u.els_logo.els_cmd = els_opcode;
2485 logo_pyld.opcode = els_opcode;
2486 logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2487 logo_pyld.s_id[1] = vha->d_id.b.area;
2488 logo_pyld.s_id[2] = vha->d_id.b.domain;
2489 host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2490 memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2491
2492 memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2493 sizeof(struct els_logo_payload));
2494
2495 rval = qla2x00_start_sp(sp);
2496 if (rval != QLA_SUCCESS) {
2497 sp->free(sp);
2498 return QLA_FUNCTION_FAILED;
2499 }
2500
2501 ql_dbg(ql_dbg_io, vha, 0x3074,
2502 "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2503 sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2504 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2505
2506 wait_for_completion(&elsio->u.els_logo.comp);
2507
2508 sp->free(sp);
2509 return rval;
2510 }
2511
2512 static void
2513 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2514 {
2515 scsi_qla_host_t *vha = sp->vha;
2516 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2517
2518 els_iocb->entry_type = ELS_IOCB_TYPE;
2519 els_iocb->entry_count = 1;
2520 els_iocb->sys_define = 0;
2521 els_iocb->entry_status = 0;
2522 els_iocb->handle = sp->handle;
2523 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2524 els_iocb->tx_dsd_count = 1;
2525 els_iocb->vp_index = vha->vp_idx;
2526 els_iocb->sof_type = EST_SOFI3;
2527 els_iocb->rx_dsd_count = 0;
2528 els_iocb->opcode = elsio->u.els_logo.els_cmd;
2529
2530 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2531 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2532 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2533 els_iocb->s_id[0] = vha->d_id.b.al_pa;
2534 els_iocb->s_id[1] = vha->d_id.b.area;
2535 els_iocb->s_id[2] = vha->d_id.b.domain;
2536 els_iocb->control_flags = 0;
2537
2538 if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2539 els_iocb->tx_byte_count = els_iocb->tx_len =
2540 sizeof(struct els_plogi_payload);
2541 els_iocb->tx_address[0] =
2542 cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2543 els_iocb->tx_address[1] =
2544 cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma));
2545
2546 els_iocb->rx_dsd_count = 1;
2547 els_iocb->rx_byte_count = els_iocb->rx_len =
2548 sizeof(struct els_plogi_payload);
2549 els_iocb->rx_address[0] =
2550 cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma));
2551 els_iocb->rx_address[1] =
2552 cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma));
2553
2554 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2555 "PLOGI ELS IOCB:\n");
2556 ql_dump_buffer(ql_log_info, vha, 0x0109,
2557 (uint8_t *)els_iocb, 0x70);
2558 } else {
2559 els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
2560 els_iocb->tx_address[0] =
2561 cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2562 els_iocb->tx_address[1] =
2563 cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2564 els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2565
2566 els_iocb->rx_byte_count = 0;
2567 els_iocb->rx_address[0] = 0;
2568 els_iocb->rx_address[1] = 0;
2569 els_iocb->rx_len = 0;
2570 }
2571
2572 sp->vha->qla_stats.control_requests++;
2573 }
2574
2575 static void
2576 qla2x00_els_dcmd2_iocb_timeout(void *data)
2577 {
2578 srb_t *sp = data;
2579 fc_port_t *fcport = sp->fcport;
2580 struct scsi_qla_host *vha = sp->vha;
2581 struct qla_hw_data *ha = vha->hw;
2582 unsigned long flags = 0;
2583 int res;
2584
2585 ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2586 "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2587 sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2588
2589 /* Abort the exchange */
2590 spin_lock_irqsave(&ha->hardware_lock, flags);
2591 res = ha->isp_ops->abort_command(sp);
2592 ql_dbg(ql_dbg_io, vha, 0x3070,
2593 "mbx abort_command %s\n",
2594 (res == QLA_SUCCESS) ? "successful" : "failed");
2595 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2596
2597 sp->done(sp, QLA_FUNCTION_TIMEOUT);
2598 }
2599
2600 static void
2601 qla2x00_els_dcmd2_sp_done(void *ptr, int res)
2602 {
2603 srb_t *sp = ptr;
2604 fc_port_t *fcport = sp->fcport;
2605 struct srb_iocb *lio = &sp->u.iocb_cmd;
2606 struct scsi_qla_host *vha = sp->vha;
2607 struct event_arg ea;
2608 struct qla_work_evt *e;
2609
2610 ql_dbg(ql_dbg_disc, vha, 0x3072,
2611 "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2612 sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2613
2614 fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2615 del_timer(&sp->u.iocb_cmd.timer);
2616
2617 if (sp->flags & SRB_WAKEUP_ON_COMP)
2618 complete(&lio->u.els_plogi.comp);
2619 else {
2620 if (res) {
2621 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
2622 } else {
2623 memset(&ea, 0, sizeof(ea));
2624 ea.fcport = fcport;
2625 ea.rc = res;
2626 ea.event = FCME_ELS_PLOGI_DONE;
2627 qla2x00_fcport_event_handler(vha, &ea);
2628 }
2629
2630 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
2631 if (!e) {
2632 struct srb_iocb *elsio = &sp->u.iocb_cmd;
2633
2634 if (elsio->u.els_plogi.els_plogi_pyld)
2635 dma_free_coherent(&sp->vha->hw->pdev->dev,
2636 elsio->u.els_plogi.tx_size,
2637 elsio->u.els_plogi.els_plogi_pyld,
2638 elsio->u.els_plogi.els_plogi_pyld_dma);
2639
2640 if (elsio->u.els_plogi.els_resp_pyld)
2641 dma_free_coherent(&sp->vha->hw->pdev->dev,
2642 elsio->u.els_plogi.rx_size,
2643 elsio->u.els_plogi.els_resp_pyld,
2644 elsio->u.els_plogi.els_resp_pyld_dma);
2645 sp->free(sp);
2646 return;
2647 }
2648 e->u.iosb.sp = sp;
2649 qla2x00_post_work(vha, e);
2650 }
2651 }
2652
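/**
 * qla24xx_els_dcmd2_iocb() - Issue a driver-generated ELS PLOGI.
 * @vha: HA context
 * @els_opcode: ELS opcode to send
 * @fcport: remote port to log into
 * @wait: if true, wait for the command to complete before returning
 *
 * Allocates DMA buffers for the PLOGI payload and response, starts the
 * IOCB and, when @wait is set, frees the buffers after completion.
 *
 * Returns QLA_SUCCESS on success, QLA_FUNCTION_FAILED or -ENOMEM otherwise.
 */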
2653 int
2654 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
2655 fc_port_t *fcport, bool wait)
2656 {
2657 srb_t *sp;
2658 struct srb_iocb *elsio = NULL;
2659 struct qla_hw_data *ha = vha->hw;
2660 int rval = QLA_SUCCESS;
2661 void *ptr, *resp_ptr;
2662 dma_addr_t ptr_dma;
2663
2664 /* Alloc SRB structure */
2665 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2666 if (!sp) {
2667 ql_log(ql_log_info, vha, 0x70e6,
2668 "SRB allocation failed\n");
2669 return -ENOMEM;
2670 }
2671
2672 elsio = &sp->u.iocb_cmd;
2673 ql_dbg(ql_dbg_io, vha, 0x3073,
2674 "Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
2675
2676 fcport->flags |= FCF_ASYNC_SENT;
2677 sp->type = SRB_ELS_DCMD;
2678 sp->name = "ELS_DCMD";
2679 sp->fcport = fcport;
2680
2681 elsio->timeout = qla2x00_els_dcmd2_iocb_timeout;
2682 init_completion(&elsio->u.els_plogi.comp);
2683 if (wait)
2684 sp->flags = SRB_WAKEUP_ON_COMP;
2685
2686 qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2);
2687
2688 sp->done = qla2x00_els_dcmd2_sp_done;
2689 elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
2690
2691 ptr = elsio->u.els_plogi.els_plogi_pyld =
2692 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2693 &elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
2694 ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
2695
2696 if (!elsio->u.els_plogi.els_plogi_pyld) {
2697 rval = QLA_FUNCTION_FAILED;
2698 goto out;
2699 }
2700
2701 resp_ptr = elsio->u.els_plogi.els_resp_pyld =
2702 dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
2703 &elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
2704
2705 if (!elsio->u.els_plogi.els_resp_pyld) {
2706 rval = QLA_FUNCTION_FAILED;
2707 goto out;
2708 }
2709
2710 ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
2711
2712 memset(ptr, 0, sizeof(struct els_plogi_payload));
2713 memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
2714 memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
2715 &ha->plogi_els_payld.data,
2716 sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
2717
2718 elsio->u.els_plogi.els_cmd = els_opcode;
2719 elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
2720
2721 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
2722 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
2723 (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
2724
2725 rval = qla2x00_start_sp(sp);
2726 if (rval != QLA_SUCCESS) {
2727 rval = QLA_FUNCTION_FAILED;
2728 } else {
2729 ql_dbg(ql_dbg_disc, vha, 0x3074,
2730 "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
2731 sp->name, sp->handle, fcport->loop_id,
2732 fcport->d_id.b24, vha->d_id.b24);
2733 }
2734
2735 if (wait) {
2736 wait_for_completion(&elsio->u.els_plogi.comp);
2737
2738 if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
2739 rval = QLA_FUNCTION_FAILED;
2740 } else {
2741 goto done;
2742 }
2743
2744 out:
2745 fcport->flags &= ~(FCF_ASYNC_SENT);
2746 if (elsio->u.els_plogi.els_plogi_pyld)
2747 dma_free_coherent(&sp->vha->hw->pdev->dev,
2748 elsio->u.els_plogi.tx_size,
2749 elsio->u.els_plogi.els_plogi_pyld,
2750 elsio->u.els_plogi.els_plogi_pyld_dma);
2751
2752 if (elsio->u.els_plogi.els_resp_pyld)
2753 dma_free_coherent(&sp->vha->hw->pdev->dev,
2754 elsio->u.els_plogi.rx_size,
2755 elsio->u.els_plogi.els_resp_pyld,
2756 elsio->u.els_plogi.els_resp_pyld_dma);
2757
2758 sp->free(sp);
2759 done:
2760 return rval;
2761 }
2762
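/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: SRB carrying the bsg_job
 * @els_iocb: ELS IOCB entry to populate
 *
 * Fills in the transmit and receive data segments from the bsg request
 * and reply payload scatterlists.
 */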
2763 static void
2764 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2765 {
2766 struct bsg_job *bsg_job = sp->u.bsg_job;
2767 struct fc_bsg_request *bsg_request = bsg_job->request;
2768
2769 els_iocb->entry_type = ELS_IOCB_TYPE;
2770 els_iocb->entry_count = 1;
2771 els_iocb->sys_define = 0;
2772 els_iocb->entry_status = 0;
2773 els_iocb->handle = sp->handle;
2774 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2775 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2776 els_iocb->vp_index = sp->vha->vp_idx;
2777 els_iocb->sof_type = EST_SOFI3;
2778 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2779
2780 els_iocb->opcode =
2781 sp->type == SRB_ELS_CMD_RPT ?
2782 bsg_request->rqst_data.r_els.els_code :
2783 bsg_request->rqst_data.h_els.command_code;
2784 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2785 els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2786 els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2787 els_iocb->control_flags = 0;
2788 els_iocb->rx_byte_count =
2789 cpu_to_le32(bsg_job->reply_payload.payload_len);
2790 els_iocb->tx_byte_count =
2791 cpu_to_le32(bsg_job->request_payload.payload_len);
2792
2793 els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2794 (bsg_job->request_payload.sg_list)));
2795 els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2796 (bsg_job->request_payload.sg_list)));
2797 els_iocb->tx_len = cpu_to_le32(sg_dma_len
2798 (bsg_job->request_payload.sg_list));
2799
2800 els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2801 (bsg_job->reply_payload.sg_list)));
2802 els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2803 (bsg_job->reply_payload.sg_list)));
2804 els_iocb->rx_len = cpu_to_le32(sg_dma_len
2805 (bsg_job->reply_payload.sg_list));
2806
2807 sp->vha->qla_stats.control_requests++;
2808 }
2809
2810 static void
2811 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2812 {
2813 uint16_t avail_dsds;
2814 uint32_t *cur_dsd;
2815 struct scatterlist *sg;
2816 int index;
2817 uint16_t tot_dsds;
2818 scsi_qla_host_t *vha = sp->vha;
2819 struct qla_hw_data *ha = vha->hw;
2820 struct bsg_job *bsg_job = sp->u.bsg_job;
2821 int loop_iterartion = 0;
2822 int entry_count = 1;
2823
2824 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2825 ct_iocb->entry_type = CT_IOCB_TYPE;
2826 ct_iocb->entry_status = 0;
2827 ct_iocb->handle1 = sp->handle;
2828 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2829 ct_iocb->status = cpu_to_le16(0);
2830 ct_iocb->control_flags = cpu_to_le16(0);
2831 ct_iocb->timeout = 0;
2832 ct_iocb->cmd_dsd_count =
2833 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2834 ct_iocb->total_dsd_count =
2835 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2836 ct_iocb->req_bytecount =
2837 cpu_to_le32(bsg_job->request_payload.payload_len);
2838 ct_iocb->rsp_bytecount =
2839 cpu_to_le32(bsg_job->reply_payload.payload_len);
2840
2841 ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2842 (bsg_job->request_payload.sg_list)));
2843 ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2844 (bsg_job->request_payload.sg_list)));
2845 ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2846
2847 ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2848 (bsg_job->reply_payload.sg_list)));
2849 ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2850 (bsg_job->reply_payload.sg_list)));
2851 ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2852
2853 avail_dsds = 1;
2854 cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2855 index = 0;
2856 tot_dsds = bsg_job->reply_payload.sg_cnt;
2857
2858 for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2859 dma_addr_t sle_dma;
2860 cont_a64_entry_t *cont_pkt;
2861
2862 /* Allocate additional continuation packets? */
2863 if (avail_dsds == 0) {
2864 /*
2865 * Five DSDs are available in the Cont.
2866 * Type 1 IOCB.
2867 */
2868 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2869 vha->hw->req_q_map[0]);
2870 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2871 avail_dsds = 5;
2872 entry_count++;
2873 }
2874
2875 sle_dma = sg_dma_address(sg);
2876 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2877 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2878 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2879 loop_iterartion++;
2880 avail_dsds--;
2881 }
2882 ct_iocb->entry_count = entry_count;
2883
2884 sp->vha->qla_stats.control_requests++;
2885 }
2886
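/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB (FWI2-capable adapters).
 * @sp: SRB carrying the bsg_job
 * @ct_iocb: CT IOCB entry to populate
 *
 * Loads the command and response scatterlists into the IOCB, allocating
 * Continuation Type 1 IOCBs when more data segment descriptors are needed.
 */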
2887 static void
2888 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2889 {
2890 uint16_t avail_dsds;
2891 uint32_t *cur_dsd;
2892 struct scatterlist *sg;
2893 int index;
2894 uint16_t cmd_dsds, rsp_dsds;
2895 scsi_qla_host_t *vha = sp->vha;
2896 struct qla_hw_data *ha = vha->hw;
2897 struct bsg_job *bsg_job = sp->u.bsg_job;
2898 int entry_count = 1;
2899 cont_a64_entry_t *cont_pkt = NULL;
2900
2901 ct_iocb->entry_type = CT_IOCB_TYPE;
2902 ct_iocb->entry_status = 0;
2903 ct_iocb->sys_define = 0;
2904 ct_iocb->handle = sp->handle;
2905
2906 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2907 ct_iocb->vp_index = sp->vha->vp_idx;
2908 ct_iocb->comp_status = cpu_to_le16(0);
2909
2910 cmd_dsds = bsg_job->request_payload.sg_cnt;
2911 rsp_dsds = bsg_job->reply_payload.sg_cnt;
2912
2913 ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
2914 ct_iocb->timeout = 0;
2915 ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
2916 ct_iocb->cmd_byte_count =
2917 cpu_to_le32(bsg_job->request_payload.payload_len);
2918
2919 avail_dsds = 2;
2920 cur_dsd = (uint32_t *)ct_iocb->dseg_0_address;
2921 index = 0;
2922
2923 for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
2924 dma_addr_t sle_dma;
2925
2926 /* Allocate additional continuation packets? */
2927 if (avail_dsds == 0) {
2928 /*
2929 * Five DSDs are available in the Cont.
2930 * Type 1 IOCB.
2931 */
2932 cont_pkt = qla2x00_prep_cont_type1_iocb(
2933 vha, ha->req_q_map[0]);
2934 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2935 avail_dsds = 5;
2936 entry_count++;
2937 }
2938
2939 sle_dma = sg_dma_address(sg);
2940 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2941 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2942 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2943 avail_dsds--;
2944 }
2945
2946 index = 0;
2947
2948 for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
2949 dma_addr_t sle_dma;
2950
2951 /* Allocate additional continuation packets? */
2952 if (avail_dsds == 0) {
2953 /*
2954 * Five DSDs are available in the Cont.
2955 * Type 1 IOCB.
2956 */
2957 cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2958 ha->req_q_map[0]);
2959 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2960 avail_dsds = 5;
2961 entry_count++;
2962 }
2963
2964 sle_dma = sg_dma_address(sg);
2965 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
2966 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
2967 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
2968 avail_dsds--;
2969 }
2970 ct_iocb->entry_count = entry_count;
2971 }
2972
2973 /**
2974 * qla82xx_start_scsi() - Send a SCSI command to the ISP
2975 * @sp: command to send to the ISP
2976 *
2977 * Returns non-zero if a failure occurred, else zero.
2978 */
2979 int
2980 qla82xx_start_scsi(srb_t *sp)
2981 {
2982 int nseg;
2983 unsigned long flags;
2984 struct scsi_cmnd *cmd;
2985 uint32_t *clr_ptr;
2986 uint32_t index;
2987 uint32_t handle;
2988 uint16_t cnt;
2989 uint16_t req_cnt;
2990 uint16_t tot_dsds;
2991 struct device_reg_82xx __iomem *reg;
2992 uint32_t dbval;
2993 uint32_t *fcp_dl;
2994 uint8_t additional_cdb_len;
2995 struct ct6_dsd *ctx;
2996 struct scsi_qla_host *vha = sp->vha;
2997 struct qla_hw_data *ha = vha->hw;
2998 struct req_que *req = NULL;
2999 struct rsp_que *rsp = NULL;
3000
3001 /* Setup device pointers. */
3002 reg = &ha->iobase->isp82;
3003 cmd = GET_CMD_SP(sp);
3004 req = vha->req;
3005 rsp = ha->rsp_q_map[0];
3006
3007 /* So we know we haven't pci_map'ed anything yet */
3008 tot_dsds = 0;
3009
3010 dbval = 0x04 | (ha->portnum << 5);
3011
3012 /* Send marker if required */
3013 if (vha->marker_needed != 0) {
3014 if (qla2x00_marker(vha, req,
3015 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3016 ql_log(ql_log_warn, vha, 0x300c,
3017 "qla2x00_marker failed for cmd=%p.\n", cmd);
3018 return QLA_FUNCTION_FAILED;
3019 }
3020 vha->marker_needed = 0;
3021 }
3022
3023 /* Acquire ring specific lock */
3024 spin_lock_irqsave(&ha->hardware_lock, flags);
3025
3026 /* Check for room in outstanding command list. */
3027 handle = req->current_outstanding_cmd;
3028 for (index = 1; index < req->num_outstanding_cmds; index++) {
3029 handle++;
3030 if (handle == req->num_outstanding_cmds)
3031 handle = 1;
3032 if (!req->outstanding_cmds[handle])
3033 break;
3034 }
3035 if (index == req->num_outstanding_cmds)
3036 goto queuing_error;
3037
3038 /* Map the sg table so we have an accurate count of sg entries needed */
3039 if (scsi_sg_count(cmd)) {
3040 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3041 scsi_sg_count(cmd), cmd->sc_data_direction);
3042 if (unlikely(!nseg))
3043 goto queuing_error;
3044 } else
3045 nseg = 0;
3046
3047 tot_dsds = nseg;
3048
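/*
 * Commands needing more DSDs than ql2xshiftctondsd are sent as
 * Command Type 6 with externally chained DSD lists; smaller commands
 * use Command Type 7 with inline/continuation DSDs.
 */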
3049 if (tot_dsds > ql2xshiftctondsd) {
3050 struct cmd_type_6 *cmd_pkt;
3051 uint16_t more_dsd_lists = 0;
3052 struct dsd_dma *dsd_ptr;
3053 uint16_t i;
3054
3055 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3056 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3057 ql_dbg(ql_dbg_io, vha, 0x300d,
3058 "Num of DSD list %d is than %d for cmd=%p.\n",
3059 more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3060 cmd);
3061 goto queuing_error;
3062 }
3063
3064 if (more_dsd_lists <= ha->gbl_dsd_avail)
3065 goto sufficient_dsds;
3066 else
3067 more_dsd_lists -= ha->gbl_dsd_avail;
3068
3069 for (i = 0; i < more_dsd_lists; i++) {
3070 dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3071 if (!dsd_ptr) {
3072 ql_log(ql_log_fatal, vha, 0x300e,
3073 "Failed to allocate memory for dsd_dma "
3074 "for cmd=%p.\n", cmd);
3075 goto queuing_error;
3076 }
3077
3078 dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3079 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3080 if (!dsd_ptr->dsd_addr) {
3081 kfree(dsd_ptr);
3082 ql_log(ql_log_fatal, vha, 0x300f,
3083 "Failed to allocate memory for dsd_addr "
3084 "for cmd=%p.\n", cmd);
3085 goto queuing_error;
3086 }
3087 list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3088 ha->gbl_dsd_avail++;
3089 }
3090
3091 sufficient_dsds:
3092 req_cnt = 1;
3093
3094 if (req->cnt < (req_cnt + 2)) {
3095 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3096 &reg->req_q_out[0]);
3097 if (req->ring_index < cnt)
3098 req->cnt = cnt - req->ring_index;
3099 else
3100 req->cnt = req->length -
3101 (req->ring_index - cnt);
3102 if (req->cnt < (req_cnt + 2))
3103 goto queuing_error;
3104 }
3105
3106 ctx = sp->u.scmd.ctx =
3107 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
3108 if (!ctx) {
3109 ql_log(ql_log_fatal, vha, 0x3010,
3110 "Failed to allocate ctx for cmd=%p.\n", cmd);
3111 goto queuing_error;
3112 }
3113
3114 memset(ctx, 0, sizeof(struct ct6_dsd));
3115 ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3116 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3117 if (!ctx->fcp_cmnd) {
3118 ql_log(ql_log_fatal, vha, 0x3011,
3119 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3120 goto queuing_error;
3121 }
3122
3123 /* Initialize the DSD list and dma handle */
3124 INIT_LIST_HEAD(&ctx->dsd_list);
3125 ctx->dsd_use_cnt = 0;
3126
3127 if (cmd->cmd_len > 16) {
3128 additional_cdb_len = cmd->cmd_len - 16;
3129 if ((cmd->cmd_len % 4) != 0) {
3130 /* A SCSI command bigger than 16 bytes must be a
3131 * multiple of 4 bytes long.
3132 */
3133 ql_log(ql_log_warn, vha, 0x3012,
3134 "scsi cmd len %d not multiple of 4 "
3135 "for cmd=%p.\n", cmd->cmd_len, cmd);
3136 goto queuing_error_fcp_cmnd;
3137 }
3138 ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3139 } else {
3140 additional_cdb_len = 0;
3141 ctx->fcp_cmnd_len = 12 + 16 + 4;
3142 }
3143
3144 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3145 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3146
3147 /* Zero out remaining portion of packet. */
3148 /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
3149 clr_ptr = (uint32_t *)cmd_pkt + 2;
3150 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3151 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3152
3153 /* Set NPORT-ID and LUN number*/
3154 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3155 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3156 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3157 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3158 cmd_pkt->vp_index = sp->vha->vp_idx;
3159
3160 /* Build IOCB segments */
3161 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3162 goto queuing_error_fcp_cmnd;
3163
3164 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3165 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3166
3167 /* build FCP_CMND IU */
3168 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3169 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3170
3171 if (cmd->sc_data_direction == DMA_TO_DEVICE)
3172 ctx->fcp_cmnd->additional_cdb_len |= 1;
3173 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3174 ctx->fcp_cmnd->additional_cdb_len |= 2;
3175
3176 /* Populate the FCP_PRIO. */
3177 if (ha->flags.fcp_prio_enabled)
3178 ctx->fcp_cmnd->task_attribute |=
3179 sp->fcport->fcp_prio << 3;
3180
3181 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3182
3183 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
3184 additional_cdb_len);
3185 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3186
3187 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3188 cmd_pkt->fcp_cmnd_dseg_address[0] =
3189 cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
3190 cmd_pkt->fcp_cmnd_dseg_address[1] =
3191 cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
3192
3193 sp->flags |= SRB_FCP_CMND_DMA_VALID;
3194 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3195 /* Set total data segment count. */
3196 cmd_pkt->entry_count = (uint8_t)req_cnt;
3197 /* Specify response queue number where
3198 * completion should happen
3199 */
3200 cmd_pkt->entry_status = (uint8_t) rsp->id;
3201 } else {
3202 struct cmd_type_7 *cmd_pkt;
3203 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3204 if (req->cnt < (req_cnt + 2)) {
3205 cnt = (uint16_t)RD_REG_DWORD_RELAXED(
3206 &reg->req_q_out[0]);
3207 if (req->ring_index < cnt)
3208 req->cnt = cnt - req->ring_index;
3209 else
3210 req->cnt = req->length -
3211 (req->ring_index - cnt);
3212 }
3213 if (req->cnt < (req_cnt + 2))
3214 goto queuing_error;
3215
3216 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3217 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3218
3219 /* Zero out remaining portion of packet. */
3220 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3221 clr_ptr = (uint32_t *)cmd_pkt + 2;
3222 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3223 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3224
3225 /* Set NPORT-ID and LUN number*/
3226 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3227 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3228 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3229 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3230 cmd_pkt->vp_index = sp->vha->vp_idx;
3231
3232 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3233 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3234 sizeof(cmd_pkt->lun));
3235
3236 /* Populate the FCP_PRIO. */
3237 if (ha->flags.fcp_prio_enabled)
3238 cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3239
3240 /* Load SCSI command packet. */
3241 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3242 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3243
3244 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3245
3246 /* Build IOCB segments */
3247 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3248
3249 /* Set total data segment count. */
3250 cmd_pkt->entry_count = (uint8_t)req_cnt;
3251 /* Specify response queue number where
3252 * completion should happen.
3253 */
3254 cmd_pkt->entry_status = (uint8_t) rsp->id;
3255
3256 }
3257 /* Build command packet. */
3258 req->current_outstanding_cmd = handle;
3259 req->outstanding_cmds[handle] = sp;
3260 sp->handle = handle;
3261 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3262 req->cnt -= req_cnt;
3263 wmb();
3264
3265 /* Adjust ring index. */
3266 req->ring_index++;
3267 if (req->ring_index == req->length) {
3268 req->ring_index = 0;
3269 req->ring_ptr = req->ring;
3270 } else
3271 req->ring_ptr++;
3272
3273 sp->flags |= SRB_DMA_VALID;
3274
3275 /* Set chip new ring index. */
3276 /* write, read and verify logic */
3277 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3278 if (ql2xdbwr)
3279 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3280 else {
3281 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3282 wmb();
3283 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3284 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3285 wmb();
3286 }
3287 }
3288
3289 /* Manage unprocessed RIO/ZIO commands in response queue. */
3290 if (vha->flags.process_response_queue &&
3291 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3292 qla24xx_process_response_queue(vha, rsp);
3293
3294 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3295 return QLA_SUCCESS;
3296
3297 queuing_error_fcp_cmnd:
3298 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3299 queuing_error:
3300 if (tot_dsds)
3301 scsi_dma_unmap(cmd);
3302
3303 if (sp->u.scmd.ctx) {
3304 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3305 sp->u.scmd.ctx = NULL;
3306 }
3307 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3308
3309 return QLA_FUNCTION_FAILED;
3310 }
3311
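/**
 * qla24xx_abort_iocb() - Build an abort IOCB.
 * @sp: SRB carrying the abort request
 * @abt_iocb: abort entry to populate
 *
 * Fills in the handle of the command to abort along with its request
 * queue number, N_Port handle and port ID.
 */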
3312 static void
3313 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3314 {
3315 struct srb_iocb *aio = &sp->u.iocb_cmd;
3316 scsi_qla_host_t *vha = sp->vha;
3317 struct req_que *req = vha->req;
3318
3319 memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3320 abt_iocb->entry_type = ABORT_IOCB_TYPE;
3321 abt_iocb->entry_count = 1;
3322 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3323 abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3324 abt_iocb->handle_to_abort =
3325 cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
3326 aio->u.abt.cmd_hndl));
3327 abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3328 abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3329 abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3330 abt_iocb->vp_index = vha->vp_idx;
3331 abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
3332 /* Send the command to the firmware */
3333 wmb();
3334 }
3335
3336 static void
3337 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3338 {
3339 int i, sz;
3340
3341 mbx->entry_type = MBX_IOCB_TYPE;
3342 mbx->handle = sp->handle;
3343 sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3344
3345 for (i = 0; i < sz; i++)
3346 mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3347 }
3348
3349 static void
3350 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3351 {
3352 sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3353 qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3354 ct_pkt->handle = sp->handle;
3355 }
3356
3357 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3358 struct nack_to_isp *nack)
3359 {
3360 struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3361
3362 nack->entry_type = NOTIFY_ACK_TYPE;
3363 nack->entry_count = 1;
3364 nack->ox_id = ntfy->ox_id;
3365
3366 nack->u.isp24.handle = sp->handle;
3367 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3368 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3369 nack->u.isp24.flags = ntfy->u.isp24.flags &
3370 cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3371 }
3372 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3373 nack->u.isp24.status = ntfy->u.isp24.status;
3374 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3375 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3376 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3377 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3378 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3379 nack->u.isp24.srr_flags = 0;
3380 nack->u.isp24.srr_reject_code = 0;
3381 nack->u.isp24.srr_reject_code_expl = 0;
3382 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3383 }
3384
3385 /*
3386 * Build NVME LS request
3387 */
3388 static int
3389 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3390 {
3391 struct srb_iocb *nvme;
3392 int rval = QLA_SUCCESS;
3393
3394 nvme = &sp->u.iocb_cmd;
3395 cmd_pkt->entry_type = PT_LS4_REQUEST;
3396 cmd_pkt->entry_count = 1;
3397 cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
3398
3399 cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3400 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3401 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3402
3403 cmd_pkt->tx_dseg_count = 1;
3404 cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len;
3405 cmd_pkt->dseg0_len = nvme->u.nvme.cmd_len;
3406 cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
3407 cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
3408
3409 cmd_pkt->rx_dseg_count = 1;
3410 cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len;
3411 cmd_pkt->dseg1_len = nvme->u.nvme.rsp_len;
3412 cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
3413 cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
3414
3415 return rval;
3416 }
3417
3418 static void
3419 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3420 {
3421 int map, pos;
3422
3423 vce->entry_type = VP_CTRL_IOCB_TYPE;
3424 vce->handle = sp->handle;
3425 vce->entry_count = 1;
3426 vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3427 vce->vp_count = cpu_to_le16(1);
3428
3429 /*
3430 * The index map in firmware starts at 1, so decrement the index;
3431 * this is ok as index 0 is never used.
3432 */
3433 map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3434 pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3435 vce->vp_idx_map[map] |= 1 << pos;
3436 }
3437
3438 static void
3439 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3440 {
3441 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3442 logio->control_flags =
3443 cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3444
3445 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3446 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3447 logio->port_id[1] = sp->fcport->d_id.b.area;
3448 logio->port_id[2] = sp->fcport->d_id.b.domain;
3449 logio->vp_index = sp->fcport->vha->vp_idx;
3450 }
3451
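/**
 * qla2x00_start_sp() - Build and start an IOCB for a control SRB.
 * @sp: SRB to send to the ISP
 *
 * Allocates a request packet, dispatches to the SRB-type specific IOCB
 * builder and kicks the request queue.
 *
 * Returns QLA_SUCCESS on success or QLA_FUNCTION_FAILED if no packet
 * could be allocated.
 */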
3452 int
3453 qla2x00_start_sp(srb_t *sp)
3454 {
3455 int rval;
3456 scsi_qla_host_t *vha = sp->vha;
3457 struct qla_hw_data *ha = vha->hw;
3458 void *pkt;
3459 unsigned long flags;
3460
3461 rval = QLA_FUNCTION_FAILED;
3462 spin_lock_irqsave(&ha->hardware_lock, flags);
3463 pkt = qla2x00_alloc_iocbs(vha, sp);
3464 if (!pkt) {
3465 ql_log(ql_log_warn, vha, 0x700c,
3466 "qla2x00_alloc_iocbs failed.\n");
3467 goto done;
3468 }
3469
3470 rval = QLA_SUCCESS;
3471 switch (sp->type) {
3472 case SRB_LOGIN_CMD:
3473 IS_FWI2_CAPABLE(ha) ?
3474 qla24xx_login_iocb(sp, pkt) :
3475 qla2x00_login_iocb(sp, pkt);
3476 break;
3477 case SRB_PRLI_CMD:
3478 qla24xx_prli_iocb(sp, pkt);
3479 break;
3480 case SRB_LOGOUT_CMD:
3481 IS_FWI2_CAPABLE(ha) ?
3482 qla24xx_logout_iocb(sp, pkt) :
3483 qla2x00_logout_iocb(sp, pkt);
3484 break;
3485 case SRB_ELS_CMD_RPT:
3486 case SRB_ELS_CMD_HST:
3487 qla24xx_els_iocb(sp, pkt);
3488 break;
3489 case SRB_CT_CMD:
3490 IS_FWI2_CAPABLE(ha) ?
3491 qla24xx_ct_iocb(sp, pkt) :
3492 qla2x00_ct_iocb(sp, pkt);
3493 break;
3494 case SRB_ADISC_CMD:
3495 IS_FWI2_CAPABLE(ha) ?
3496 qla24xx_adisc_iocb(sp, pkt) :
3497 qla2x00_adisc_iocb(sp, pkt);
3498 break;
3499 case SRB_TM_CMD:
3500 IS_QLAFX00(ha) ?
3501 qlafx00_tm_iocb(sp, pkt) :
3502 qla24xx_tm_iocb(sp, pkt);
3503 break;
3504 case SRB_FXIOCB_DCMD:
3505 case SRB_FXIOCB_BCMD:
3506 qlafx00_fxdisc_iocb(sp, pkt);
3507 break;
3508 case SRB_NVME_LS:
3509 qla_nvme_ls(sp, pkt);
3510 break;
3511 case SRB_ABT_CMD:
3512 IS_QLAFX00(ha) ?
3513 qlafx00_abort_iocb(sp, pkt) :
3514 qla24xx_abort_iocb(sp, pkt);
3515 break;
3516 case SRB_ELS_DCMD:
3517 qla24xx_els_logo_iocb(sp, pkt);
3518 break;
3519 case SRB_CT_PTHRU_CMD:
3520 qla2x00_ctpthru_cmd_iocb(sp, pkt);
3521 break;
3522 case SRB_MB_IOCB:
3523 qla2x00_mb_iocb(sp, pkt);
3524 break;
3525 case SRB_NACK_PLOGI:
3526 case SRB_NACK_PRLI:
3527 case SRB_NACK_LOGO:
3528 qla2x00_send_notify_ack_iocb(sp, pkt);
3529 break;
3530 case SRB_CTRL_VP:
3531 qla25xx_ctrlvp_iocb(sp, pkt);
3532 break;
3533 case SRB_PRLO_CMD:
3534 qla24xx_prlo_iocb(sp, pkt);
3535 break;
3536 default:
3537 break;
3538 }
3539
3540 wmb();
3541 qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3542 done:
3543 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3544 return rval;
3545 }
3546
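/**
 * qla25xx_build_bidir_iocb() - Build a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @cmd_pkt: bidirectional command packet to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Loads the write and read scatterlists of the bsg_job into the IOCB,
 * allocating Continuation Type 1 IOCBs as needed.
 */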
3547 static void
3548 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3549 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3550 {
3551 uint16_t avail_dsds;
3552 uint32_t *cur_dsd;
3553 uint32_t req_data_len = 0;
3554 uint32_t rsp_data_len = 0;
3555 struct scatterlist *sg;
3556 int index;
3557 int entry_count = 1;
3558 struct bsg_job *bsg_job = sp->u.bsg_job;
3559
3560 /* Update entry type to indicate bidir command */
3561 *((uint32_t *)(&cmd_pkt->entry_type)) =
3562 cpu_to_le32(COMMAND_BIDIRECTIONAL);
3563
3564 /* Set the transfer direction; for a bidirectional command both
3565 * flags are set. Also set the BD_WRAP_BACK flag; the firmware will
3566 * take care of assigning DID=SID for outgoing pkts.
3567 */
3568 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3569 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3570 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3571 BD_WRAP_BACK);
3572
3573 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3574 cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3575 cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3576 cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3577
3578 vha->bidi_stats.transfer_bytes += req_data_len;
3579 vha->bidi_stats.io_count++;
3580
3581 vha->qla_stats.output_bytes += req_data_len;
3582 vha->qla_stats.output_requests++;
3583
3584 /* Only one DSD is available in the bidirectional IOCB; the remaining
3585 * DSDs are bundled in continuation IOCBs.
3586 */
3587 avail_dsds = 1;
3588 cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3589
3590 index = 0;
3591
3592 for_each_sg(bsg_job->request_payload.sg_list, sg,
3593 bsg_job->request_payload.sg_cnt, index) {
3594 dma_addr_t sle_dma;
3595 cont_a64_entry_t *cont_pkt;
3596
3597 /* Allocate additional continuation packets */
3598 if (avail_dsds == 0) {
3599 /* A Continuation Type 1 IOCB can accommodate
3600 * 5 DSDs.
3601 */
3602 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3603 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3604 avail_dsds = 5;
3605 entry_count++;
3606 }
3607 sle_dma = sg_dma_address(sg);
3608 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3609 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3610 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3611 avail_dsds--;
3612 }
3613 /* For a read request the DSDs always go to a continuation IOCB
3614 * and follow the write DSDs. If there is room on the current IOCB
3615 * they are added to it; otherwise a new continuation IOCB is
3616 * allocated.
3617 */
3618 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3619 bsg_job->reply_payload.sg_cnt, index) {
3620 dma_addr_t sle_dma;
3621 cont_a64_entry_t *cont_pkt;
3622
3623 /* Allocate additional continuation packets */
3624 if (avail_dsds == 0) {
3625 /* A Continuation Type 1 IOCB can accommodate
3626 * 5 DSDs.
3627 */
3628 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3629 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3630 avail_dsds = 5;
3631 entry_count++;
3632 }
3633 sle_dma = sg_dma_address(sg);
3634 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3635 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3636 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3637 avail_dsds--;
3638 }
3639 /* This value should be the same as the number of IOCBs required for this cmd */
3640 cmd_pkt->entry_count = entry_count;
3641 }
3642
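/**
 * qla2x00_start_bidir() - Queue a bidirectional command to the ISP.
 * @sp: command to send to the ISP
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns an EXT_STATUS_* code: EXT_STATUS_OK on success, EXT_STATUS_BUSY
 * if no queue space is available or EXT_STATUS_MAILBOX if the marker
 * IOCB could not be sent.
 */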
3643 int
3644 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3645 {
3646
3647 struct qla_hw_data *ha = vha->hw;
3648 unsigned long flags;
3649 uint32_t handle;
3650 uint32_t index;
3651 uint16_t req_cnt;
3652 uint16_t cnt;
3653 uint32_t *clr_ptr;
3654 struct cmd_bidir *cmd_pkt = NULL;
3655 struct rsp_que *rsp;
3656 struct req_que *req;
3657 int rval = EXT_STATUS_OK;
3658
3659 rval = QLA_SUCCESS;
3660
3661 rsp = ha->rsp_q_map[0];
3662 req = vha->req;
3663
3664 /* Send marker if required */
3665 if (vha->marker_needed != 0) {
3666 if (qla2x00_marker(vha, req,
3667 rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3668 return EXT_STATUS_MAILBOX;
3669 vha->marker_needed = 0;
3670 }
3671
3672 /* Acquire ring specific lock */
3673 spin_lock_irqsave(&ha->hardware_lock, flags);
3674
3675 /* Check for room in outstanding command list. */
3676 handle = req->current_outstanding_cmd;
3677 for (index = 1; index < req->num_outstanding_cmds; index++) {
3678 handle++;
3679 if (handle == req->num_outstanding_cmds)
3680 handle = 1;
3681 if (!req->outstanding_cmds[handle])
3682 break;
3683 }
3684
3685 if (index == req->num_outstanding_cmds) {
3686 rval = EXT_STATUS_BUSY;
3687 goto queuing_error;
3688 }
3689
3690 /* Calculate number of IOCB required */
3691 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3692
3693 /* Check for room on request queue. */
3694 if (req->cnt < req_cnt + 2) {
3695 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3696 RD_REG_DWORD_RELAXED(req->req_q_out);
3697 if (req->ring_index < cnt)
3698 req->cnt = cnt - req->ring_index;
3699 else
3700 req->cnt = req->length -
3701 (req->ring_index - cnt);
3702 }
3703 if (req->cnt < req_cnt + 2) {
3704 rval = EXT_STATUS_BUSY;
3705 goto queuing_error;
3706 }
3707
3708 cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3709 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3710
3711 /* Zero out remaining portion of packet. */
3712 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3713 clr_ptr = (uint32_t *)cmd_pkt + 2;
3714 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3715
3716 /* Set NPORT-ID (of vha)*/
3717 cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3718 cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3719 cmd_pkt->port_id[1] = vha->d_id.b.area;
3720 cmd_pkt->port_id[2] = vha->d_id.b.domain;
3721
3722 qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3723 cmd_pkt->entry_status = (uint8_t) rsp->id;
3724 /* Build command packet. */
3725 req->current_outstanding_cmd = handle;
3726 req->outstanding_cmds[handle] = sp;
3727 sp->handle = handle;
3728 req->cnt -= req_cnt;
3729
3730 /* Send the command to the firmware */
3731 wmb();
3732 qla2x00_start_iocbs(vha, req);
3733 queuing_error:
3734 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3735 return rval;
3736 }
3737