1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_gbl.h"
9
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/vmalloc.h>
13
14 #include "qla_devtbl.h"
15
16 #ifdef CONFIG_SPARC
17 #include <asm/prom.h>
18 #endif
19
20 #include <target/target_core_base.h>
21 #include "qla_target.h"
22
23 /*
24 * QLogic ISP2x00 Hardware Support Function Prototypes.
25 */
26 static int qla2x00_isp_firmware(scsi_qla_host_t *);
27 static int qla2x00_setup_chip(scsi_qla_host_t *);
28 static int qla2x00_fw_ready(scsi_qla_host_t *);
29 static int qla2x00_configure_hba(scsi_qla_host_t *);
30 static int qla2x00_configure_loop(scsi_qla_host_t *);
31 static int qla2x00_configure_local_loop(scsi_qla_host_t *);
32 static int qla2x00_configure_fabric(scsi_qla_host_t *);
33 static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *);
34 static int qla2x00_restart_isp(scsi_qla_host_t *);
35
36 static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *);
37 static int qla84xx_init_chip(scsi_qla_host_t *);
38 static int qla25xx_init_queues(struct qla_hw_data *);
39 static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *);
40 static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha,
41 struct event_arg *ea);
42 static void qla24xx_handle_prli_done_event(struct scsi_qla_host *,
43 struct event_arg *);
44 static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *);
45
46 /* SRB Extensions ---------------------------------------------------------- */
47
/*
 * qla2x00_sp_timeout - timer callback for an SRB whose timer has expired.
 * @t: the expired timer embedded in the SRB's iocb_cmd.
 *
 * Claims the SRB's slot in the request queue's outstanding-command array
 * (under the hardware lock, so the normal completion path can no longer
 * find it) and then dispatches to the IOCB-specific timeout handler.
 */
void
qla2x00_sp_timeout(struct timer_list *t)
{
	srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer);
	struct srb_iocb *iocb;
	struct req_que *req;
	unsigned long flags;
	struct qla_hw_data *ha = sp->vha->hw;

	/* Timer callbacks are expected to run with IRQs enabled. */
	WARN_ON_ONCE(irqs_disabled());
	spin_lock_irqsave(&ha->hardware_lock, flags);
	req = sp->qpair->req;
	/* Remove the handle so the ISR completion path won't touch it. */
	req->outstanding_cmds[sp->handle] = NULL;
	iocb = &sp->u.iocb_cmd;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* Per-IOCB timeout handler is invoked without the lock held. */
	iocb->timeout(sp);
}
65
/* Release an SRB: stop any pending IOCB timer, then return it to the pool. */
void qla2x00_sp_free(srb_t *sp)
{
	del_timer(&sp->u.iocb_cmd.timer);
	qla2x00_rel_sp(sp);
}
73
74 /* Asynchronous Login/Logout Routines -------------------------------------- */
75
76 unsigned long
qla2x00_get_async_timeout(struct scsi_qla_host * vha)77 qla2x00_get_async_timeout(struct scsi_qla_host *vha)
78 {
79 unsigned long tmo;
80 struct qla_hw_data *ha = vha->hw;
81
82 /* Firmware should use switch negotiated r_a_tov for timeout. */
83 tmo = ha->r_a_tov / 10 * 2;
84 if (IS_QLAFX00(ha)) {
85 tmo = FX00_DEF_RATOV * 2;
86 } else if (!IS_FWI2_CAPABLE(ha)) {
87 /*
88 * Except for earlier ISPs where the timeout is seeded from the
89 * initialization control block.
90 */
91 tmo = ha->login_timeout;
92 }
93 return tmo;
94 }
95
qla24xx_abort_iocb_timeout(void * data)96 static void qla24xx_abort_iocb_timeout(void *data)
97 {
98 srb_t *sp = data;
99 struct srb_iocb *abt = &sp->u.iocb_cmd;
100 struct qla_qpair *qpair = sp->qpair;
101 u32 handle;
102 unsigned long flags;
103
104 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
105 for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) {
106 /* removing the abort */
107 if (qpair->req->outstanding_cmds[handle] == sp) {
108 qpair->req->outstanding_cmds[handle] = NULL;
109 break;
110 }
111 }
112 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
113
114 abt->u.abt.comp_status = CS_TIMEOUT;
115 sp->done(sp, QLA_OS_TIMER_EXPIRED);
116 }
117
/*
 * Completion callback for an abort IOCB.  Synchronous callers sleep on
 * the embedded completion and free the SRB themselves; otherwise the SRB
 * is released here.
 */
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
	del_timer(&sp->u.iocb_cmd.timer);

	if (sp->flags & SRB_WAKEUP_ON_COMP)
		complete(&sp->u.iocb_cmd.u.abt.comp);
	else
		sp->free(sp);
}
128
/*
 * qla24xx_async_abort_cmd - issue an abort IOCB for an outstanding command.
 * @cmd_sp: SRB of the command to abort.
 * @wait:   when true, block until the abort itself completes.
 *
 * Allocates a second SRB (SRB_ABT_CMD) on @cmd_sp's queue pair and sends
 * it to the firmware.  Returns QLA_SUCCESS when the abort was started
 * (or, with @wait, completed with CS_COMPLETE); QLA_FUNCTION_FAILED
 * otherwise.  In the non-wait case the SRB frees itself on completion.
 */
static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
	scsi_qla_host_t *vha = cmd_sp->vha;
	struct srb_iocb *abt_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	/* GFP_ATOMIC: may be called from timer/softirq context. */
	sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
	    GFP_ATOMIC);
	if (!sp)
		return rval;

	abt_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_ABT_CMD;
	sp->name = "abort";
	sp->qpair = cmd_sp->qpair;
	if (wait)
		sp->flags = SRB_WAKEUP_ON_COMP;

	abt_iocb->timeout = qla24xx_abort_iocb_timeout;
	init_completion(&abt_iocb->u.abt.comp);
	/* FW can send 2 x ABTS's timeout/20s */
	qla2x00_init_timer(sp, 42);

	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
	abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);

	sp->done = qla24xx_abort_sp_done;

	ql_dbg(ql_dbg_async, vha, 0x507c,
	    "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle,
	    cmd_sp->type);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		sp->free(sp);
		return rval;
	}

	if (wait) {
		/* Map the abort's own completion status to a return code. */
		wait_for_completion(&abt_iocb->u.abt.comp);
		rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
			QLA_SUCCESS : QLA_FUNCTION_FAILED;
		sp->free(sp);
	}

	return rval;
}
177
178 void
qla2x00_async_iocb_timeout(void * data)179 qla2x00_async_iocb_timeout(void *data)
180 {
181 srb_t *sp = data;
182 fc_port_t *fcport = sp->fcport;
183 struct srb_iocb *lio = &sp->u.iocb_cmd;
184 int rc, h;
185 unsigned long flags;
186
187 if (fcport) {
188 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
189 "Async-%s timeout - hdl=%x portid=%06x %8phC.\n",
190 sp->name, sp->handle, fcport->d_id.b24, fcport->port_name);
191
192 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
193 } else {
194 pr_info("Async-%s timeout - hdl=%x.\n",
195 sp->name, sp->handle);
196 }
197
198 switch (sp->type) {
199 case SRB_LOGIN_CMD:
200 rc = qla24xx_async_abort_cmd(sp, false);
201 if (rc) {
202 /* Retry as needed. */
203 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
204 lio->u.logio.data[1] =
205 lio->u.logio.flags & SRB_LOGIN_RETRIED ?
206 QLA_LOGIO_LOGIN_RETRIED : 0;
207 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
208 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
209 h++) {
210 if (sp->qpair->req->outstanding_cmds[h] ==
211 sp) {
212 sp->qpair->req->outstanding_cmds[h] =
213 NULL;
214 break;
215 }
216 }
217 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
218 sp->done(sp, QLA_FUNCTION_TIMEOUT);
219 }
220 break;
221 case SRB_LOGOUT_CMD:
222 case SRB_CT_PTHRU_CMD:
223 case SRB_MB_IOCB:
224 case SRB_NACK_PLOGI:
225 case SRB_NACK_PRLI:
226 case SRB_NACK_LOGO:
227 case SRB_CTRL_VP:
228 rc = qla24xx_async_abort_cmd(sp, false);
229 if (rc) {
230 spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
231 for (h = 1; h < sp->qpair->req->num_outstanding_cmds;
232 h++) {
233 if (sp->qpair->req->outstanding_cmds[h] ==
234 sp) {
235 sp->qpair->req->outstanding_cmds[h] =
236 NULL;
237 break;
238 }
239 }
240 spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
241 sp->done(sp, QLA_FUNCTION_TIMEOUT);
242 }
243 break;
244 default:
245 WARN_ON_ONCE(true);
246 sp->done(sp, QLA_FUNCTION_TIMEOUT);
247 break;
248 }
249 }
250
/*
 * Completion callback for an async PLOGI: clear the in-flight flags and,
 * unless the driver is unloading, feed the login status words into the
 * PLOGI-done event handler.
 */
static void qla2x00_async_login_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;

	ql_dbg(ql_dbg_disc, vha, 0x20dd,
	    "%s %8phC res %d \n", __func__, sp->fcport->port_name, res);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		struct srb_iocb *lio = &sp->u.iocb_cmd;
		struct event_arg ea = {
			.fcport = sp->fcport,
			.data = { lio->u.logio.data[0],
				  lio->u.logio.data[1] },
			.iop = { lio->u.logio.iop[0],
				 lio->u.logio.iop[1] },
			.sp = sp,
		};

		qla24xx_handle_plogi_done_event(vha, &ea);
	}

	sp->free(sp);
}
275
276 static inline bool
fcport_is_smaller(fc_port_t * fcport)277 fcport_is_smaller(fc_port_t *fcport)
278 {
279 if (wwn_to_u64(fcport->port_name) <
280 wwn_to_u64(fcport->vha->port_name))
281 return true;
282 else
283 return false;
284 }
285
286 static inline bool
fcport_is_bigger(fc_port_t * fcport)287 fcport_is_bigger(fc_port_t *fcport)
288 {
289 return !fcport_is_smaller(fcport);
290 }
291
/*
 * qla2x00_async_login - start an asynchronous PLOGI/PRLI to @fcport.
 * @vha:    adapter state.
 * @fcport: remote port to log in to.
 * @data:   login status words (kept for the async-op calling convention).
 *
 * Returns QLA_SUCCESS when the login IOCB was queued.  On failure the
 * FCF_ASYNC_* flags are cleared so the relogin machinery can retry.
 */
int
qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	/* Bail when offline, a login is already in flight, or no loop ID. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	fcport->disc_state = DSC_LOGIN_PEND;
	sp->type = SRB_LOGIN_CMD;
	sp->name = "login";
	/* Snapshot generations so completion can detect RSCN/login churn. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_login_sp_done;
	/*
	 * N2N: the port with the bigger WWPN does PRLI only (the smaller
	 * side initiates PLOGI); otherwise request a conditional PLOGI.
	 */
	if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport))
		lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY;
	else
		lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;

	/* NVMe-capable port: skip PRLI here (see SRB_LOGIN_SKIP_PRLI). */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x2072,
	    "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
	    "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    fcport->login_retry);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/*
	 * NOTE(review): FCF_ASYNC_ACTIVE is presumably set by the work-queue
	 * poster before this runs; it is cleared on failure so the port can
	 * be retried -- verify against the QLA_EVT_ASYNC_LOGIN path.
	 */
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	return rval;
}
356
/*
 * Completion callback for an async LOGO: clear the in-flight flags, bump
 * the login generation, and hand the result to the target-mode handler.
 */
static void qla2x00_async_logout_sp_done(srb_t *sp, int res)
{
	fc_port_t *fcport = sp->fcport;

	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	fcport->login_gen++;
	qlt_logo_completion_handler(fcport, res);
	sp->free(sp);
}
364
365 int
qla2x00_async_logout(struct scsi_qla_host * vha,fc_port_t * fcport)366 qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
367 {
368 srb_t *sp;
369 struct srb_iocb *lio;
370 int rval = QLA_FUNCTION_FAILED;
371
372 fcport->flags |= FCF_ASYNC_SENT;
373 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
374 if (!sp)
375 goto done;
376
377 sp->type = SRB_LOGOUT_CMD;
378 sp->name = "logout";
379
380 lio = &sp->u.iocb_cmd;
381 lio->timeout = qla2x00_async_iocb_timeout;
382 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
383
384 sp->done = qla2x00_async_logout_sp_done;
385
386 ql_dbg(ql_dbg_disc, vha, 0x2070,
387 "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
388 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
389 fcport->d_id.b.area, fcport->d_id.b.al_pa,
390 fcport->port_name);
391
392 rval = qla2x00_start_sp(sp);
393 if (rval != QLA_SUCCESS)
394 goto done_free_sp;
395 return rval;
396
397 done_free_sp:
398 sp->free(sp);
399 done:
400 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
401 return rval;
402 }
403
/*
 * qla2x00_async_prlo_done - process-context completion of an async PRLO.
 * @vha:    adapter state.
 * @fcport: port the PRLO was sent to.
 * @data:   data[0] carries the PRLO completion status.
 *
 * Marks the device lost (initiator mode only) and notifies the
 * target-mode logout completion handler.
 */
void
qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	fcport->flags &= ~FCF_ASYNC_ACTIVE;
	/* Don't re-login in target mode */
	if (!fcport->tgt_session)
		qla2x00_mark_device_lost(vha, fcport, 1, 0);
	qlt_logo_completion_handler(fcport, data[0]);
}
414
/*
 * IRQ-context completion for an async PRLO: defer the real processing
 * (qla2x00_async_prlo_done) to the driver work queue, unless the driver
 * is unloading.
 */
static void qla2x00_async_prlo_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *iocb = &sp->u.iocb_cmd;

	sp->fcport->flags &= ~FCF_ASYNC_ACTIVE;
	if (!test_bit(UNLOADING, &vha->dpc_flags))
		qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport,
		    iocb->u.logio.data);
	sp->free(sp);
}
426
427 int
qla2x00_async_prlo(struct scsi_qla_host * vha,fc_port_t * fcport)428 qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport)
429 {
430 srb_t *sp;
431 struct srb_iocb *lio;
432 int rval;
433
434 rval = QLA_FUNCTION_FAILED;
435 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
436 if (!sp)
437 goto done;
438
439 sp->type = SRB_PRLO_CMD;
440 sp->name = "prlo";
441
442 lio = &sp->u.iocb_cmd;
443 lio->timeout = qla2x00_async_iocb_timeout;
444 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
445
446 sp->done = qla2x00_async_prlo_sp_done;
447
448 ql_dbg(ql_dbg_disc, vha, 0x2070,
449 "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
450 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
451 fcport->d_id.b.area, fcport->d_id.b.al_pa);
452
453 rval = qla2x00_start_sp(sp);
454 if (rval != QLA_SUCCESS)
455 goto done_free_sp;
456
457 return rval;
458
459 done_free_sp:
460 sp->free(sp);
461 done:
462 fcport->flags &= ~FCF_ASYNC_ACTIVE;
463 return rval;
464 }
465
/*
 * qla24xx_handle_adisc_event - process the result of an async ADISC.
 * @vha: adapter state.
 * @ea:  event arguments; ea->data[0] is the mailbox completion status,
 *       ea->sp->gen1/gen2 the RSCN/login generations captured at send.
 *
 * On ADISC failure the session is scheduled for deletion so a clean
 * relogin can run.  On success, if the generations are unchanged, the
 * port-database handling continues via __qla24xx_handle_gpdb_event().
 */
static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
	    fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	if (ea->data[0] != MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_disc, vha, 0x2066,
		    "%s %8phC: adisc fail: post delete\n",
		    __func__, ea->fcport->port_name);
		/* deleted = 0 & logout_on_delete = force fw cleanup */
		fcport->deleted = 0;
		fcport->logout_on_delete = 1;
		qlt_schedule_sess_for_deletion(ea->fcport);
		return;
	}

	if (ea->fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->sp->gen2 != ea->fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, ea->fcport->port_name);
		return;
	} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
		/* An RSCN arrived while the ADISC was in flight: redo. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	__qla24xx_handle_gpdb_event(vha, ea);
}
508
/* Queue a deferred ELS PLOGI for @fcport on the driver work list. */
static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI);

	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	fcport->flags |= FCF_ASYNC_ACTIVE;
	return qla2x00_post_work(vha, e);
}
521
/*
 * Completion callback for an async ADISC: package the login status words
 * into an event_arg and hand them to qla24xx_handle_adisc_event().
 */
static void qla2x00_async_adisc_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;
	struct event_arg ea = {
		.rc = res,
		.data = { lio->u.logio.data[0], lio->u.logio.data[1] },
		.iop = { lio->u.logio.iop[0], lio->u.logio.iop[1] },
		.fcport = sp->fcport,
		.sp = sp,
	};

	ql_dbg(ql_dbg_disc, vha, 0x2066,
	    "Async done-%s res %x %8phC\n",
	    sp->name, res, sp->fcport->port_name);

	sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);

	qla24xx_handle_adisc_event(vha, &ea);

	sp->free(sp);
}
547
/*
 * qla2x00_async_adisc - issue an asynchronous ADISC to validate @fcport.
 * @vha:    adapter state.
 * @fcport: remote port to validate.
 * @data:   data[1] may carry QLA_LOGIO_LOGIN_RETRIED to mark a retry.
 *
 * Returns QLA_SUCCESS when the ADISC IOCB was queued.  On failure the
 * async flags are cleared and the ADISC is re-posted via the work queue.
 */
int
qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
    uint16_t *data)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_ADISC_CMD;
	sp->name = "adisc";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* Snapshot generations so completion can detect RSCN/login churn. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_adisc_sp_done;
	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
		lio->u.logio.flags |= SRB_LOGIN_RETRIED;

	ql_dbg(ql_dbg_disc, vha, 0x206f,
	    "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
done:
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	/* Re-queue the ADISC through the work queue so it gets retried. */
	qla2x00_post_async_adisc_work(vha, fcport, data);
	return rval;
}
594
qla2x00_is_reserved_id(scsi_qla_host_t * vha,uint16_t loop_id)595 static bool qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
596 {
597 struct qla_hw_data *ha = vha->hw;
598
599 if (IS_FWI2_CAPABLE(ha))
600 return loop_id > NPH_LAST_HANDLE;
601
602 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
603 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST;
604 }
605
606 /**
607 * qla2x00_find_new_loop_id - scan through our port list and find a new usable loop ID
608 * @vha: adapter state pointer.
609 * @dev: port structure pointer.
610 *
611 * Returns:
612 * qla2x00 local function return status code.
613 *
614 * Context:
615 * Kernel context.
616 */
static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	int ret = QLA_SUCCESS;

	/* Claim the lowest free, non-reserved ID under the vport lock. */
	spin_lock_irqsave(&ha->vport_slock, flags);

	dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
	if (dev->loop_id >= LOOPID_MAP_SIZE ||
	    qla2x00_is_reserved_id(vha, dev->loop_id)) {
		dev->loop_id = FC_NO_LOOP_ID;
		ret = QLA_FUNCTION_FAILED;
	} else {
		set_bit(dev->loop_id, ha->loop_id_map);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (ret == QLA_SUCCESS)
		ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
		    "Assigning new loopid=%x, portid=%x.\n",
		    dev->loop_id, dev->d_id.b24);
	else
		ql_log(ql_log_warn, dev->vha, 0x2087,
		    "No loop_id's available, portid=%x.\n",
		    dev->d_id.b24);

	return ret;
}
648
/* Return @fcport's loop ID to the free map; no-op for unset/reserved IDs. */
void qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;
	uint16_t id = fcport->loop_id;

	if (id == FC_NO_LOOP_ID || qla2x00_is_reserved_id(fcport->vha, id))
		return;

	clear_bit(id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}
660
/*
 * qla24xx_handle_gnl_done_event - digest a completed Get Name List (GNL).
 * @vha: adapter state.
 * @ea:  event arguments; ea->fcport is the port the GNL was issued for,
 *       ea->rc the command result and ea->data[0] the bytes transferred.
 *
 * Walks the firmware's name-list response looking for @fcport's WWPN,
 * reconciles loop-ID / port-ID conflicts with other sessions, and pushes
 * the port to the next step of the login state machine.  If the port is
 * not in the list at all, topology-specific recovery is started.
 *
 * Fix: the "not found" fabric loop below now decodes port_id[] in the
 * same byte order as the lookup loop above (and as
 * qla24xx_async_gnl_sp_done()); it previously read the bytes reversed,
 * so the d_id comparison could miss conflicting sessions.
 */
static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport, *conflict_fcport;
	struct get_name_list_extended *e;
	u16 i, n, found = 0, loop_id;
	port_id_t id;
	u64 wwn;
	u16 data[2];
	u8 current_login_state;

	fcport = ea->fcport;
	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc,
	    fcport->login_gen, fcport->last_login_gen,
	    fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	if (ea->rc) { /* rval */
		if (fcport->login_retry == 0) {
			ql_dbg(ql_dbg_disc, vha, 0x20de,
			    "GNL failed Port login retry %8phN, retry cnt=%d.\n",
			    fcport->port_name, fcport->login_retry);
		}
		return;
	}

	/* Generations changed while the GNL was in flight: start over. */
	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	} else if (fcport->last_login_gen != fcport->login_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20e0,
		    "%s %8phC login gen changed\n",
		    __func__, fcport->port_name);
		return;
	}

	n = ea->data[0] / sizeof(struct get_name_list_extended);

	ql_dbg(ql_dbg_disc, vha, 0x20e1,
	    "%s %d %8phC n %d %02x%02x%02x lid %d \n",
	    __func__, __LINE__, fcport->port_name, n,
	    fcport->d_id.b.domain, fcport->d_id.b.area,
	    fcport->d_id.b.al_pa, fcport->loop_id);

	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);
		/* port_id[] is least-significant byte first in the list. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE))
			continue;

		if (IS_SW_RESV_ADDR(id))
			continue;

		found = 1;

		loop_id = le16_to_cpu(e->nport_handle);
		loop_id = (loop_id & 0x7fff);
		/* NVMe login state lives in the high nibble. */
		if (fcport->fc4f_nvme)
			current_login_state = e->current_login_state >> 4;
		else
			current_login_state = e->current_login_state & 0xf;

		ql_dbg(ql_dbg_disc, vha, 0x20e2,
		    "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n",
		    __func__, fcport->port_name,
		    e->current_login_state, fcport->fw_login_state,
		    fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, loop_id, fcport->loop_id);

		switch (fcport->disc_state) {
		case DSC_DELETE_PEND:
		case DSC_DELETED:
			break;
		default:
			/*
			 * Port ID or loop ID moved out from under the
			 * session: tear it down and rediscover.
			 */
			if ((id.b24 != fcport->d_id.b24 &&
			    fcport->d_id.b24 &&
			    fcport->loop_id != FC_NO_LOOP_ID) ||
			    (fcport->loop_id != FC_NO_LOOP_ID &&
			    fcport->loop_id != loop_id)) {
				ql_dbg(ql_dbg_disc, vha, 0x20e3,
				    "%s %d %8phC post del sess\n",
				    __func__, __LINE__, fcport->port_name);
				if (fcport->n2n_flag)
					fcport->d_id.b24 = 0;
				qlt_schedule_sess_for_deletion(fcport);
				return;
			}
			break;
		}

		fcport->loop_id = loop_id;
		if (fcport->n2n_flag)
			fcport->d_id.b24 = id.b24;

		wwn = wwn_to_u64(fcport->port_name);
		qlt_find_sess_invalidate_other(vha, wwn,
		    id, loop_id, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another share fcport share the same loop_id &
			 * nport id. Conflict fcport needs to finish
			 * cleanup before this fcport can proceed to login.
			 */
			conflict_fcport->conflict = fcport;
			fcport->login_pause = 1;
		}

		switch (vha->hw->current_topology) {
		default:
			switch (current_login_state) {
			case DSC_LS_PRLI_COMP:
				ql_dbg(ql_dbg_disc + ql_dbg_verbose,
				    vha, 0x20e4, "%s %d %8phC post gpdb\n",
				    __func__, __LINE__, fcport->port_name);

				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;
				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PORT_UNAVAIL:
			default:
				if (fcport->loop_id == FC_NO_LOOP_ID) {
					qla2x00_find_new_loop_id(vha, fcport);
					fcport->fw_login_state =
					    DSC_LS_PORT_UNAVAIL;
				}
				ql_dbg(ql_dbg_disc, vha, 0x20e5,
				    "%s %d %8phC\n", __func__, __LINE__,
				    fcport->port_name);
				qla24xx_fcport_handle_login(vha, fcport);
				break;
			}
			break;
		case ISP_CFG_N:
			fcport->fw_login_state = current_login_state;
			fcport->d_id = id;
			switch (current_login_state) {
			case DSC_LS_PRLI_PEND:
				/*
				 * In the middle of PRLI. Let it finish.
				 * Allow relogin code to recheck state again
				 * with GNL. Push disc_state back to DELETED
				 * so GNL can go out again
				 */
				fcport->disc_state = DSC_DELETED;
				break;
			case DSC_LS_PRLI_COMP:
				if ((e->prli_svc_param_word_3[0] & BIT_4) == 0)
					fcport->port_type = FCT_INITIATOR;
				else
					fcport->port_type = FCT_TARGET;

				data[0] = data[1] = 0;
				qla2x00_post_async_adisc_work(vha, fcport,
				    data);
				break;
			case DSC_LS_PLOGI_COMP:
				if (fcport_is_bigger(fcport)) {
					/* local adapter is smaller */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
					break;
				}
				/* fall through */
			default:
				if (fcport_is_smaller(fcport)) {
					/* local adapter is bigger */
					if (fcport->loop_id != FC_NO_LOOP_ID)
						qla2x00_clear_loop_id(fcport);

					fcport->loop_id = loop_id;
					qla24xx_fcport_handle_login(vha,
					    fcport);
				}
				break;
			}
			break;
		} /* switch (ha->current_topology) */
	}

	if (!found) {
		switch (vha->hw->current_topology) {
		case ISP_CFG_F:
		case ISP_CFG_FL:
			for (i = 0; i < n; i++) {
				e = &vha->gnl.l[i];
				/*
				 * Decode port_id[] LSB-first, matching the
				 * lookup loop above (previously reversed).
				 */
				id.b.domain = e->port_id[2];
				id.b.area = e->port_id[1];
				id.b.al_pa = e->port_id[0];
				id.b.rsvd_1 = 0;
				loop_id = le16_to_cpu(e->nport_handle);

				if (fcport->d_id.b24 == id.b24) {
					conflict_fcport =
					    qla2x00_find_fcport_by_wwpn(vha,
						e->port_name, 0);
					if (conflict_fcport) {
						ql_dbg(ql_dbg_disc + ql_dbg_verbose,
						    vha, 0x20e5,
						    "%s %d %8phC post del sess\n",
						    __func__, __LINE__,
						    conflict_fcport->port_name);
						qlt_schedule_sess_for_deletion
							(conflict_fcport);
					}
				}
				/*
				 * FW already picked this loop id for
				 * another fcport
				 */
				if (fcport->loop_id == loop_id)
					fcport->loop_id = FC_NO_LOOP_ID;
			}
			qla24xx_fcport_handle_login(vha, fcport);
			break;
		case ISP_CFG_N:
			fcport->disc_state = DSC_DELETED;
			if (time_after_eq(jiffies, fcport->dm_login_expire)) {
				if (fcport->n2n_link_reset_cnt < 2) {
					fcport->n2n_link_reset_cnt++;
					/*
					 * remote port is not sending PLOGI.
					 * Reset link to kick start his state
					 * machine
					 */
					set_bit(N2N_LINK_RESET,
					    &vha->dpc_flags);
				} else {
					if (fcport->n2n_chip_reset < 1) {
						ql_log(ql_log_info, vha, 0x705d,
						    "Chip reset to bring laser down");
						set_bit(ISP_ABORT_NEEDED,
						    &vha->dpc_flags);
						fcport->n2n_chip_reset++;
					} else {
						ql_log(ql_log_info, vha, 0x705d,
						    "Remote port %8ph is not coming back\n",
						    fcport->port_name);
						fcport->scan_state = 0;
					}
				}
				qla2xxx_wake_dpc(vha);
			} else {
				/*
				 * report port suppose to do PLOGI. Give him
				 * more time. FW will catch it.
				 */
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			}
			break;
		default:
			break;
		}
	}
} /* gnl_event */
938
/*
 * qla24xx_async_gnl_sp_done - completion handler for the GNL mailbox IOCB.
 * @sp:  the "gnlist" SRB.
 * @res: completion status.
 *
 * Fans the single name-list response out to every fcport that queued on
 * vha->gnl.fcports while the command was in flight, then posts new-session
 * work for any WWPNs firmware reports that the driver does not yet track.
 */
static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	unsigned long flags;
	struct fc_port *fcport = NULL, *tf;
	u16 i, n = 0, loop_id;
	struct event_arg ea;
	struct get_name_list_extended *e;
	u64 wwn;
	struct list_head h;
	bool found = false;

	ql_dbg(ql_dbg_disc, vha, 0x20e7,
	    "Async done-%s res %x mb[1]=%x mb[2]=%x \n",
	    sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
	    sp->u.iocb_cmd.u.mbx.in_mb[2]);

	/*
	 * NOTE(review): on timeout we return before freeing @sp or clearing
	 * vha->gnl.sent -- presumably the timeout path owns that cleanup;
	 * verify against qla2x00_async_iocb_timeout().
	 */
	if (res == QLA_FUNCTION_TIMEOUT)
		return;

	sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.sp = sp;
	ea.rc = res;

	/* in_mb[1] holds the number of bytes firmware transferred. */
	if (sp->u.iocb_cmd.u.mbx.in_mb[1] >=
	    sizeof(struct get_name_list_extended)) {
		n = sp->u.iocb_cmd.u.mbx.in_mb[1] /
		    sizeof(struct get_name_list_extended);
		ea.data[0] = sp->u.iocb_cmd.u.mbx.in_mb[1]; /* amnt xfered */
	}

	/* Mark every handle firmware reports as in use. */
	for (i = 0; i < n; i++) {
		e = &vha->gnl.l[i];
		loop_id = le16_to_cpu(e->nport_handle);
		/* mask out reserve bit */
		loop_id = (loop_id & 0x7fff);
		set_bit(loop_id, vha->hw->loop_id_map);
		wwn = wwn_to_u64(e->port_name);

		ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
		    "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
		    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
		    e->port_id[0], e->current_login_state, e->last_login_state,
		    (loop_id & 0x7fff));
	}

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);

	/* Steal the waiter list so each entry can be processed unlocked. */
	INIT_LIST_HEAD(&h);
	fcport = tf = NULL;
	if (!list_empty(&vha->gnl.fcports))
		list_splice_init(&vha->gnl.fcports, &h);
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
		list_del_init(&fcport->gnl_entry);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		ea.fcport = fcport;

		qla24xx_handle_gnl_done_event(vha, &ea);
	}

	/* create new fcport if fw has knowledge of new sessions */
	for (i = 0; i < n; i++) {
		port_id_t id;
		u64 wwnn;

		e = &vha->gnl.l[i];
		wwn = wwn_to_u64(e->port_name);

		found = false;
		list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
			if (!memcmp((u8 *)&wwn, fcport->port_name,
			    WWN_SIZE)) {
				found = true;
				break;
			}
		}

		/* port_id[] is least-significant byte first in the list. */
		id.b.domain = e->port_id[2];
		id.b.area = e->port_id[1];
		id.b.al_pa = e->port_id[0];
		id.b.rsvd_1 = 0;

		if (!found && wwn && !IS_SW_RESV_ADDR(id)) {
			ql_dbg(ql_dbg_disc, vha, 0x2065,
			    "%s %d %8phC %06x post new sess\n",
			    __func__, __LINE__, (u8 *)&wwn, id.b24);
			wwnn = wwn_to_u64(e->node_name);
			qla24xx_post_newsess_work(vha, &id, (u8 *)&wwn,
			    (u8 *)&wwnn, NULL, FC4_TYPE_UNKNOWN);
		}
	}

	/* Allow the next GNL to be issued. */
	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	vha->gnl.sent = 0;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp->free(sp);
}
1042
/*
 * qla24xx_async_gnl - issue (or piggy-back on) a Get Name List mailbox cmd.
 * @vha:    adapter state.
 * @fcport: port whose discovery state prompted the GNL.
 *
 * Multiple fcports share a single in-flight GNL: each caller is queued on
 * vha->gnl.fcports and only the first actually sends the mailbox command;
 * the completion handler fans the response out to every queued port.
 */
int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	unsigned long flags;
	u16 *mb;

	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
		return rval;

	ql_dbg(ql_dbg_disc, vha, 0x20d9,
	    "Async-gnlist WWPN %8phC \n", fcport->port_name);

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	fcport->flags |= FCF_ASYNC_SENT;
	fcport->disc_state = DSC_GNL;
	/* Snapshot generations; completion compares them for staleness. */
	fcport->last_rscn_gen = fcport->rscn_gen;
	fcport->last_login_gen = fcport->login_gen;

	list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
	if (vha->gnl.sent) {
		/* A GNL is already in flight; ride along on its response. */
		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		return QLA_SUCCESS;
	}
	vha->gnl.sent = 1;
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	sp->type = SRB_MB_IOCB;
	sp->name = "gnlist";
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);

	/* MBC_PORT_NODE_NAME_LIST; response DMA'd into vha->gnl buffer. */
	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_PORT_NODE_NAME_LIST;
	mb[1] = BIT_2 | BIT_3;
	mb[2] = MSW(vha->gnl.ldma);
	mb[3] = LSW(vha->gnl.ldma);
	mb[6] = MSW(MSD(vha->gnl.ldma));
	mb[7] = LSW(MSD(vha->gnl.ldma));
	mb[8] = vha->gnl.size;
	mb[9] = vha->vp_idx;

	sp->done = qla24xx_async_gnl_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20da,
	    "Async-%s - OUT WWPN %8phC hndl %x\n",
	    sp->name, fcport->port_name, sp->handle);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/*
	 * NOTE(review): on these failure paths vha->gnl.sent remains set and
	 * @fcport stays on vha->gnl.fcports, so later GNL requests will queue
	 * behind a command that was never sent -- verify whether a cleanup
	 * (clear gnl.sent, unlink gnl_entry) is needed here.
	 */
	return rval;
}
1112
/*
 * Queue a deferred Get Node List request for @fcport on the host's
 * work list; the DPC thread will invoke qla24xx_async_gnl() later.
 */
int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GNL);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	/* Mark the port busy before the work item can run. */
	fcport->flags |= FCF_ASYNC_ACTIVE;
	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1125
/*
 * Completion handler for the async Get Port Database mailbox IOCB.
 * Builds an event_arg and dispatches it to qla24xx_handle_gpdb_event(),
 * then frees the port-database DMA buffer and the SRB.
 */
static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct qla_hw_data *ha = vha->hw;
	fc_port_t *fcport = sp->fcport;
	u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
	struct event_arg ea;

	ql_dbg(ql_dbg_disc, vha, 0x20db,
	    "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
	    sp->name, res, fcport->port_name, mb[1], mb[2]);

	if (res == QLA_FUNCTION_TIMEOUT) {
		/*
		 * Timeout: release only the DMA buffer and bail out.
		 * NOTE(review): the SRB and fcport flags are intentionally
		 * left untouched here — presumably the timeout path owns
		 * that cleanup; confirm against qla2x00_async_iocb_timeout.
		 */
		dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
		    sp->u.iocb_cmd.u.mbx.in_dma);
		return;
	}

	/* Command completed (good or bad): drop the in-flight flags. */
	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
	memset(&ea, 0, sizeof(ea));
	ea.fcport = fcport;
	ea.sp = sp;

	qla24xx_handle_gpdb_event(vha, &ea);

	/* Handler has consumed the port database; free its DMA buffer. */
	dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
	    sp->u.iocb_cmd.u.mbx.in_dma);

	sp->free(sp);
}
1156
/* Queue a deferred PRLI request for @fcport on the host's work list. */
static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_PRLI);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	evt->u.fcport.fcport = fcport;

	return qla2x00_post_work(vha, evt);
}
1169
/*
 * Completion handler for the async PRLI IOCB: forward the login status
 * to qla24xx_handle_prli_done_event() (unless the driver is unloading)
 * and release the SRB.
 */
static void qla2x00_async_prli_sp_done(srb_t *sp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct srb_iocb *lio = &sp->u.iocb_cmd;

	ql_dbg(ql_dbg_disc, vha, 0x2129,
	    "%s %8phC res %d \n", __func__,
	    sp->fcport->port_name, res);

	sp->fcport->flags &= ~FCF_ASYNC_SENT;

	if (!test_bit(UNLOADING, &vha->dpc_flags)) {
		/* Remaining event_arg fields are zero-initialized. */
		struct event_arg ea = {
			.fcport = sp->fcport,
			.data = { lio->u.logio.data[0], lio->u.logio.data[1] },
			.iop = { lio->u.logio.iop[0], lio->u.logio.iop[1] },
			.sp = sp,
		};

		qla24xx_handle_prli_done_event(vha, &ea);
	}

	sp->free(sp);
}
1196
/*
 * qla24xx_async_prli() - issue an asynchronous PRLI (Process Login) to a
 * remote port.
 *
 * Skips the request when the link is offline or when firmware already has
 * a PLOGI/PRLI pending for this port.  On submit failure the port is
 * flagged for relogin so the DPC thread retries later.
 *
 * Returns QLA_SUCCESS when the IOCB was queued, QLA_FUNCTION_FAILED
 * otherwise.
 */
int
qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	srb_t *sp;
	struct srb_iocb *lio;
	int rval = QLA_FUNCTION_FAILED;

	if (!vha->flags.online)
		return rval;

	/* Firmware is mid-login for this port; don't race it. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_PEND ||
	    fcport->fw_login_state == DSC_LS_PRLI_PEND)
		return rval;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		return rval;

	fcport->flags |= FCF_ASYNC_SENT;
	fcport->logout_completed = 0;

	sp->type = SRB_PRLI_CMD;
	sp->name = "prli";

	lio = &sp->u.iocb_cmd;
	lio->timeout = qla2x00_async_iocb_timeout;
	/* +2s pad so firmware gets the chance to time out first. */
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	sp->done = qla2x00_async_prli_sp_done;
	lio->u.logio.flags = 0;

	/* Request NVMe service parameters when FC-NVMe is desired. */
	if (fcport->fc4f_nvme)
		lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI;

	ql_dbg(ql_dbg_disc, vha, 0x211b,
	    "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n",
	    fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24,
	    fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc");

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		/* Could not queue the IOCB; let DPC relogin retry the port. */
		fcport->flags |= FCF_LOGIN_NEEDED;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		goto done_free_sp;
	}

	return rval;

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
	return rval;
}
1250
/*
 * Queue a deferred Get Port Database request for @fcport; @opt is the
 * GPDB option byte forwarded to qla24xx_async_gpdb().
 */
int qla24xx_post_gpdb_work(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_GPDB);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	/* Mark the port busy before the work item can run. */
	fcport->flags |= FCF_ASYNC_ACTIVE;
	evt->u.fcport.fcport = fcport;
	evt->u.fcport.opt = opt;

	return qla2x00_post_work(vha, evt);
}
1264
/*
 * qla24xx_async_gpdb() - issue an asynchronous Get Port Database mailbox
 * IOCB for @fcport.  The firmware writes the port database into a buffer
 * allocated here from the s_dma_pool; the completion handler frees it.
 *
 * Returns QLA_SUCCESS when queued, QLA_FUNCTION_FAILED otherwise.
 */
int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
{
	srb_t *sp;
	struct srb_iocb *mbx;
	int rval = QLA_FUNCTION_FAILED;
	u16 *mb;
	dma_addr_t pd_dma;
	struct port_database_24xx *pd;
	struct qla_hw_data *ha = vha->hw;

	/* Need a live link, no request in flight, and a valid loop id. */
	if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
	    fcport->loop_id == FC_NO_LOOP_ID) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "%s: %8phC - not sending command.\n",
		    __func__, fcport->port_name);
		return rval;
	}

	fcport->disc_state = DSC_GPDB;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	fcport->flags |= FCF_ASYNC_SENT;
	sp->type = SRB_MB_IOCB;
	sp->name = "gpdb";
	/* Snapshot generations so stale completions can be detected. */
	sp->gen1 = fcport->rscn_gen;
	sp->gen2 = fcport->login_gen;

	mbx = &sp->u.iocb_cmd;
	mbx->timeout = qla2x00_async_iocb_timeout;
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);

	pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
	if (pd == NULL) {
		ql_log(ql_log_warn, vha, 0xd043,
		    "Failed to allocate port database structure.\n");
		goto done_free_sp;
	}

	mb = sp->u.iocb_cmd.u.mbx.out_mb;
	mb[0] = MBC_GET_PORT_DATABASE;
	mb[1] = fcport->loop_id;
	mb[2] = MSW(pd_dma);
	mb[3] = LSW(pd_dma);
	mb[6] = MSW(MSD(pd_dma));
	mb[7] = LSW(MSD(pd_dma));
	mb[9] = vha->vp_idx;
	mb[10] = opt;

	/* Completion handler frees this buffer. */
	mbx->u.mbx.in = (void *)pd;
	mbx->u.mbx.in_dma = pd_dma;

	sp->done = qla24xx_async_gpdb_sp_done;

	ql_dbg(ql_dbg_disc, vha, 0x20dc,
	    "Async-%s %8phC hndl %x opt %x\n",
	    sp->name, fcport->port_name, sp->handle, opt);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	return rval;

done_free_sp:
	if (pd)
		dma_pool_free(ha->s_dma_pool, pd, pd_dma);

	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	/*
	 * Retry via the work queue on any failure.
	 * NOTE(review): this re-post is unconditional — if the failure is
	 * persistent it keeps re-queueing; confirm intended back-off.
	 */
	qla24xx_post_gpdb_work(vha, fcport, opt);
	return rval;
}
1340
/*
 * Finalize a successful port-database read: bump the login generation,
 * mark the session live and, on first login, count it and hand the fcport
 * to the upper layers.  tgt.sess_lock is dropped around
 * qla24xx_sched_upd_fcport() since that path must run unlocked.
 */
static
void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;

	spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	ea->fcport->login_gen++;
	ea->fcport->deleted = 0;
	ea->fcport->logout_on_delete = 1;

	if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
		/* First successful login for this port: register it. */
		vha->fcport_count++;
		ea->fcport->login_succ = 1;

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
		qla24xx_sched_upd_fcport(ea->fcport);
		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
	} else if (ea->fcport->login_succ) {
		/*
		 * We have an existing session. A late RSCN delivery
		 * must have triggered the session to be re-validate.
		 * Session is still valid.
		 */
		ql_dbg(ql_dbg_disc, vha, 0x20d6,
		    "%s %d %8phC session revalidate success\n",
		    __func__, __LINE__, ea->fcport->port_name);
		ea->fcport->disc_state = DSC_LOGIN_COMPLETE;
	}
	spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
1371
/*
 * Dispatch on the firmware login state reported in the port database.
 * Stale completions (generation mismatch) are discarded; PRLI-complete
 * sessions are finalized, login-in-progress states trigger a relogin,
 * and anything else schedules the session for deletion.
 */
static
void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;
	struct port_database_24xx *pd;
	struct srb *sp = ea->sp;
	uint8_t	ls;

	pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;

	fcport->flags &= ~FCF_ASYNC_SENT;

	ql_dbg(ql_dbg_disc, vha, 0x20d2,
	    "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name,
	    fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme,
	    ea->rc);

	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* current_login_state: high nibble = NVMe, low nibble = FCP. */
	if (fcport->fc4f_nvme)
		ls = pd->current_login_state >> 4;
	else
		ls = pd->current_login_state & 0xf;

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */

		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		/* An RSCN arrived meanwhile; replay it and restart. */
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	switch (ls) {
	case PDS_PRLI_COMPLETE:
		__qla24xx_parse_gpdb(vha, fcport, pd);
		break;
	case PDS_PLOGI_PENDING:
	case PDS_PLOGI_COMPLETE:
	case PDS_PRLI_PENDING:
	case PDS_PRLI2_PENDING:
		/* Set discovery state back to GNL to Relogin attempt */
		if (qla_dual_mode_enabled(vha) ||
		    qla_ini_mode_enabled(vha)) {
			fcport->disc_state = DSC_GNL;
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
		return;
	case PDS_LOGO_PENDING:
	case PDS_PORT_UNAVAILABLE:
	default:
		ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n",
		    __func__, __LINE__, fcport->port_name);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}
	/* PRLI complete: mark the session up (may drop/retake sess_lock). */
	__qla24xx_handle_gpdb_event(vha, ea);
} /* gpdb event */
1435
/*
 * Decide whether this adapter should originate the login to @fcport and,
 * if so, kick off an async PLOGI (allocating a loop id first if needed).
 */
static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u8 do_login = 1;
	int rc;

	/* Pure target mode never originates a login. */
	if (qla_tgt_mode_enabled(vha))
		return;

	/*
	 * On an N2N (point-to-point) topology in dual mode only one side
	 * initiates: the port with the larger WWPN, or the smaller side
	 * once the remote PLOGI completed and the NACK deadline passed.
	 */
	if (qla_dual_mode_enabled(vha) && N2N_TOPO(vha->hw)) {
		u64 local_wwn = wwn_to_u64(vha->port_name);
		u64 remote_wwn = wwn_to_u64(fcport->port_name);

		do_login = 0;
		if (local_wwn > remote_wwn)
			do_login = 1;
		else if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
		    time_after_eq(jiffies, fcport->plogi_nack_done_deadline))
			do_login = 1;
	}

	if (!do_login || !fcport->login_retry)
		return;

	fcport->login_retry--;

	if (fcport->loop_id == FC_NO_LOOP_ID) {
		fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
		rc = qla2x00_find_new_loop_id(vha, fcport);
		if (rc) {
			ql_dbg(ql_dbg_disc, vha, 0x20e6,
			    "%s %d %8phC post del sess - out of loopid\n",
			    __func__, __LINE__, fcport->port_name);
			fcport->scan_state = 0;
			qlt_schedule_sess_for_deletion(fcport);
			return;
		}
	}

	ql_dbg(ql_dbg_disc, vha, 0x20bf,
	    "%s %d %8phC post login\n",
	    __func__, __LINE__, fcport->port_name);
	qla2x00_post_async_login_work(vha, fcport, NULL);
}
1484
/*
 * qla24xx_fcport_handle_login() - drive the discovery/login state machine
 * for one remote port.
 *
 * Depending on fcport->disc_state (and topology) this schedules the next
 * step: GNNID/GNL lookup, PLOGI/PRLI, GPDB read, or ADISC revalidation.
 * Always returns 0; progress is made via posted work items and dpc flags.
 */
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	u16 data[2];
	u64 wwn;
	u16 sec;

	ql_dbg(ql_dbg_disc, vha, 0x20d8,
	    "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause, fcport->flags,
	    fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->login_gen, fcport->loop_id, fcport->scan_state);

	/* Only ports the last scan actually found are progressed. */
	if (fcport->scan_state != QLA_FCPORT_FOUND)
		return 0;

	/* In dual mode, don't interfere while firmware is mid-login. */
	if ((fcport->loop_id != FC_NO_LOOP_ID) &&
	    qla_dual_mode_enabled(vha) &&
	    ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	     (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
		return 0;

	/* Honor the PLOGI-NACK grace period on non-N2N topologies. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    !N2N_TOPO(vha->hw)) {
		if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			return 0;
		}
	}

	/* for pure Target Mode. Login will not be initiated */
	if (vha->host->active_mode == MODE_TARGET)
		return 0;

	/* One async op at a time per port; retry via DPC later. */
	if (fcport->flags & FCF_ASYNC_SENT) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return 0;
	}

	switch (fcport->disc_state) {
	case DSC_DELETED:
		wwn = wwn_to_u64(fcport->node_name);
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/* N2N: bigger WWPN side sends ELS PLOGI directly. */
			if (fcport_is_smaller(fcport)) {
				/* this adapter is bigger */
				if (fcport->login_retry) {
					if (fcport->loop_id == FC_NO_LOOP_ID) {
						qla2x00_find_new_loop_id(vha,
						    fcport);
						fcport->fw_login_state =
						    DSC_LS_PORT_UNAVAIL;
					}
					fcport->login_retry--;
					qla_post_els_plogi_work(vha, fcport);
				} else {
					ql_log(ql_log_info, vha, 0x705d,
					    "Unable to reach remote port %8phC",
					    fcport->port_name);
				}
			} else {
				qla24xx_post_gnl_work(vha, fcport);
			}
			break;
		default:
			if (wwn == 0) {
				/* Node name unknown: query it first. */
				ql_dbg(ql_dbg_disc, vha, 0xffff,
				    "%s %d %8phC post GNNID\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnnid_work(vha, fcport);
			} else if (fcport->loop_id == FC_NO_LOOP_ID) {
				ql_dbg(ql_dbg_disc, vha, 0x20bd,
				    "%s %d %8phC post gnl\n",
				    __func__, __LINE__, fcport->port_name);
				qla24xx_post_gnl_work(vha, fcport);
			} else {
				qla_chk_n2n_b4_login(vha, fcport);
			}
			break;
		}
		break;

	case DSC_GNL:
		switch (vha->hw->current_topology) {
		case ISP_CFG_N:
			/*
			 * 0x6 — presumably PDS_PRLI_COMPLETE (low nibble of
			 * current_login_state); confirm against firmware spec.
			 */
			if ((fcport->current_login_state & 0xf) == 0x6) {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post GPDB work\n",
				    __func__, __LINE__, fcport->port_name);
				fcport->chip_reset =
					vha->hw->base_qpair->chip_reset;
				qla24xx_post_gpdb_work(vha, fcport, 0);
			}  else {
				ql_dbg(ql_dbg_disc, vha, 0x2118,
				    "%s %d %8phC post %s PRLI\n",
				    __func__, __LINE__, fcport->port_name,
				    fcport->fc4f_nvme ? "NVME" : "FC");
				qla24xx_post_prli_work(vha, fcport);
			}
			break;
		default:
			if (fcport->login_pause) {
				/* Conflict in progress: note gens, retry later. */
				fcport->last_rscn_gen = fcport->rscn_gen;
				fcport->last_login_gen = fcport->login_gen;
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
				break;
			}
			qla_chk_n2n_b4_login(vha, fcport);
			break;
		}
		break;

	case DSC_LOGIN_FAILED:
		if (N2N_TOPO(vha->hw))
			qla_chk_n2n_b4_login(vha, fcport);
		else
			qlt_schedule_sess_for_deletion(fcport);
		break;

	case DSC_LOGIN_COMPLETE:
		/* recheck login state */
		data[0] = data[1] = 0;
		qla2x00_post_async_adisc_work(vha, fcport, data);
		break;

	case DSC_LOGIN_PEND:
		if (fcport->fw_login_state == DSC_LS_PLOGI_COMP)
			qla24xx_post_prli_work(vha, fcport);
		break;

	case DSC_UPD_FCPORT:
		/* Warn once per minute about slow rport registration. */
		sec =  jiffies_to_msecs(jiffies -
		    fcport->jiffies_at_registration)/1000;
		if (fcport->sec_since_registration < sec && sec &&
		    !(sec % 60)) {
			fcport->sec_since_registration = sec;
			ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
			    "%s %8phC - Slow Rport registration(%d Sec)\n",
			    __func__, fcport->port_name, sec);
		}

		if (fcport->next_disc_state != DSC_DELETE_PEND)
			fcport->next_disc_state = DSC_ADISC;
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		break;

	default:
		break;
	}

	return 0;
}
1637
/*
 * Queue creation of a new session for the remote port identified by @id /
 * @port_name (@node_name may be NULL).  @pla is the pending-login context
 * handed through to the work handler; @fc4_type selects FCP vs NVMe.
 */
int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id,
    u8 *port_name, u8 *node_name, void *pla, u8 fc4_type)
{
	struct qla_work_evt *evt = qla2x00_alloc_work(vha, QLA_EVT_NEW_SESS);

	if (evt == NULL)
		return QLA_FUNCTION_FAILED;

	evt->u.new_sess.id = *id;
	evt->u.new_sess.pla = pla;
	evt->u.new_sess.fc4_type = fc4_type;
	memcpy(evt->u.new_sess.port_name, port_name, WWN_SIZE);
	if (node_name != NULL)
		memcpy(evt->u.new_sess.node_name, node_name, WWN_SIZE);

	return qla2x00_post_work(vha, evt);
}
1656
/*
 * Handle an RSCN for the nport id in @ea: mark the matching fcport (if
 * any) as needing a rescan and kick the delayed fabric scan work unless
 * a scan is already queued/running.
 */
void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea)
{
	unsigned long flags;
	fc_port_t *fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);

	if (fcport) {
		fcport->rscn_gen++;
		fcport->scan_needed = 1;
	}

	spin_lock_irqsave(&vha->work_lock, flags);
	if (!vha->scan.scan_flags) {
		ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__);
		vha->scan.scan_flags |= SF_QUEUED;
		schedule_delayed_work(&vha->scan.scan_work, 5);
	}
	spin_unlock_irqrestore(&vha->work_lock, flags);
}
1676
/*
 * Relogin event handler: if an RSCN arrived since the last attempt,
 * refresh the node list first; otherwise continue the login state
 * machine for the port.  No-op while the driver is unloading.
 */
void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
	struct event_arg *ea)
{
	fc_port_t *fcport = ea->fcport;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x2102,
	    "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, fcport->login_pause,
	    fcport->deleted, fcport->conflict,
	    fcport->last_rscn_gen, fcport->rscn_gen,
	    fcport->last_login_gen, fcport->login_gen,
	    fcport->flags);

	if (fcport->last_rscn_gen != fcport->rscn_gen) {
		/* Port state may be stale; re-query the node list. */
		ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n",
		    __func__, __LINE__, fcport->port_name);
		qla24xx_post_gnl_work(vha, fcport);
		return;
	}

	qla24xx_fcport_handle_login(vha, fcport);
}
1703
1704 /*
1705 * RSCN(s) came in for this fcport, but the RSCN(s) was not able
1706 * to be consumed by the fcport
1707 */
/*
 * Replay a previously unconsumed RSCN for @fcport by resubmitting it as a
 * port-address RSCN, so the scan machinery picks the port up again.
 */
void qla_rscn_replay(fc_port_t *fcport)
{
	struct event_arg ea;

	/* A session already being torn down rediscovers on its own. */
	if (fcport->disc_state == DSC_DELETE_PEND)
		return;

	/* Nothing pending for this port. */
	if (!fcport->scan_needed)
		return;

	memset(&ea, 0, sizeof(ea));
	ea.id = fcport->d_id;
	ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
	qla2x00_handle_rscn(fcport->vha, &ea);
}
1726
1727 static void
qla2x00_tmf_iocb_timeout(void * data)1728 qla2x00_tmf_iocb_timeout(void *data)
1729 {
1730 srb_t *sp = data;
1731 struct srb_iocb *tmf = &sp->u.iocb_cmd;
1732
1733 tmf->u.tmf.comp_status = CS_TIMEOUT;
1734 complete(&tmf->u.tmf.comp);
1735 }
1736
/* TM IOCB completion: wake the waiting submitter (@res is not used). */
static void qla2x00_tmf_sp_done(srb_t *sp, int res)
{
	complete(&sp->u.iocb_cmd.u.tmf.comp);
}
1743
/*
 * qla2x00_async_tm_cmd() - issue a task-management IOCB (e.g. LUN reset)
 * and wait for its completion.
 * @fcport: target remote port
 * @flags: TM control flags (e.g. TCF_LUN_RESET)
 * @lun: target LUN
 * @tag: command tag passed through in the IOCB
 *
 * Blocks until the TM completes or times out, then (unless unloading or
 * on ISPFx00) issues a marker IOCB to resynchronize the firmware queues.
 *
 * Returns the TM completion status, or QLA_FUNCTION_FAILED on setup
 * failure.
 */
int
qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
	uint32_t tag)
{
	struct scsi_qla_host *vha = fcport->vha;
	struct srb_iocb *tm_iocb;
	srb_t *sp;
	int rval = QLA_FUNCTION_FAILED;

	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp)
		goto done;

	tm_iocb = &sp->u.iocb_cmd;
	sp->type = SRB_TM_CMD;
	sp->name = "tmf";

	tm_iocb->timeout = qla2x00_tmf_iocb_timeout;
	init_completion(&tm_iocb->u.tmf.comp);
	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));

	tm_iocb->u.tmf.flags = flags;
	tm_iocb->u.tmf.lun = lun;
	tm_iocb->u.tmf.data = tag;
	sp->done = qla2x00_tmf_sp_done;

	ql_dbg(ql_dbg_taskm, vha, 0x802f,
	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS)
		goto done_free_sp;
	/* Either qla2x00_tmf_sp_done() or the timeout callback wakes us. */
	wait_for_completion(&tm_iocb->u.tmf.comp);

	rval = tm_iocb->u.tmf.data;

	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x8030,
		    "TM IOCB failed (%x).\n", rval);
	}

	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
		flags = tm_iocb->u.tmf.flags;
		lun = (uint16_t)tm_iocb->u.tmf.lun;

		/* Issue Marker IOCB */
		qla2x00_marker(vha, vha->hw->base_qpair,
		    fcport->loop_id, lun,
		    flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
	}

done_free_sp:
	sp->free(sp);
	fcport->flags &= ~FCF_ASYNC_SENT;
done:
	return rval;
}
1803
/*
 * qla24xx_async_abort_command() - abort an outstanding SRB.
 *
 * Locates @sp's handle in its queue pair's outstanding-command table
 * (under the qpair lock), then issues the appropriate abort: an
 * FXDISC_ABORT_IOCTL for ISPFx00 discovery commands, otherwise a
 * standard abort IOCB via qla24xx_async_abort_cmd().
 *
 * Returns QLA_FUNCTION_FAILED when the command is no longer outstanding.
 */
int
qla24xx_async_abort_command(srb_t *sp)
{
	unsigned long   flags = 0;

	uint32_t	handle;
	fc_port_t	*fcport = sp->fcport;
	struct qla_qpair *qpair = sp->qpair;
	struct scsi_qla_host *vha = fcport->vha;
	struct req_que *req = qpair->req;

	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
		if (req->outstanding_cmds[handle] == sp)
			break;
	}
	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);

	if (handle == req->num_outstanding_cmds) {
		/* Command not found. */
		return QLA_FUNCTION_FAILED;
	}
	if (sp->type == SRB_FXIOCB_DCMD)
		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
		    FXDISC_ABORT_IOCTL);

	return qla24xx_async_abort_cmd(sp, true);
}
1832
/*
 * Handle PRLI completion: on success read the port database; on failure
 * fall back from NVMe to FCP PRLI, retry a busy switch, kick an N2N link
 * reset, or tear the session down for a fresh relogin.
 */
static void
qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		ql_dbg(ql_dbg_disc, vha, 0x2118,
		    "%s %d %8phC post gpdb\n",
		    __func__, __LINE__, ea->fcport->port_name);

		ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
		ea->fcport->logout_on_delete = 1;
		/* Record negotiated NVMe service params and first-burst size. */
		ea->fcport->nvme_prli_service_param = ea->iop[0];
		if (ea->iop[0] & NVME_PRLI_SP_FIRST_BURST)
			ea->fcport->nvme_first_burst_size =
			    (ea->iop[1] & 0xffff) * 512;
		else
			ea->fcport->nvme_first_burst_size = 0;
		qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		break;
	default:
		if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) &&
		    (ea->iop[1] == 0x50000)) {   /* reson 5=busy expl:0x0 */
			/* Switch is busy; just schedule a relogin retry. */
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
			break;
		}

		if (ea->fcport->fc4f_nvme) {
			/* NVMe PRLI rejected: retry as plain FCP PRLI. */
			ql_dbg(ql_dbg_disc, vha, 0x2118,
				"%s %d %8phC post fc4 prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			ea->fcport->fc4f_nvme = 0;
			qla24xx_post_prli_work(vha, ea->fcport);
			return;
		}

		/* at this point both PRLI NVME & PRLI FCP failed */
		if (N2N_TOPO(vha->hw)) {
			if (ea->fcport->n2n_link_reset_cnt < 3) {
				ea->fcport->n2n_link_reset_cnt++;
				/*
				 * remote port is not sending Plogi. Reset
				 * link to kick start his state machine
				 */
				set_bit(N2N_LINK_RESET, &vha->dpc_flags);
			} else {
				ql_log(ql_log_warn, vha, 0x2119,
				    "%s %d %8phC Unable to reconnect\n",
				    __func__, __LINE__, ea->fcport->port_name);
			}
		} else {
			/*
			 * switch connect. login failed. Take connection
			 * down and allow relogin to retrigger
			 */
			ea->fcport->flags &= ~FCF_ASYNC_SENT;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
}
1898
/*
 * qla24xx_handle_plogi_done_event() - process the completion of an async
 * PLOGI for ea->fcport.
 *
 * Discards stale completions (remote login in progress, session deleted,
 * or login/RSCN generation mismatch), then dispatches on the mailbox
 * status: success continues with PRLI or GPDB; errors schedule relogin or
 * mark the device lost; loop-id / nport-id conflicts are resolved by
 * re-querying the node list or invalidating the conflicting session.
 */
void
qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
{
	port_id_t cid;	/* conflict Nport id */
	u16 lid;
	struct fc_port *conflict_fcport;
	unsigned long flags;
	struct fc_port *fcport = ea->fcport;

	ql_dbg(ql_dbg_disc, vha, 0xffff,
	    "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
	    __func__, fcport->port_name, fcport->disc_state,
	    fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
	    ea->sp->gen1, fcport->rscn_gen,
	    ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);

	if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
	    (fcport->fw_login_state == DSC_LS_PRLI_PEND)) {
		ql_dbg(ql_dbg_disc, vha, 0x20ea,
		    "%s %d %8phC Remote is trying to login\n",
		    __func__, __LINE__, fcport->port_name);
		return;
	}

	if ((fcport->disc_state == DSC_DELETE_PEND) ||
	    (fcport->disc_state == DSC_DELETED)) {
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	}

	if (ea->sp->gen2 != fcport->login_gen) {
		/* target side must have changed it. */
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC generation changed\n",
		    __func__, fcport->port_name);
		set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		return;
	} else if (ea->sp->gen1 != fcport->rscn_gen) {
		ql_dbg(ql_dbg_disc, vha, 0x20d3,
		    "%s %8phC RSCN generation changed\n",
		    __func__, fcport->port_name);
		qla_rscn_replay(fcport);
		qlt_schedule_sess_for_deletion(fcport);
		return;
	}

	WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n",
	    ea->data[0]);

	switch (ea->data[0]) {
	case MBS_COMMAND_COMPLETE:
		/*
		 * Driver must validate login state - If PRLI not complete,
		 * force a relogin attempt via implicit LOGO, PLOGI, and PRLI
		 * requests.
		 */
		if (ea->fcport->fc4f_nvme) {
			ql_dbg(ql_dbg_disc, vha, 0x2117,
				"%s %d %8phC post prli\n",
				__func__, __LINE__, ea->fcport->port_name);
			qla24xx_post_prli_work(vha, ea->fcport);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ea,
			    "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->loop_id, ea->fcport->d_id.b24);

			/* Claim the loop id and mark the fw login complete. */
			set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
			ea->fcport->logout_on_delete = 1;
			ea->fcport->send_els_logo = 0;
			ea->fcport->fw_login_state = DSC_LS_PRLI_COMP;
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			qla24xx_post_gpdb_work(vha, ea->fcport, 0);
		}
		break;
	case MBS_COMMAND_ERROR:
		ql_dbg(ql_dbg_disc, vha, 0x20eb, "%s %d %8phC cmd error %x\n",
		    __func__, __LINE__, ea->fcport->port_name, ea->data[1]);

		ea->fcport->flags &= ~FCF_ASYNC_SENT;
		ea->fcport->disc_state = DSC_LOGIN_FAILED;
		if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED)
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		else
			qla2x00_mark_device_lost(vha, ea->fcport, 1, 0);
		break;
	case MBS_LOOP_ID_USED:
		/* data[1] = IO PARAM 1 = nport ID */
		cid.b.domain = (ea->iop[1] >> 16) & 0xff;
		cid.b.area   = (ea->iop[1] >>  8) & 0xff;
		cid.b.al_pa  = ea->iop[1] & 0xff;
		cid.b.rsvd_1 = 0;

		ql_dbg(ql_dbg_disc, vha, 0x20ec,
		    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
		    __func__, __LINE__, ea->fcport->port_name,
		    ea->fcport->loop_id, cid.b24);

		/* Abandon this loop id and re-query the node list. */
		set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
		ea->fcport->loop_id = FC_NO_LOOP_ID;
		qla24xx_post_gnl_work(vha, ea->fcport);
		break;
	case MBS_PORT_ID_USED:
		lid = ea->iop[1] & 0xffff;
		qlt_find_sess_invalidate_other(vha,
		    wwn_to_u64(ea->fcport->port_name),
		    ea->fcport->d_id, lid, &conflict_fcport);

		if (conflict_fcport) {
			/*
			 * Another fcport share the same loop_id/nport id.
			 * Conflict fcport needs to finish cleanup before this
			 * fcport can proceed to login.
			 */
			conflict_fcport->conflict = ea->fcport;
			ea->fcport->login_pause = 1;

			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);
		} else {
			ql_dbg(ql_dbg_disc, vha, 0x20ed,
			    "%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
			    __func__, __LINE__, ea->fcport->port_name,
			    ea->fcport->d_id.b24, lid);

			/* Adopt firmware's loop id and restart the session. */
			qla2x00_clear_loop_id(ea->fcport);
			set_bit(lid, vha->hw->loop_id_map);
			ea->fcport->loop_id = lid;
			ea->fcport->keep_nport_handle = 0;
			qlt_schedule_sess_for_deletion(ea->fcport);
		}
		break;
	}
	return;
}
2039
2040 void
qla2x00_async_logout_done(struct scsi_qla_host * vha,fc_port_t * fcport,uint16_t * data)2041 qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
2042 uint16_t *data)
2043 {
2044 qlt_logo_completion_handler(fcport, data[0]);
2045 fcport->login_gen++;
2046 fcport->flags &= ~FCF_ASYNC_ACTIVE;
2047 return;
2048 }
2049
2050 /****************************************************************************/
2051 /* QLogic ISP2x00 Hardware Support Functions. */
2052 /****************************************************************************/
2053
/*
 * qla83xx_nic_core_fw_load() - participate in the ISP83xx Inter-Driver
 * Communication (IDC) protocol to load/own the NIC core firmware.
 *
 * Under the IDC lock: announce this function's presence, decide reset
 * ownership, negotiate the IDC major/minor versions (failing the load on
 * a major-version mismatch), and — if we are the reset owner — push the
 * device state to READY before running the IDC state handler.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_major_ver, idc_minor_ver;
	uint16_t config[4];

	qla83xx_idc_lock(vha, 0);

	/* SV: TODO: Assign initialization timeout from
	 * flash-info / other param
	 */
	ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
	ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;

	/* Set our fcoe function presence */
	if (__qla83xx_set_drv_presence(vha) != QLA_SUCCESS) {
		ql_dbg(ql_dbg_p3p, vha, 0xb077,
		    "Error while setting DRV-Presence.\n");
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	/* Decide the reset ownership */
	qla83xx_reset_ownership(vha);

	/*
	 * On first protocol driver load:
	 * Init-Owner: Set IDC-Major-Version and Clear IDC-Lock-Recovery
	 * register.
	 * Others: Check compatibility with current IDC Major version.
	 */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MAJOR_VERSION, &idc_major_ver);
	if (ha->flags.nic_core_reset_owner) {
		/* Set IDC Major version */
		idc_major_ver = QLA83XX_SUPP_IDC_MAJOR_VERSION;
		qla83xx_wr_reg(vha, QLA83XX_IDC_MAJOR_VERSION, idc_major_ver);

		/* Clearing IDC-Lock-Recovery register */
		qla83xx_wr_reg(vha, QLA83XX_IDC_LOCK_RECOVERY, 0);
	} else if (idc_major_ver != QLA83XX_SUPP_IDC_MAJOR_VERSION) {
		/*
		 * Clear further IDC participation if we are not compatible with
		 * the current IDC Major Version.
		 */
		ql_log(ql_log_warn, vha, 0xb07d,
		    "Failing load, idc_major_ver=%d, expected_major_ver=%d.\n",
		    idc_major_ver, QLA83XX_SUPP_IDC_MAJOR_VERSION);
		__qla83xx_clear_drv_presence(vha);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}
	/* Each function sets its supported Minor version. */
	qla83xx_rd_reg(vha, QLA83XX_IDC_MINOR_VERSION, &idc_minor_ver);
	/* Two bits per port in the minor-version register. */
	idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
	qla83xx_wr_reg(vha, QLA83XX_IDC_MINOR_VERSION, idc_minor_ver);

	if (ha->flags.nic_core_reset_owner) {
		memset(config, 0, sizeof(config));
		if (!qla81xx_get_port_config(vha, config))
			qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
			    QLA8XXX_DEV_READY);
	}

	rval = qla83xx_idc_state_handler(vha);

exit:
	qla83xx_idc_unlock(vha, 0);

	return rval;
}
2126
2127 /*
2128 * qla2x00_initialize_adapter
2129 * Initialize board.
2130 *
2131 * Input:
2132 * ha = adapter block pointer.
2133 *
2134 * Returns:
2135 * 0 = success
2136 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Start from a clean statistics slate. */
	memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
	memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	/* Queue 0 (default request/response queue) always exists. */
	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	/* Check for secure flash support */
	if (IS_QLA28XX(ha)) {
		if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) {
			ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n");
			ha->flags.secure_adapter = 1;
		}
	}

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return rval;
	}

	if (IS_QLA8044(ha)) {
		qla8044_read_reset_template(vha);

		/* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
		 * If DONTRESET_BIT0 is set, drivers should not set dev_state
		 * to NEED_RESET. But if NEED_RESET is set, drivers should
		 * honor the reset. */
		if (ql2xdontresethba == 1)
			qla8044_set_idc_dontreset(vha);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	/* Run diagnostics and load firmware only when a usable image is
	 * not already executing on the RISC. */
	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	/* Rings are initialized only when the initiator role is enabled;
	 * otherwise rval keeps its QLA_SUCCESS value from above. */
	if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	/* No point in continuing if firmware initialization failed. */
	if (rval != QLA_SUCCESS)
		return rval;

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	/* Load the NIC Core f/w if we are the first protocol driver. */
	if (IS_QLA8031(ha)) {
		rval = qla83xx_nic_core_fw_load(vha);
		if (rval)
			ql_log(ql_log_warn, vha, 0x0124,
			    "Error in initializing NIC Core f/w.\n");
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	/* Report the driver version to the firmware. */
	if (IS_P3P_TYPE(ha))
		qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
	else
		qla25xx_set_driver_version(vha, QLA2XXX_VERSION);

	return (rval);
}
2276
2277 /**
2278 * qla2100_pci_config() - Setup ISP21xx PCI configuration registers.
2279 * @vha: HA context
2280 *
2281 * Returns 0 on success.
2282 */
2283 int
qla2100_pci_config(scsi_qla_host_t * vha)2284 qla2100_pci_config(scsi_qla_host_t *vha)
2285 {
2286 uint16_t w;
2287 unsigned long flags;
2288 struct qla_hw_data *ha = vha->hw;
2289 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2290
2291 pci_set_master(ha->pdev);
2292 pci_try_set_mwi(ha->pdev);
2293
2294 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2295 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
2296 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2297
2298 pci_disable_rom(ha->pdev);
2299
2300 /* Get PCI bus information. */
2301 spin_lock_irqsave(&ha->hardware_lock, flags);
2302 ha->pci_attr = RD_REG_WORD(®->ctrl_status);
2303 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2304
2305 return QLA_SUCCESS;
2306 }
2307
2308 /**
2309 * qla2300_pci_config() - Setup ISP23xx PCI configuration registers.
2310 * @vha: HA context
2311 *
2312 * Returns 0 on success.
2313 */
int
qla2300_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable PCI parity and SERR error reporting. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);

	/* Keep INTx generation enabled on 2322/6322 parts. */
	if (IS_QLA2322(ha) || IS_QLA6322(ha))
		w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/*
	 * If this is a 2300 card and not 2312, reset the
	 * COMMAND_INVALIDATE due to a bug in the 2300. Unfortunately,
	 * the 2310 also reports itself as a 2300 so we need to get the
	 * fb revision level -- a 6 indicates it really is a 2300 and
	 * not a 2310.
	 */
	if (IS_QLA2300(ha)) {
		spin_lock_irqsave(&ha->hardware_lock, flags);

		/* Pause RISC; poll up to 300ms for the pause to take. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) != 0)
				break;

			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);

		/* Get the fb rev level */
		ha->fb_rev = RD_FB_CMD_REG(ha, reg);

		/* True 2300: clear MWI to avoid the COMMAND_INVALIDATE bug. */
		if (ha->fb_rev == FPM_2300)
			pci_clear_mwi(ha->pdev);

		/* Deselect FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x0);
		RD_REG_WORD(&reg->ctrl_status);

		/* Release RISC module; poll until the pause bit drops. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0)
				break;

			udelay(10);
		}

		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	pci_disable_rom(ha->pdev);

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_WORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2389
2390 /**
2391 * qla24xx_pci_config() - Setup ISP24xx PCI configuration registers.
2392 * @vha: HA context
2393 *
2394 * Returns 0 on success.
2395 */
int
qla24xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and keep INTx generation enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);

	/* PCI-X -- adjust Maximum Memory Read Byte Count (2048). */
	if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
		pcix_set_mmrbc(ha->pdev, 2048);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	/* Get PCI bus information. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->pci_attr = RD_REG_DWORD(&reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2433
2434 /**
2435 * qla25xx_pci_config() - Setup ISP25xx PCI configuration registers.
2436 * @vha: HA context
2437 *
2438 * Returns 0 on success.
2439 */
int
qla25xx_pci_config(scsi_qla_host_t *vha)
{
	uint16_t w;
	struct qla_hw_data *ha = vha->hw;

	pci_set_master(ha->pdev);
	pci_try_set_mwi(ha->pdev);

	/* Enable parity/SERR reporting and keep INTx generation enabled. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
	w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
	w &= ~PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(ha->pdev, PCI_COMMAND, w);

	/* PCIe -- adjust Maximum Read Request Size (4096). */
	if (pci_is_pcie(ha->pdev))
		pcie_set_readrq(ha->pdev, 4096);

	pci_disable_rom(ha->pdev);

	ha->chip_revision = ha->pdev->revision;

	return QLA_SUCCESS;
}
2464
2465 /**
2466 * qla2x00_isp_firmware() - Choose firmware image.
2467 * @vha: HA context
2468 *
2469 * Returns 0 on success.
2470 */
2471 static int
qla2x00_isp_firmware(scsi_qla_host_t * vha)2472 qla2x00_isp_firmware(scsi_qla_host_t *vha)
2473 {
2474 int rval;
2475 uint16_t loop_id, topo, sw_cap;
2476 uint8_t domain, area, al_pa;
2477 struct qla_hw_data *ha = vha->hw;
2478
2479 /* Assume loading risc code */
2480 rval = QLA_FUNCTION_FAILED;
2481
2482 if (ha->flags.disable_risc_code_load) {
2483 ql_log(ql_log_info, vha, 0x0079, "RISC CODE NOT loaded.\n");
2484
2485 /* Verify checksum of loaded RISC code. */
2486 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
2487 if (rval == QLA_SUCCESS) {
2488 /* And, verify we are not in ROM code. */
2489 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2490 &area, &domain, &topo, &sw_cap);
2491 }
2492 }
2493
2494 if (rval)
2495 ql_dbg(ql_dbg_init, vha, 0x007a,
2496 "**** Load RISC code ****.\n");
2497
2498 return (rval);
2499 }
2500
2501 /**
2502 * qla2x00_reset_chip() - Reset ISP chip.
2503 * @vha: HA context
2504 *
2505 * Returns 0 on success.
2506 */
int
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t cnt;
	uint16_t cmd;
	int rval = QLA_FUNCTION_FAILED;

	/* Nothing can be done if the PCI channel is gone. */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return rval;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			/* Poll up to 3 seconds for the pause to take. */
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to have a delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Wait for mailbox 0 to come out of the busy state. */
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;
}
2647
2648 /**
2649 * qla81xx_reset_mpi() - Reset's MPI FW via Write MPI Register MBC.
2650 * @vha: HA context
2651 *
2652 * Returns 0 on success.
2653 */
2654 static int
qla81xx_reset_mpi(scsi_qla_host_t * vha)2655 qla81xx_reset_mpi(scsi_qla_host_t *vha)
2656 {
2657 uint16_t mb[4] = {0x1010, 0, 1, 0};
2658
2659 if (!IS_QLA81XX(vha->hw))
2660 return QLA_SUCCESS;
2661
2662 return qla81xx_write_mpi_register(vha, mb);
2663 }
2664
2665 /**
2666 * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC.
2667 * @vha: HA context
2668 *
2669 * Returns 0 on success.
2670 */
static inline int
qla24xx_reset_risc(scsi_qla_host_t *vha)
{
	unsigned long flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t cnt;
	uint16_t wd;
	static int abts_cnt; /* ISP abort retry counts */
	int rval = QLA_SUCCESS;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset RISC: shut down DMA and wait (up to 300ms) for it to drain. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	/* Record DMA-shutdown completion in the fw-dump capture flags. */
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e,
	    "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status),
	    (RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE));

	/* Issue the ISP soft reset. */
	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	/* Config-space read flushes the posted write. */
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);

	/* Wait for firmware to complete NVRAM accesses. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	if (rval == QLA_SUCCESS)
		set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f,
	    "HCCR: 0x%x, MailBox0 Status 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->mailbox0));

	/* Wait for soft-reset to complete. */
	RD_REG_DWORD(&reg->ctrl_status);
	for (cnt = 0; cnt < 60; cnt++) {
		barrier();
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(5);
	}
	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
		set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d,
	    "HCCR: 0x%x, Soft Reset status: 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_DWORD(&reg->ctrl_status));

	/* If required, do an MPI FW reset now */
	if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) {
		if (qla81xx_reset_mpi(vha) != QLA_SUCCESS) {
			if (++abts_cnt < 5) {
				/* Retry via a fresh ISP abort + MPI reset. */
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				set_bit(MPI_RESET_NEEDED, &vha->dpc_flags);
			} else {
				/*
				 * We exhausted the ISP abort retries. We have to
				 * set the board offline.
				 */
				abts_cnt = 0;
				vha->flags.online = 0;
			}
		}
	}

	/* Pulse RISC reset, release pause, then clear reset; each write is
	 * flushed by the following read. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
	RD_REG_DWORD(&reg->hccr);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);

	/* Wait for mailbox0 to clear again after releasing the RISC. */
	RD_REG_WORD(&reg->mailbox0);
	for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		barrier();
		if (cnt)
			udelay(5);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e,
	    "Host Risc 0x%x, mailbox0 0x%x\n",
	    RD_REG_DWORD(&reg->hccr),
	    RD_REG_WORD(&reg->mailbox0));

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015f,
	    "Driver in %s mode\n",
	    IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");

	if (IS_NOPOLLING_TYPE(ha))
		ha->isp_ops->enable_intrs(ha);

	return rval;
}
2799
2800 static void
qla25xx_read_risc_sema_reg(scsi_qla_host_t * vha,uint32_t * data)2801 qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
2802 {
2803 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2804
2805 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2806 *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET);
2807
2808 }
2809
2810 static void
qla25xx_write_risc_sema_reg(scsi_qla_host_t * vha,uint32_t data)2811 qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
2812 {
2813 struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
2814
2815 WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET);
2816 WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
2817 }
2818
/*
 * Acquire the RISC hardware semaphore before reset, forcing it free if a
 * previous owner never released it.  Only applied to two specific
 * subsystem-device IDs.
 */
static void
qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
{
	uint32_t wd32 = 0;
	uint delta_msec = 100;
	uint elapsed_msec = 0;
	uint timeout_msec;
	ulong n;

	/* NOTE(review): 0x0175/0x0240 are presumably the affected OEM
	 * boards -- confirm against hardware errata. */
	if (vha->hw->pdev->subsystem_device != 0x0175 &&
	    vha->hw->pdev->subsystem_device != 0x0240)
		return;

	/* Pause the RISC before poking the semaphore register. */
	WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
	udelay(100);

attempt:
	/* Try to take the semaphore within TIMEOUT_SEMAPHORE msec. */
	timeout_msec = TIMEOUT_SEMAPHORE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (wd32 & RISC_SEMAPHORE)
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (!(wd32 & RISC_SEMAPHORE))
		goto force;

	/* Semaphore taken and no force flag pending -- we own it. */
	if (!(wd32 & RISC_SEMAPHORE_FORCE))
		goto acquired;

	/* A stale force flag is set: drop the semaphore and wait for the
	 * force flag to clear before retrying the acquisition. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
	timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
	n = timeout_msec / delta_msec;
	while (n--) {
		qla25xx_read_risc_sema_reg(vha, &wd32);
		if (!(wd32 & RISC_SEMAPHORE_FORCE))
			break;
		msleep(delta_msec);
		elapsed_msec += delta_msec;
		if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
			goto force;
	}

	if (wd32 & RISC_SEMAPHORE_FORCE)
		qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);

	goto attempt;

force:
	/* Could not acquire cleanly within the total budget -- force it. */
	qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);

acquired:
	return;
}
2879
2880 /**
2881 * qla24xx_reset_chip() - Reset ISP24xx chip.
2882 * @vha: HA context
2883 *
2884 * Returns 0 on success.
2885 */
2886 int
qla24xx_reset_chip(scsi_qla_host_t * vha)2887 qla24xx_reset_chip(scsi_qla_host_t *vha)
2888 {
2889 struct qla_hw_data *ha = vha->hw;
2890 int rval = QLA_FUNCTION_FAILED;
2891
2892 if (pci_channel_offline(ha->pdev) &&
2893 ha->flags.pci_channel_io_perm_failure) {
2894 return rval;
2895 }
2896
2897 ha->isp_ops->disable_intrs(ha);
2898
2899 qla25xx_manipulate_risc_semaphore(vha);
2900
2901 /* Perform RISC reset. */
2902 rval = qla24xx_reset_risc(vha);
2903
2904 return rval;
2905 }
2906
2907 /**
2908 * qla2x00_chip_diag() - Test chip for proper operation.
2909 * @vha: HA context
2910 *
2911 * Returns 0 on success.
2912 */
int
qla2x00_chip_diag(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags = 0;
	uint16_t data;
	uint32_t cnt;
	uint16_t mb[5];
	struct req_que *req = ha->req_q_map[0];

	/* Assume a failed state */
	rval = QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n",
	    &reg->flash_address);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/*
	 * We need to have a delay here since the card will not respond while
	 * in reset causing an MCA on some architectures.
	 */
	udelay(20);
	data = qla2x00_debounce_register(&reg->ctrl_status);
	/* Poll until the soft-reset bit self-clears (bounded by cnt). */
	for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) {
		udelay(5);
		data = RD_REG_WORD(&reg->ctrl_status);
		barrier();
	}

	if (!cnt)
		goto chip_diag_failed;

	ql_dbg(ql_dbg_init, vha, 0x007c,
	    "Reset register cleared by chip reset.\n");

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);

	/* Workaround for QLA2312 PCI parity error */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/* Poll mailbox 0 out of the busy state (bounded by cnt). */
		data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
		for (cnt = 6000000; cnt && (data == MBS_BUSY); cnt--) {
			udelay(5);
			data = RD_MAILBOX_REG(ha, reg, 0);
			barrier();
		}
	} else
		udelay(10);

	if (!cnt)
		goto chip_diag_failed;

	/* Check product ID of chip */
	ql_dbg(ql_dbg_init, vha, 0x007d, "Checking product ID of chip.\n");

	mb[1] = RD_MAILBOX_REG(ha, reg, 1);
	mb[2] = RD_MAILBOX_REG(ha, reg, 2);
	mb[3] = RD_MAILBOX_REG(ha, reg, 3);
	mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
	if (mb[1] != PROD_ID_1 || (mb[2] != PROD_ID_2 && mb[2] != PROD_ID_2a) ||
	    mb[3] != PROD_ID_3) {
		ql_log(ql_log_warn, vha, 0x0062,
		    "Wrong product ID = 0x%x,0x%x,0x%x.\n",
		    mb[1], mb[2], mb[3]);

		goto chip_diag_failed;
	}
	ha->product_id[0] = mb[1];
	ha->product_id[1] = mb[2];
	ha->product_id[2] = mb[3];
	ha->product_id[3] = mb[4];

	/* Adjust fw RISC transfer size */
	if (req->length > 1024)
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
	else
		ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
		    req->length;

	if (IS_QLA2200(ha) &&
	    RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
		/* Limit firmware transfer size with a 2200A */
		ql_dbg(ql_dbg_init, vha, 0x007e, "Found QLA2200A Chip.\n");

		ha->device_type |= DT_ISP2200A;
		ha->fw_transfer_size = 128;
	}

	/* Wrap Incoming Mailboxes Test. */
	/* Release the hardware lock around the mailbox register test. */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x007f, "Checking mailboxes.\n");
	rval = qla2x00_mbx_reg_test(vha);
	if (rval)
		ql_log(ql_log_warn, vha, 0x0080,
		    "Failed mailbox send register test.\n");
	else
		/* Flag a successful rval */
		rval = QLA_SUCCESS;
	spin_lock_irqsave(&ha->hardware_lock, flags);

chip_diag_failed:
	if (rval)
		ql_log(ql_log_info, vha, 0x0081,
		    "Chip diagnostics **** FAILED ****.\n");

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (rval);
}
3030
3031 /**
3032 * qla24xx_chip_diag() - Test ISP24xx for proper operation.
3033 * @vha: HA context
3034 *
3035 * Returns 0 on success.
3036 */
3037 int
qla24xx_chip_diag(scsi_qla_host_t * vha)3038 qla24xx_chip_diag(scsi_qla_host_t *vha)
3039 {
3040 int rval;
3041 struct qla_hw_data *ha = vha->hw;
3042 struct req_que *req = ha->req_q_map[0];
3043
3044 if (IS_P3P_TYPE(ha))
3045 return QLA_SUCCESS;
3046
3047 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3048
3049 rval = qla2x00_mbx_reg_test(vha);
3050 if (rval) {
3051 ql_log(ql_log_warn, vha, 0x0082,
3052 "Failed mailbox send register test.\n");
3053 } else {
3054 /* Flag a successful rval */
3055 rval = QLA_SUCCESS;
3056 }
3057
3058 return rval;
3059 }
3060
3061 static void
qla2x00_init_fce_trace(scsi_qla_host_t * vha)3062 qla2x00_init_fce_trace(scsi_qla_host_t *vha)
3063 {
3064 int rval;
3065 dma_addr_t tc_dma;
3066 void *tc;
3067 struct qla_hw_data *ha = vha->hw;
3068
3069 if (!IS_FWI2_CAPABLE(ha))
3070 return;
3071
3072 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3073 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3074 return;
3075
3076 if (ha->fce) {
3077 ql_dbg(ql_dbg_init, vha, 0x00bd,
3078 "%s: FCE Mem is already allocated.\n",
3079 __func__);
3080 return;
3081 }
3082
3083 /* Allocate memory for Fibre Channel Event Buffer. */
3084 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3085 GFP_KERNEL);
3086 if (!tc) {
3087 ql_log(ql_log_warn, vha, 0x00be,
3088 "Unable to allocate (%d KB) for FCE.\n",
3089 FCE_SIZE / 1024);
3090 return;
3091 }
3092
3093 rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS,
3094 ha->fce_mb, &ha->fce_bufs);
3095 if (rval) {
3096 ql_log(ql_log_warn, vha, 0x00bf,
3097 "Unable to initialize FCE (%d).\n", rval);
3098 dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);
3099 return;
3100 }
3101
3102 ql_dbg(ql_dbg_init, vha, 0x00c0,
3103 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);
3104
3105 ha->flags.fce_enabled = 1;
3106 ha->fce_dma = tc_dma;
3107 ha->fce = tc;
3108 }
3109
3110 static void
qla2x00_init_eft_trace(scsi_qla_host_t * vha)3111 qla2x00_init_eft_trace(scsi_qla_host_t *vha)
3112 {
3113 int rval;
3114 dma_addr_t tc_dma;
3115 void *tc;
3116 struct qla_hw_data *ha = vha->hw;
3117
3118 if (!IS_FWI2_CAPABLE(ha))
3119 return;
3120
3121 if (ha->eft) {
3122 ql_dbg(ql_dbg_init, vha, 0x00bd,
3123 "%s: EFT Mem is already allocated.\n",
3124 __func__);
3125 return;
3126 }
3127
3128 /* Allocate memory for Extended Trace Buffer. */
3129 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3130 GFP_KERNEL);
3131 if (!tc) {
3132 ql_log(ql_log_warn, vha, 0x00c1,
3133 "Unable to allocate (%d KB) for EFT.\n",
3134 EFT_SIZE / 1024);
3135 return;
3136 }
3137
3138 rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
3139 if (rval) {
3140 ql_log(ql_log_warn, vha, 0x00c2,
3141 "Unable to initialize EFT (%d).\n", rval);
3142 dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);
3143 return;
3144 }
3145
3146 ql_dbg(ql_dbg_init, vha, 0x00c3,
3147 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
3148
3149 ha->eft_dma = tc_dma;
3150 ha->eft = tc;
3151 }
3152
/* Allocate the optional firmware trace buffers (FCE and EFT); each helper
 * is a no-op when the chip lacks the feature or the buffer already exists. */
static void
qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
{
	qla2x00_init_fce_trace(vha);
	qla2x00_init_eft_trace(vha);
}
3159
3160 void
qla2x00_alloc_fw_dump(scsi_qla_host_t * vha)3161 qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
3162 {
3163 uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
3164 eft_size, fce_size, mq_size;
3165 struct qla_hw_data *ha = vha->hw;
3166 struct req_que *req = ha->req_q_map[0];
3167 struct rsp_que *rsp = ha->rsp_q_map[0];
3168 struct qla2xxx_fw_dump *fw_dump;
3169
3170 dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
3171 req_q_size = rsp_q_size = 0;
3172
3173 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3174 fixed_size = sizeof(struct qla2100_fw_dump);
3175 } else if (IS_QLA23XX(ha)) {
3176 fixed_size = offsetof(struct qla2300_fw_dump, data_ram);
3177 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3178 sizeof(uint16_t);
3179 } else if (IS_FWI2_CAPABLE(ha)) {
3180 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
3181 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
3182 else if (IS_QLA81XX(ha))
3183 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
3184 else if (IS_QLA25XX(ha))
3185 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
3186 else
3187 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
3188
3189 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3190 sizeof(uint32_t);
3191 if (ha->mqenable) {
3192 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
3193 !IS_QLA28XX(ha))
3194 mq_size = sizeof(struct qla2xxx_mq_chain);
3195 /*
3196 * Allocate maximum buffer size for all queues - Q0.
3197 * Resizing must be done at end-of-dump processing.
3198 */
3199 mq_size += (ha->max_req_queues - 1) *
3200 (req->length * sizeof(request_t));
3201 mq_size += (ha->max_rsp_queues - 1) *
3202 (rsp->length * sizeof(response_t));
3203 }
3204 if (ha->tgt.atio_ring)
3205 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3206
3207 qla2x00_init_fce_trace(vha);
3208 if (ha->fce)
3209 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
3210 qla2x00_init_eft_trace(vha);
3211 if (ha->eft)
3212 eft_size = EFT_SIZE;
3213 }
3214
3215 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3216 struct fwdt *fwdt = ha->fwdt;
3217 uint j;
3218
3219 for (j = 0; j < 2; j++, fwdt++) {
3220 if (!fwdt->template) {
3221 ql_dbg(ql_dbg_init, vha, 0x00ba,
3222 "-> fwdt%u no template\n", j);
3223 continue;
3224 }
3225 ql_dbg(ql_dbg_init, vha, 0x00fa,
3226 "-> fwdt%u calculating fwdump size...\n", j);
3227 fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
3228 vha, fwdt->template);
3229 ql_dbg(ql_dbg_init, vha, 0x00fa,
3230 "-> fwdt%u calculated fwdump size = %#lx bytes\n",
3231 j, fwdt->dump_size);
3232 dump_size += fwdt->dump_size;
3233 }
3234 } else {
3235 req_q_size = req->length * sizeof(request_t);
3236 rsp_q_size = rsp->length * sizeof(response_t);
3237 dump_size = offsetof(struct qla2xxx_fw_dump, isp);
3238 dump_size += fixed_size + mem_size + req_q_size + rsp_q_size
3239 + eft_size;
3240 ha->chain_offset = dump_size;
3241 dump_size += mq_size + fce_size;
3242 if (ha->exchoffld_buf)
3243 dump_size += sizeof(struct qla2xxx_offld_chain) +
3244 ha->exchoffld_size;
3245 if (ha->exlogin_buf)
3246 dump_size += sizeof(struct qla2xxx_offld_chain) +
3247 ha->exlogin_size;
3248 }
3249
3250 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3251
3252 ql_dbg(ql_dbg_init, vha, 0x00c5,
3253 "%s dump_size %d fw_dump_len %d fw_dump_alloc_len %d\n",
3254 __func__, dump_size, ha->fw_dump_len,
3255 ha->fw_dump_alloc_len);
3256
3257 fw_dump = vmalloc(dump_size);
3258 if (!fw_dump) {
3259 ql_log(ql_log_warn, vha, 0x00c4,
3260 "Unable to allocate (%d KB) for firmware dump.\n",
3261 dump_size / 1024);
3262 } else {
3263 mutex_lock(&ha->optrom_mutex);
3264 if (ha->fw_dumped) {
3265 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3266 vfree(ha->fw_dump);
3267 ha->fw_dump = fw_dump;
3268 ha->fw_dump_alloc_len = dump_size;
3269 ql_dbg(ql_dbg_init, vha, 0x00c5,
3270 "Re-Allocated (%d KB) and save firmware dump.\n",
3271 dump_size / 1024);
3272 } else {
3273 if (ha->fw_dump)
3274 vfree(ha->fw_dump);
3275 ha->fw_dump = fw_dump;
3276
3277 ha->fw_dump_len = ha->fw_dump_alloc_len =
3278 dump_size;
3279 ql_dbg(ql_dbg_init, vha, 0x00c5,
3280 "Allocated (%d KB) for firmware dump.\n",
3281 dump_size / 1024);
3282
3283 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3284 mutex_unlock(&ha->optrom_mutex);
3285 return;
3286 }
3287
3288 ha->fw_dump->signature[0] = 'Q';
3289 ha->fw_dump->signature[1] = 'L';
3290 ha->fw_dump->signature[2] = 'G';
3291 ha->fw_dump->signature[3] = 'C';
3292 ha->fw_dump->version = htonl(1);
3293
3294 ha->fw_dump->fixed_size = htonl(fixed_size);
3295 ha->fw_dump->mem_size = htonl(mem_size);
3296 ha->fw_dump->req_q_size = htonl(req_q_size);
3297 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3298
3299 ha->fw_dump->eft_size = htonl(eft_size);
3300 ha->fw_dump->eft_addr_l =
3301 htonl(LSD(ha->eft_dma));
3302 ha->fw_dump->eft_addr_h =
3303 htonl(MSD(ha->eft_dma));
3304
3305 ha->fw_dump->header_size =
3306 htonl(offsetof
3307 (struct qla2xxx_fw_dump, isp));
3308 }
3309 mutex_unlock(&ha->optrom_mutex);
3310 }
3311 }
3312 }
3313
/*
 * qla81xx_mpi_sync() - Keep the MPI MPS bits consistent on ISP81xx.
 *
 * Reads the MPS bits (MPS_MASK) from PCI config word 0x54 and, when they
 * differ from the copy held in RISC RAM word 0x7a15, rewrites that RAM
 * word.  The whole update is bracketed by a semaphore at RAM word 0x7c00
 * (write 1 to acquire, 0 to release).
 *
 * Returns QLA_SUCCESS on non-ISP81xx adapters (no-op) or the status of
 * the last RAM-word access otherwise.
 *
 * NOTE(review): the semaphore-release status overwrites any earlier
 * failure in @rval, so a read/write error can be masked by a successful
 * release - confirm callers only use this for logging.
 */
static int
qla81xx_mpi_sync(scsi_qla_host_t *vha)
{
#define MPS_MASK 0xe0
	int rval;
	uint16_t dc;	/* MPS bits as seen in PCI config space */
	uint32_t dw;	/* sync word read from RISC RAM */

	/* Only ISP81xx carries the MPI sync word. */
	if (!IS_QLA81XX(vha->hw))
		return QLA_SUCCESS;

	/* Acquire the semaphore guarding the sync word. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0105,
		    "Unable to acquire semaphore.\n");
		goto done;
	}

	pci_read_config_word(vha->hw->pdev, 0x54, &dc);
	rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0067, "Unable to read sync.\n");
		goto done_release;
	}

	/* Already consistent - nothing to write back. */
	dc &= MPS_MASK;
	if (dc == (dw & MPS_MASK))
		goto done_release;

	/* Splice the PCI-side MPS bits into the RAM copy. */
	dw &= ~MPS_MASK;
	dw |= dc;
	rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0114, "Unable to gain sync.\n");
	}

done_release:
	/* Drop the semaphore; this status becomes the return value. */
	rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x006d,
		    "Unable to release semaphore.\n");
	}

done:
	return rval;
}
3360
3361 int
qla2x00_alloc_outstanding_cmds(struct qla_hw_data * ha,struct req_que * req)3362 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3363 {
3364 /* Don't try to reallocate the array */
3365 if (req->outstanding_cmds)
3366 return QLA_SUCCESS;
3367
3368 if (!IS_FWI2_CAPABLE(ha))
3369 req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
3370 else {
3371 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
3372 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
3373 else
3374 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
3375 }
3376
3377 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3378 sizeof(srb_t *),
3379 GFP_KERNEL);
3380
3381 if (!req->outstanding_cmds) {
3382 /*
3383 * Try to allocate a minimal size just so we can get through
3384 * initialization.
3385 */
3386 req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
3387 req->outstanding_cmds = kcalloc(req->num_outstanding_cmds,
3388 sizeof(srb_t *),
3389 GFP_KERNEL);
3390
3391 if (!req->outstanding_cmds) {
3392 ql_log(ql_log_fatal, NULL, 0x0126,
3393 "Failed to allocate memory for "
3394 "outstanding_cmds for req_que %p.\n", req);
3395 req->num_outstanding_cmds = 0;
3396 return QLA_FUNCTION_FAILED;
3397 }
3398 }
3399
3400 return QLA_SUCCESS;
3401 }
3402
/*
 * Append _str to the caller's output buffer when bit(s) _flag are set in
 * a0->_field, separating consecutive entries with '|'.
 *
 * Relies on locals declared by the caller (qla2xxx_print_sfp_info):
 *   a0       - pointer to the SFP page being decoded
 *   ptr      - current write position inside the buffer
 *   leftover - bytes remaining in the buffer
 *   len      - scratch for the snprintf() return value
 *   p        - nonzero once at least one entry has been printed
 *
 * Wrapped in do { } while (0) so the macro expands as exactly one
 * statement and stays safe inside an unbraced if/else (the original
 * bare-brace form would break such call sites).
 */
#define PRINT_FIELD(_field, _flag, _str) do { \
	if (a0->_field & _flag) { \
		if (p) { \
			strcat(ptr, "|"); \
			ptr++; \
			leftover--; \
		} \
		len = snprintf(ptr, leftover, "%s", _str); \
		p = 1; \
		leftover -= len; \
		ptr += len; \
	} \
} while (0)
3416
qla2xxx_print_sfp_info(struct scsi_qla_host * vha)3417 static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha)
3418 {
3419 #define STR_LEN 64
3420 struct sff_8247_a0 *a0 = (struct sff_8247_a0 *)vha->hw->sfp_data;
3421 u8 str[STR_LEN], *ptr, p;
3422 int leftover, len;
3423
3424 memset(str, 0, STR_LEN);
3425 snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name);
3426 ql_dbg(ql_dbg_init, vha, 0x015a,
3427 "SFP MFG Name: %s\n", str);
3428
3429 memset(str, 0, STR_LEN);
3430 snprintf(str, SFF_PART_NAME_LEN+1, a0->vendor_pn);
3431 ql_dbg(ql_dbg_init, vha, 0x015c,
3432 "SFP Part Name: %s\n", str);
3433
3434 /* media */
3435 memset(str, 0, STR_LEN);
3436 ptr = str;
3437 leftover = STR_LEN;
3438 p = len = 0;
3439 PRINT_FIELD(fc_med_cc9, FC_MED_TW, "Twin AX");
3440 PRINT_FIELD(fc_med_cc9, FC_MED_TP, "Twisted Pair");
3441 PRINT_FIELD(fc_med_cc9, FC_MED_MI, "Min Coax");
3442 PRINT_FIELD(fc_med_cc9, FC_MED_TV, "Video Coax");
3443 PRINT_FIELD(fc_med_cc9, FC_MED_M6, "MultiMode 62.5um");
3444 PRINT_FIELD(fc_med_cc9, FC_MED_M5, "MultiMode 50um");
3445 PRINT_FIELD(fc_med_cc9, FC_MED_SM, "SingleMode");
3446 ql_dbg(ql_dbg_init, vha, 0x0160,
3447 "SFP Media: %s\n", str);
3448
3449 /* link length */
3450 memset(str, 0, STR_LEN);
3451 ptr = str;
3452 leftover = STR_LEN;
3453 p = len = 0;
3454 PRINT_FIELD(fc_ll_cc7, FC_LL_VL, "Very Long");
3455 PRINT_FIELD(fc_ll_cc7, FC_LL_S, "Short");
3456 PRINT_FIELD(fc_ll_cc7, FC_LL_I, "Intermediate");
3457 PRINT_FIELD(fc_ll_cc7, FC_LL_L, "Long");
3458 PRINT_FIELD(fc_ll_cc7, FC_LL_M, "Medium");
3459 ql_dbg(ql_dbg_init, vha, 0x0196,
3460 "SFP Link Length: %s\n", str);
3461
3462 memset(str, 0, STR_LEN);
3463 ptr = str;
3464 leftover = STR_LEN;
3465 p = len = 0;
3466 PRINT_FIELD(fc_ll_cc7, FC_LL_SA, "Short Wave (SA)");
3467 PRINT_FIELD(fc_ll_cc7, FC_LL_LC, "Long Wave(LC)");
3468 PRINT_FIELD(fc_tec_cc8, FC_TEC_SN, "Short Wave (SN)");
3469 PRINT_FIELD(fc_tec_cc8, FC_TEC_SL, "Short Wave (SL)");
3470 PRINT_FIELD(fc_tec_cc8, FC_TEC_LL, "Long Wave (LL)");
3471 ql_dbg(ql_dbg_init, vha, 0x016e,
3472 "SFP FC Link Tech: %s\n", str);
3473
3474 if (a0->length_km)
3475 ql_dbg(ql_dbg_init, vha, 0x016f,
3476 "SFP Distant: %d km\n", a0->length_km);
3477 if (a0->length_100m)
3478 ql_dbg(ql_dbg_init, vha, 0x0170,
3479 "SFP Distant: %d m\n", a0->length_100m*100);
3480 if (a0->length_50um_10m)
3481 ql_dbg(ql_dbg_init, vha, 0x0189,
3482 "SFP Distant (WL=50um): %d m\n", a0->length_50um_10m * 10);
3483 if (a0->length_62um_10m)
3484 ql_dbg(ql_dbg_init, vha, 0x018a,
3485 "SFP Distant (WL=62.5um): %d m\n", a0->length_62um_10m * 10);
3486 if (a0->length_om4_10m)
3487 ql_dbg(ql_dbg_init, vha, 0x0194,
3488 "SFP Distant (OM4): %d m\n", a0->length_om4_10m * 10);
3489 if (a0->length_om3_10m)
3490 ql_dbg(ql_dbg_init, vha, 0x0195,
3491 "SFP Distant (OM3): %d m\n", a0->length_om3_10m * 10);
3492 }
3493
3494
3495 /*
3496 * Return Code:
3497 * QLA_SUCCESS: no action
3498 * QLA_INTERFACE_ERROR: SFP is not there.
3499 * QLA_FUNCTION_FAILED: detected New SFP
3500 */
3501 int
qla24xx_detect_sfp(scsi_qla_host_t * vha)3502 qla24xx_detect_sfp(scsi_qla_host_t *vha)
3503 {
3504 int rc = QLA_SUCCESS;
3505 struct sff_8247_a0 *a;
3506 struct qla_hw_data *ha = vha->hw;
3507
3508 if (!AUTO_DETECT_SFP_SUPPORT(vha))
3509 goto out;
3510
3511 rc = qla2x00_read_sfp_dev(vha, NULL, 0);
3512 if (rc)
3513 goto out;
3514
3515 a = (struct sff_8247_a0 *)vha->hw->sfp_data;
3516 qla2xxx_print_sfp_info(vha);
3517
3518 if (a->fc_ll_cc7 & FC_LL_VL || a->fc_ll_cc7 & FC_LL_L) {
3519 /* long range */
3520 ha->flags.detected_lr_sfp = 1;
3521
3522 if (a->length_km > 5 || a->length_100m > 50)
3523 ha->long_range_distance = LR_DISTANCE_10K;
3524 else
3525 ha->long_range_distance = LR_DISTANCE_5K;
3526
3527 if (ha->flags.detected_lr_sfp != ha->flags.using_lr_setting)
3528 ql_dbg(ql_dbg_async, vha, 0x507b,
3529 "Detected Long Range SFP.\n");
3530 } else {
3531 /* short range */
3532 ha->flags.detected_lr_sfp = 0;
3533 if (ha->flags.using_lr_setting)
3534 ql_dbg(ql_dbg_async, vha, 0x5084,
3535 "Detected Short Range SFP.\n");
3536 }
3537
3538 if (!vha->flags.init_done)
3539 rc = QLA_SUCCESS;
3540 out:
3541 return rc;
3542 }
3543
3544 /**
3545 * qla2x00_setup_chip() - Load and start RISC firmware.
3546 * @vha: HA context
3547 *
3548 * Returns 0 on success.
3549 */
static int
qla2x00_setup_chip(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t srisc_address = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	unsigned long flags;
	uint16_t fw_major_version;

	/* P3P (ISP82xx) parts drive firmware start differently: verify
	 * the image loads, stop it, and jump straight to NPIV setup. */
	if (IS_P3P_TYPE(ha)) {
		rval = ha->isp_ops->load_risc(vha, &srisc_address);
		if (rval == QLA_SUCCESS) {
			qla2x00_stop_firmware(vha);
			goto enable_82xx_npiv;
		} else
			goto failed;
	}

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Disable SRAM, Instruction RAM and GP RAM parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_WORD(®->hccr, (HCCR_ENABLE_PARITY + 0x0));
		RD_REG_WORD(®->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* No-op on anything but ISP81xx (see qla81xx_mpi_sync). */
	qla81xx_mpi_sync(vha);

	/* Load firmware sequences */
	rval = ha->isp_ops->load_risc(vha, &srisc_address);
	if (rval == QLA_SUCCESS) {
		ql_dbg(ql_dbg_init, vha, 0x00c9,
		    "Verifying Checksum of loaded RISC code.\n");

		rval = qla2x00_verify_checksum(vha, srisc_address);
		if (rval == QLA_SUCCESS) {
			/* Start firmware execution. */
			ql_dbg(ql_dbg_init, vha, 0x00ca,
			    "Starting firmware.\n");

			/* Extended login / exchange offload are opted in
			 * before execute so firmware sizes them. */
			if (ql2xexlogins)
				ha->flags.exlogins_enabled = 1;

			if (qla_is_exch_offld_enabled(vha))
				ha->flags.exchoffld_enabled = 1;

			rval = qla2x00_execute_fw(vha, srisc_address);
			/* Retrieve firmware information. */
			if (rval == QLA_SUCCESS) {
				qla24xx_detect_sfp(vha);

				if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
				    IS_QLA28XX(ha)) &&
				    (ha->zio_mode == QLA_ZIO_MODE_6))
					qla27xx_set_zio_threshold(vha,
					    ha->last_zio_threshold);

				rval = qla2x00_set_exlogins_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

				rval = qla2x00_set_exchoffld_buffer(vha);
				if (rval != QLA_SUCCESS)
					goto failed;

enable_82xx_npiv:
				/* Remember the pre-query major version so a
				 * first-time start (0) triggers offload-mem
				 * allocation below. */
				fw_major_version = ha->fw_major_version;
				if (IS_P3P_TYPE(ha))
					qla82xx_check_md_needed(vha);
				else
					rval = qla2x00_get_fw_version(vha);
				if (rval != QLA_SUCCESS)
					goto failed;
				ha->flags.npiv_supported = 0;
				if (IS_QLA2XXX_MIDTYPE(ha) &&
					 (ha->fw_attributes & BIT_2)) {
					ha->flags.npiv_supported = 1;
					/* Fall back to the minimum fabric
					 * vport count when the reported max
					 * is absent or misaligned. */
					if ((!ha->max_npiv_vports) ||
					    ((ha->max_npiv_vports + 1) %
					    MIN_MULTI_ID_FABRIC))
						ha->max_npiv_vports =
						    MIN_MULTI_ID_FABRIC - 1;
				}
				qla2x00_get_resource_cnts(vha);

				/*
				 * Allocate the array of outstanding commands
				 * now that we know the firmware resources.
				 */
				rval = qla2x00_alloc_outstanding_cmds(ha,
				    vha->req);
				if (rval != QLA_SUCCESS)
					goto failed;

				if (!fw_major_version && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_offload_mem(vha);

				if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
					qla2x00_alloc_fw_dump(vha);

			} else {
				goto failed;
			}
		} else {
			ql_log(ql_log_fatal, vha, 0x00cd,
			    "ISP Firmware failed checksum.\n");
			goto failed;
		}
	} else
		goto failed;

	if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
		/* Enable proper parity. */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		if (IS_QLA2300(ha))
			/* SRAM parity */
			WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x1);
		else
			/* SRAM, Instruction RAM and GP RAM parity */
			WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x7);
		RD_REG_WORD(®->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	/* Flash Access Control: 27xx/28xx always support it; others must
	 * report a sector size, else FAC is disabled (83xx+ tolerate it). */
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		ha->flags.fac_supported = 1;
	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
		uint32_t size;

		rval = qla81xx_fac_get_sector_size(vha, &size);
		if (rval == QLA_SUCCESS) {
			ha->flags.fac_supported = 1;
			ha->fdt_block_size = size << 2;
		} else {
			ql_log(ql_log_warn, vha, 0x00ce,
			    "Unsupported FAC firmware (%d.%02d.%02d).\n",
			    ha->fw_major_version, ha->fw_minor_version,
			    ha->fw_subminor_version);

			if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
			    IS_QLA28XX(ha)) {
				ha->flags.fac_supported = 0;
				rval = QLA_SUCCESS;
			}
		}
	}
failed:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00cf,
		    "Setup chip ****FAILED****.\n");
	}

	return (rval);
}
3705
3706 /**
3707 * qla2x00_init_response_q_entries() - Initializes response queue entries.
3708 * @rsp: response queue
3709 *
3710 * Beginning of request ring has initialization control block already built
3711 * by nvram config routine.
3712 *
3713 * Returns 0 on success.
3714 */
3715 void
qla2x00_init_response_q_entries(struct rsp_que * rsp)3716 qla2x00_init_response_q_entries(struct rsp_que *rsp)
3717 {
3718 uint16_t cnt;
3719 response_t *pkt;
3720
3721 rsp->ring_ptr = rsp->ring;
3722 rsp->ring_index = 0;
3723 rsp->status_srb = NULL;
3724 pkt = rsp->ring_ptr;
3725 for (cnt = 0; cnt < rsp->length; cnt++) {
3726 pkt->signature = RESPONSE_PROCESSED;
3727 pkt++;
3728 }
3729 }
3730
3731 /**
3732 * qla2x00_update_fw_options() - Read and process firmware options.
3733 * @vha: HA context
3734 *
3735 * Returns 0 on success.
3736 */
void
qla2x00_update_fw_options(scsi_qla_host_t *vha)
{
	uint16_t swing, emphasis, tx_sens, rx_sens;
	struct qla_hw_data *ha = vha->hw;

	/* Start from the firmware's current option words. */
	memset(ha->fw_options, 0, sizeof(ha->fw_options));
	qla2x00_get_fw_options(vha, ha->fw_options);

	/* ISP2100/2200 have no serial-link or extended options. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return;

	/* Serial Link options. */
	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115,
	    "Serial link options.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109,
	    ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));

	/* NVRAM byte 3 BIT_2 enables custom swing/emphasis settings;
	 * fw_options[10] carries the 1G values, [11] the 2G values,
	 * each packed as (emphasis << 14) | (swing << 8) | sens bits. */
	ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
	if (ha->fw_seriallink_options[3] & BIT_2) {
		ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;

		/* 1G settings */
		swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
		emphasis = (ha->fw_seriallink_options[2] &
		    (BIT_4 | BIT_3)) >> 3;
		tx_sens = ha->fw_seriallink_options[0] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[0] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[10] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* rx sensitivity of 0 is not valid; use default 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[10] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));

		/* 2G settings */
		swing = (ha->fw_seriallink_options[2] &
		    (BIT_7 | BIT_6 | BIT_5)) >> 5;
		emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
		tx_sens = ha->fw_seriallink_options[1] &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		rx_sens = (ha->fw_seriallink_options[1] &
		    (BIT_7 | BIT_6 | BIT_5 | BIT_4)) >> 4;
		ha->fw_options[11] = (emphasis << 14) | (swing << 8);
		if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
			/* rx sensitivity of 0 is not valid; use default 3. */
			if (rx_sens == 0x0)
				rx_sens = 0x3;
			ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
		} else if (IS_QLA2322(ha) || IS_QLA6322(ha))
			ha->fw_options[11] |= BIT_5 |
			    ((rx_sens & (BIT_1 | BIT_0)) << 2) |
			    (tx_sens & (BIT_1 | BIT_0));
	}

	/* FCP2 options. */
	/* Return command IOCBs without waiting for an ABTS to complete. */
	ha->fw_options[3] |= BIT_13;

	/* LED scheme. */
	if (ha->flags.enable_led_scheme)
		ha->fw_options[2] |= BIT_12;

	/* Detect ISP6312. */
	if (IS_QLA6312(ha))
		ha->fw_options[2] |= BIT_13;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2100,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Update firmware options. */
	qla2x00_set_fw_options(vha, ha->fw_options);
}
3819
/**
 * qla24xx_update_fw_options() - Adjust FWI2 firmware option bits and
 * serial-link parameters, then push them to the firmware.
 * @vha: HA context
 *
 * Sets/clears option bits for ABTS handling, P2P FLOGI retry, routing
 * of PUREX/ABTS/RIDA to the ATIO queue, per-exchange tracking, and ELS
 * emergency-exchange reservation, depending on adapter type, module
 * parameters and target/dual mode.  No-op on P3P (ISP82xx) parts.
 */
void
qla24xx_update_fw_options(scsi_qla_host_t *vha)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;

	if (IS_P3P_TYPE(ha))
		return;

	/* Hold status IOCBs until ABTS response received. */
	if (ql2xfwholdabts)
		ha->fw_options[3] |= BIT_12;

	/* Set Retry FLOGI in case of P2P connection */
	if (ha->operating_mode == P2P) {
		ha->fw_options[2] |= BIT_3;
		ql_dbg(ql_dbg_disc, vha, 0x2101,
		    "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
			__func__, ha->fw_options[2]);
	}

	/* Move PUREX, ABTS RX & RIDA to ATIOQ */
	if (ql2xmvasynctoatio &&
	    (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_11;
		else
			ha->fw_options[2] &= ~BIT_11;
	}

	if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		/*
		 * Tell FW to track each exchange to prevent
		 * driver from using stale exchange.
		 */
		if (qla_tgt_mode_enabled(vha) ||
		    qla_dual_mode_enabled(vha))
			ha->fw_options[2] |= BIT_4;
		else
			ha->fw_options[2] &= ~BIT_4;

		/* Reserve 1/2 of emergency exchanges for ELS.*/
		if (qla2xuseresexchforels)
			ha->fw_options[2] |= BIT_8;
		else
			ha->fw_options[2] &= ~BIT_8;
	}

	ql_dbg(ql_dbg_init, vha, 0x00e8,
	    "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
	    __func__, ha->fw_options[1], ha->fw_options[2],
	    ha->fw_options[3], vha->host->active_mode);

	/* Only issue the mailbox command when something is actually set. */
	if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
		qla2x00_set_fw_options(vha, ha->fw_options);

	/* Update Serial Link options. */
	if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
		return;

	rval = qla2x00_set_serdes_params(vha,
	    le16_to_cpu(ha->fw_seriallink_options24[1]),
	    le16_to_cpu(ha->fw_seriallink_options24[2]),
	    le16_to_cpu(ha->fw_seriallink_options24[3]));
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x0104,
		    "Unable to update Serial Link options (%x).\n", rval);
	}
}
3891
/**
 * qla2x00_config_rings() - Describe the base request/response rings to
 * the (pre-FWI2) firmware and zero the ring in/out pointer registers.
 * @vha: HA context
 *
 * Fills the ring lengths, pointers and DMA addresses into the init
 * control block (little-endian, as the firmware expects) and resets the
 * four queue index registers, ending with a read for PCI posting.
 */
void
qla2x00_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	ha->init_cb->request_q_outpointer = cpu_to_le16(0);
	ha->init_cb->response_q_inpointer = cpu_to_le16(0);
	ha->init_cb->request_q_length = cpu_to_le16(req->length);
	ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
	put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);

	/* Reset the hardware queue index registers. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0);
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0);
	RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg));		/* PCI Posting. */
}
3914
/**
 * qla24xx_config_rings() - Describe queues to FWI2-capable firmware.
 * @vha: HA context
 *
 * Programs the base request/response rings, the target-mode ATIO queue,
 * shadow registers, multiqueue/MSI-X routing bits and optional
 * user-forced link speed into the 24xx init control block, then zeroes
 * the queue index registers.
 */
void
qla24xx_config_rings(struct scsi_qla_host *vha)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t *reg = ISP_QUE_REG(ha, 0);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	struct qla_msix_entry *msix;
	struct init_cb_24xx *icb;
	uint16_t rid = 0;
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Setup ring parameters in initialization control block. */
	icb = (struct init_cb_24xx *)ha->init_cb;
	icb->request_q_outpointer = cpu_to_le16(0);
	icb->response_q_inpointer = cpu_to_le16(0);
	icb->request_q_length = cpu_to_le16(req->length);
	icb->response_q_length = cpu_to_le16(rsp->length);
	put_unaligned_le64(req->dma, &icb->request_q_address);
	put_unaligned_le64(rsp->dma, &icb->response_q_address);

	/* Setup ATIO queue dma pointers for target mode */
	icb->atio_q_inpointer = cpu_to_le16(0);
	icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
	put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);

	/* BIT_30|BIT_29: enable shadow in/out pointer registers. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);

	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
	    IS_QLA28XX(ha)) {
		icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
		icb->rid = cpu_to_le16(rid);
		if (ha->flags.msix_enabled) {
			/* Vector 0 is reserved; entry 1 serves queue 0. */
			msix = &ha->msix_entries[1];
			ql_dbg(ql_dbg_init, vha, 0x0019,
			    "Registering vector 0x%x for base que.\n",
			    msix->entry);
			icb->msix = cpu_to_le16(msix->entry);
		}
		/* Use alternate PCI bus number */
		if (MSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_19);
		/* Use alternate PCI devfn */
		if (LSB(rid))
			icb->firmware_options_2 |= cpu_to_le32(BIT_18);

		/* Use Disable MSIX Handshake mode for capable adapters */
		if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
		    (ha->flags.msix_enabled)) {
			icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
			ha->flags.disable_msix_handshake = 1;
			ql_dbg(ql_dbg_init, vha, 0x00fe,
			    "MSIX Handshake Disable Mode turned on.\n");
		} else {
			icb->firmware_options_2 |= cpu_to_le32(BIT_22);
		}
		icb->firmware_options_2 |= cpu_to_le32(BIT_23);

		WRT_REG_DWORD(®->isp25mq.req_q_in, 0);
		WRT_REG_DWORD(®->isp25mq.req_q_out, 0);
		WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0);
		WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0);
	} else {
		WRT_REG_DWORD(®->isp24.req_q_in, 0);
		WRT_REG_DWORD(®->isp24.req_q_out, 0);
		WRT_REG_DWORD(®->isp24.rsp_q_in, 0);
		WRT_REG_DWORD(®->isp24.rsp_q_out, 0);
	}

	qlt_24xx_config_rings(vha);

	/* If the user has configured the speed, set it here */
	if (ha->set_data_rate) {
		ql_dbg(ql_dbg_init, vha, 0x00fd,
		    "Speed set by user : %s Gbps \n",
		    qla2x00_get_link_speed_str(ha, ha->set_data_rate));
		/* NOTE(review): unlike every other icb write above, this
		 * assignment skips cpu_to_le32() and overwrites the whole
		 * word rather than OR-ing in the rate bits - confirm
		 * intended on big-endian hosts. */
		icb->firmware_options_3 = (ha->set_data_rate << 13);
	}

	/* PCI posting */
	RD_REG_DWORD(&ioreg->hccr);
}
3998
3999 /**
4000 * qla2x00_init_rings() - Initializes firmware.
4001 * @vha: HA context
4002 *
4003 * Beginning of request ring has initialization control block already built
4004 * by nvram config routine.
4005 *
4006 * Returns 0 on success.
4007 */
int
qla2x00_init_rings(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags = 0;
	int cnt, que;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req;
	struct rsp_que *rsp;
	struct mid_init_cb_24xx *mid_init_cb =
	    (struct mid_init_cb_24xx *) ha->init_cb;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Clear outstanding commands array. */
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req || !test_bit(que, ha->req_qid_map))
			continue;
		/* Shadow out-pointer lives just past the ring entries. */
		req->out_ptr = (void *)(req->ring + req->length);
		*req->out_ptr = 0;
		/* Slot 0 is never used; handles start at 1. */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
			req->outstanding_cmds[cnt] = NULL;

		req->current_outstanding_cmd = 1;

		/* Initialize firmware. */
		req->ring_ptr  = req->ring;
		req->ring_index    = 0;
		req->cnt      = req->length;
	}

	for (que = 0; que < ha->max_rsp_queues; que++) {
		rsp = ha->rsp_q_map[que];
		if (!rsp || !test_bit(que, ha->rsp_qid_map))
			continue;
		/* Shadow in-pointer lives just past the ring entries. */
		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
		*rsp->in_ptr = 0;
		/* Initialize response queue entries */
		if (IS_QLAFX00(ha))
			qlafx00_init_response_q_entries(rsp);
		else
			qla2x00_init_response_q_entries(rsp);
	}

	ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
	ha->tgt.atio_ring_index = 0;
	/* Initialize ATIO queue entries */
	qlt_init_atio_q_entries(vha);

	ha->isp_ops->config_rings(vha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n");

	/* ISPFX00 issues its own init and skips the FWI2 options below. */
	if (IS_QLAFX00(ha)) {
		rval = qlafx00_init_firmware(vha, ha->init_cb_size);
		goto next_check;
	}

	/* Update any ISP specific firmware options before initialization. */
	ha->isp_ops->update_fw_options(vha);

	if (ha->flags.npiv_supported) {
		if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
			ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
		mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
	}

	if (IS_FWI2_CAPABLE(ha)) {
		mid_init_cb->options = cpu_to_le16(BIT_1);
		mid_init_cb->init_cb.execution_throttle =
		    cpu_to_le16(ha->cur_fw_xcb_count);
		/* D-port diagnostic support as advertised in the ICB. */
		ha->flags.dport_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0;
		ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n",
		    (ha->flags.dport_enabled) ? "enabled" : "disabled");
		/* FA-WWPN Status */
		ha->flags.fawwpn_enabled =
		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0;
		ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n",
		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
	}

	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x00d2,
		    "Init Firmware **** FAILED ****.\n");
	} else {
		ql_dbg(ql_dbg_init, vha, 0x00d3,
		    "Init Firmware -- success.\n");
		QLA_FW_STARTED(ha);
		/* User-requested exchange overrides are now consumed. */
		vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
	}

	return (rval);
}
4107
4108 /**
4109 * qla2x00_fw_ready() - Waits for firmware ready.
4110 * @vha: HA context
4111 *
4112 * Returns 0 on success.
4113 */
static int
qla2x00_fw_ready(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long wtime, mtime, cs84xx_time;
	uint16_t min_wait;	/* Minimum wait time if loop is down */
	uint16_t wait_time;	/* Wait time if loop is coming ready */
	uint16_t state[6];
	struct qla_hw_data *ha = vha->hw;

	/* ISPFX00 has its own firmware-ready handshake. */
	if (IS_QLAFX00(vha->hw))
		return qlafx00_fw_ready(vha);

	rval = QLA_SUCCESS;

	/* Time to wait for loop down */
	if (IS_P3P_TYPE(ha))
		min_wait = 30;
	else
		min_wait = 20;

	/*
	 * Firmware should take at most one RATOV to login, plus 5 seconds for
	 * our own processing.
	 */
	if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
		wait_time = min_wait;
	}

	/* Min wait time if loop down */
	mtime = jiffies + (min_wait * HZ);

	/* wait time before firmware ready */
	wtime = jiffies + (wait_time * HZ);

	/* Wait for ISP to finish LIP */
	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x801e,
		    "Waiting for LIP to complete.\n");

	/* Poll firmware state every 500ms until ready, cable-out,
	 * mailbox failure, or the wait window expires. */
	do {
		/* Pre-fill with 0xffff so unreported words are obvious. */
		memset(state, -1, sizeof(state));
		rval = qla2x00_get_firmware_state(vha, state);
		if (rval == QLA_SUCCESS) {
			/* Any state below LOSS_OF_SYNC means a cable is
			 * present. */
			if (state[0] < FSTATE_LOSS_OF_SYNC) {
				vha->device_flags &= ~DFLG_NO_CABLE;
			}
			if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x801f,
				    "fw_state=%x 84xx=%x.\n", state[0],
				    state[2]);
				if ((state[2] & FSTATE_LOGGED_IN) &&
				     (state[2] & FSTATE_WAITING_FOR_VERIFY)) {
					ql_dbg(ql_dbg_taskm, vha, 0x8028,
					    "Sending verify iocb.\n");

					cs84xx_time = jiffies;
					rval = qla84xx_init_chip(vha);
					if (rval != QLA_SUCCESS) {
						ql_log(ql_log_warn,
						    vha, 0x8007,
						    "Init chip failed.\n");
						break;
					}

					/* Add time taken to initialize. */
					cs84xx_time = jiffies - cs84xx_time;
					wtime += cs84xx_time;
					mtime += cs84xx_time;
					ql_dbg(ql_dbg_taskm, vha, 0x8008,
					    "Increasing wait time by %ld. "
					    "New time %ld.\n", cs84xx_time,
					    wtime);
				}
			} else if (state[0] == FSTATE_READY) {
				ql_dbg(ql_dbg_taskm, vha, 0x8037,
				    "F/W Ready - OK.\n");

				/* Refresh login/retry timing from firmware. */
				qla2x00_get_retry_cnt(vha, &ha->retry_count,
				    &ha->login_timeout, &ha->r_a_tov);

				rval = QLA_SUCCESS;
				break;
			}

			rval = QLA_FUNCTION_FAILED;

			if (atomic_read(&vha->loop_down_timer) &&
			    state[0] != FSTATE_READY) {
				/* Loop down. Timeout on min_wait for states
				 * other than Wait for Login.
				 */
				if (time_after_eq(jiffies, mtime)) {
					ql_log(ql_log_info, vha, 0x8038,
					    "Cable is unplugged...\n");

					vha->device_flags |= DFLG_NO_CABLE;
					break;
				}
			}
		} else {
			/* Mailbox cmd failed. Timeout on min_wait. */
			if (time_after_eq(jiffies, mtime) ||
				ha->flags.isp82xx_fw_hung)
				break;
		}

		if (time_after_eq(jiffies, wtime))
			break;

		/* Delay for a while */
		msleep(500);
	} while (1);

	ql_dbg(ql_dbg_taskm, vha, 0x803a,
	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
	    state[1], state[2], state[3], state[4], state[5], jiffies);

	/* Only warn when the failure is not the expected no-cable case. */
	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
		ql_log(ql_log_warn, vha, 0x803b,
		    "Firmware ready **** FAILED ****.\n");
	}

	return (rval);
}
4239
4240 /*
4241 * qla2x00_configure_hba
4242 * Setup adapter context.
4243 *
4244 * Input:
4245 * ha = adapter state pointer.
4246 *
4247 * Returns:
4248 * 0 = success
4249 *
4250 * Context:
4251 * Kernel context.
4252 */
static int
qla2x00_configure_hba(scsi_qla_host_t *vha)
{
	int rval;
	uint16_t loop_id;
	uint16_t topo;
	uint16_t sw_cap;
	uint8_t al_pa;
	uint8_t area;
	uint8_t domain;
	char connect_type[22];
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
	port_id_t id;
	unsigned long flags;

	/* Get host addresses. */
	rval = qla2x00_get_adapter_id(vha,
	    &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
	if (rval != QLA_SUCCESS) {
		/*
		 * Transient failures (loop transition, loop-down timer
		 * running, CNA parts, or the firmware's "no loop id yet"
		 * completion with loop_id 0x7) are only logged at debug
		 * level; anything else schedules a full ISP abort.
		 */
		if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
		    IS_CNA_CAPABLE(ha) ||
		    (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
			ql_dbg(ql_dbg_disc, vha, 0x2008,
			    "Loop is in a transition state.\n");
		} else {
			ql_log(ql_log_warn, vha, 0x2009,
			    "Unable to get host loop ID.\n");
			/*
			 * Loop_id 0x1b on the base port of an FWI2 adapter:
			 * attempt an explicit link (re)initialization before
			 * resorting to an ISP abort.
			 */
			if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
			    (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
				ql_log(ql_log_warn, vha, 0x1151,
				    "Doing link init.\n");
				if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
					return rval;
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		return (rval);
	}

	/* topo 4: firmware could not determine the topology yet. */
	if (topo == 4) {
		ql_log(ql_log_info, vha, 0x200a,
		    "Cannot get topology - retrying.\n");
		return (QLA_FUNCTION_FAILED);
	}

	vha->loop_id = loop_id;

	/* initialize */
	ha->min_external_loopid = SNS_FIRST_LOOP_ID;
	ha->operating_mode = LOOP;
	ha->switch_cap = 0;

	/* Translate the firmware topology code into driver state. */
	switch (topo) {
	case 0:
		ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n");
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;

	case 1:
		ql_dbg(ql_dbg_disc, vha, 0x200c, "HBA in FL topology.\n");
		ha->switch_cap = sw_cap;
		ha->current_topology = ISP_CFG_FL;
		strcpy(connect_type, "(FL_Port)");
		break;

	case 2:
		ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n");
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_N;
		strcpy(connect_type, "(N_Port-to-N_Port)");
		break;

	case 3:
		ql_dbg(ql_dbg_disc, vha, 0x200e, "HBA in F P2P topology.\n");
		ha->switch_cap = sw_cap;
		ha->operating_mode = P2P;
		ha->current_topology = ISP_CFG_F;
		strcpy(connect_type, "(F_Port)");
		break;

	default:
		ql_dbg(ql_dbg_disc, vha, 0x200f,
		    "HBA in unknown topology %x, using NL.\n", topo);
		ha->current_topology = ISP_CFG_NL;
		strcpy(connect_type, "(Loop)");
		break;
	}

	/* Save Host port and loop ID. */
	/* byte order - Big Endian */
	id.b.domain = domain;
	id.b.area = area;
	id.b.al_pa = al_pa;
	id.b.rsvd_1 = 0;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/*
	 * In N2N with the remote side being "bigger", the host map is
	 * updated elsewhere; skip it here to avoid clobbering it.
	 */
	if (!(topo == 2 && ha->flags.n2n_bigger))
		qlt_update_host_map(vha, id);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (!vha->flags.init_done)
		ql_log(ql_log_info, vha, 0x2010,
		    "Topology - %s, Host Loop address 0x%x.\n",
		    connect_type, vha->loop_id);

	return(rval);
}
4361
4362 inline void
qla2x00_set_model_info(scsi_qla_host_t * vha,uint8_t * model,size_t len,const char * def)4363 qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
4364 const char *def)
4365 {
4366 char *st, *en;
4367 uint16_t index;
4368 uint64_t zero[2] = { 0 };
4369 struct qla_hw_data *ha = vha->hw;
4370 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
4371 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
4372
4373 if (len > sizeof(zero))
4374 len = sizeof(zero);
4375 if (memcmp(model, &zero, len) != 0) {
4376 memcpy(ha->model_number, model, len);
4377 st = en = ha->model_number;
4378 en += len - 1;
4379 while (en > st) {
4380 if (*en != 0x20 && *en != 0x00)
4381 break;
4382 *en-- = '\0';
4383 }
4384
4385 index = (ha->pdev->subsystem_device & 0xff);
4386 if (use_tbl &&
4387 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4388 index < QLA_MODEL_NAMES)
4389 strlcpy(ha->model_desc,
4390 qla2x00_model_name[index * 2 + 1],
4391 sizeof(ha->model_desc));
4392 } else {
4393 index = (ha->pdev->subsystem_device & 0xff);
4394 if (use_tbl &&
4395 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
4396 index < QLA_MODEL_NAMES) {
4397 strlcpy(ha->model_number,
4398 qla2x00_model_name[index * 2],
4399 sizeof(ha->model_number));
4400 strlcpy(ha->model_desc,
4401 qla2x00_model_name[index * 2 + 1],
4402 sizeof(ha->model_desc));
4403 } else {
4404 strlcpy(ha->model_number, def,
4405 sizeof(ha->model_number));
4406 }
4407 }
4408 if (IS_FWI2_CAPABLE(ha))
4409 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
4410 sizeof(ha->model_desc));
4411 }
4412
4413 /* On sparc systems, obtain port and node WWN from firmware
4414 * properties.
4415 */
qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t * vha,nvram_t * nv)4416 static void qla2xxx_nvram_wwn_from_ofw(scsi_qla_host_t *vha, nvram_t *nv)
4417 {
4418 #ifdef CONFIG_SPARC
4419 struct qla_hw_data *ha = vha->hw;
4420 struct pci_dev *pdev = ha->pdev;
4421 struct device_node *dp = pci_device_to_OF_node(pdev);
4422 const u8 *val;
4423 int len;
4424
4425 val = of_get_property(dp, "port-wwn", &len);
4426 if (val && len >= WWN_SIZE)
4427 memcpy(nv->port_name, val, WWN_SIZE);
4428
4429 val = of_get_property(dp, "node-wwn", &len);
4430 if (val && len >= WWN_SIZE)
4431 memcpy(nv->node_name, val, WWN_SIZE);
4432 #endif
4433 }
4434
4435 /*
4436 * NVRAM configuration for ISP 2xxx
4437 *
4438 * Input:
4439 * ha = adapter block pointer.
4440 *
4441 * Output:
4442 * initialization control block in response_ring
4443 * host adapters parameters in host adapter block
4444 *
4445 * Returns:
4446 * 0 = success.
4447 */
int
qla2x00_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	uint8_t chksum = 0;
	uint16_t cnt;
	uint8_t *dptr1, *dptr2;
	struct qla_hw_data *ha = vha->hw;
	init_cb_t *icb = ha->init_cb;
	nvram_t *nv = ha->nvram;
	uint8_t *ptr = ha->nvram;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	rval = QLA_SUCCESS;

	/* Determine NVRAM starting address. */
	ha->nvram_size = sizeof(*nv);
	ha->nvram_base = 0;
	/*
	 * Function 1 of a dual-function part (ctrl_status bits 15:14 == 1)
	 * keeps its NVRAM image at offset 0x80.
	 */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
		if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1)
			ha->nvram_base = 0x80;

	/* Get NVRAM data and calculate checksum. */
	ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
		chksum += *ptr++;

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f,
	    "Contents of NVRAM.\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	/*
	 * Valid NVRAM sums to zero (mod 256), starts with "ISP " and has
	 * a version of at least 1; otherwise synthesize chip-specific
	 * defaults and report the fallback via rval = 1.
	 */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    nv->nvram_version < 1) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x0064,
		    "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_log(ql_log_warn, vha, 0x0065,
		    "Falling back to "
		    "functioning (yet invalid -- WWPN) defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->parameter_block_version = ICB_VERSION;

		if (IS_QLA23XX(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 2048;
			nv->special_options[1] = BIT_7;
		} else if (IS_QLA2200(ha)) {
			nv->firmware_options[0] = BIT_2 | BIT_1;
			nv->firmware_options[1] = BIT_7 | BIT_5;
			nv->add_firmware_options[0] = BIT_5;
			nv->add_firmware_options[1] = BIT_5 | BIT_4;
			nv->frame_payload_size = 1024;
		} else if (IS_QLA2100(ha)) {
			nv->firmware_options[0] = BIT_3 | BIT_1;
			nv->firmware_options[1] = BIT_5;
			nv->frame_payload_size = 1024;
		}

		nv->max_iocb_allocation = cpu_to_le16(256);
		nv->execution_throttle = cpu_to_le16(16);
		nv->retry_count = 8;
		nv->retry_delay = 1;

		/* Deliberately invalid default WWPN (21:00:00:e0:8b:...). */
		nv->port_name[0] = 33;
		nv->port_name[3] = 224;
		nv->port_name[4] = 139;

		qla2xxx_nvram_wwn_from_ofw(vha, nv);

		nv->login_timeout = 4;

		/*
		 * Set default host adapter parameters
		 */
		nv->host_p[1] = BIT_2;
		nv->reset_delay = 5;
		nv->port_down_retry_count = 8;
		nv->max_luns_per_target = cpu_to_le16(8);
		nv->link_down_timeout = 60;

		rval = 1;
	}

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/*
	 * Setup driver NVRAM options.
	 */
	nv->firmware_options[0] |= (BIT_6 | BIT_1);
	nv->firmware_options[0] &= ~(BIT_5 | BIT_4);
	nv->firmware_options[1] |= (BIT_5 | BIT_0);
	nv->firmware_options[1] &= ~BIT_4;

	if (IS_QLA23XX(ha)) {
		nv->firmware_options[0] |= BIT_2;
		nv->firmware_options[0] &= ~BIT_3;
		nv->special_options[0] &= ~BIT_6;
		nv->add_firmware_options[1] |= BIT_5 | BIT_4;

		if (IS_QLA2300(ha)) {
			/* Distinguish 2310 from 2300 by frame-buffer rev. */
			if (ha->fb_rev == FPM_2310) {
				strcpy(ha->model_number, "QLA2310");
			} else {
				strcpy(ha->model_number, "QLA2300");
			}
		} else {
			qla2x00_set_model_info(vha, nv->model_number,
			    sizeof(nv->model_number), "QLA23xx");
		}
	} else if (IS_QLA2200(ha)) {
		nv->firmware_options[0] |= BIT_2;
		/*
		 * 'Point-to-point preferred, else loop' is not a safe
		 * connection mode setting.
		 */
		if ((nv->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) ==
		    (BIT_5 | BIT_4)) {
			/* Force 'loop preferred, else point-to-point'. */
			nv->add_firmware_options[0] &= ~(BIT_6 | BIT_5 | BIT_4);
			nv->add_firmware_options[0] |= BIT_5;
		}
		strcpy(ha->model_number, "QLA22xx");
	} else /*if (IS_QLA2100(ha))*/ {
		strcpy(ha->model_number, "QLA2100");
	}

	/*
	 * Copy over NVRAM RISC parameter block to initialization control block.
	 */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->parameter_block_version;
	cnt = (uint8_t *)&icb->request_q_outpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	/* Copy 2nd half. */
	/* dptr2 continues from where the first copy left off in the NVRAM. */
	dptr1 = (uint8_t *)icb->add_firmware_options;
	cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/* Use alternate WWN? */
	if (nv->host_p[1] & BIT_7) {
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options[1] & BIT_6) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/*
	 * Set host adapter parameters.
	 */

	/*
	 * BIT_7 in the host-parameters section allows for modification to
	 * internal driver logging.
	 */
	if (nv->host_p[0] & BIT_7)
		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
	ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
	/* Always load RISC code on non ISP2[12]00 chips. */
	if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
		ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
	ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
	ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
	ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
	ha->flags.disable_serdes = 0;

	ha->operating_mode =
	    (icb->add_firmware_options[0] & (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = nv->retry_count;

	/* Set minimum login_timeout to 4 seconds. */
	if (nv->login_timeout != ql2xlogintimeout)
		nv->login_timeout = ql2xlogintimeout;
	if (nv->login_timeout < 4)
		nv->login_timeout = 4;
	ha->login_timeout = nv->login_timeout;

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (nv->link_down_timeout == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout =	 nv->link_down_timeout;
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/*
	 * Need enough time to try and get the port back.
	 */
	ha->port_down_retry_count = nv->port_down_retry_count;
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;
	/* Set login_retry_count */
	ha->login_retry_count  = nv->retry_count;
	if (ha->port_down_retry_count == nv->port_down_retry_count &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	icb->lun_enables = cpu_to_le16(0);
	icb->command_resource_count = 0;
	icb->immediate_notify_resource_count = 0;
	icb->timeout = cpu_to_le16(0);

	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
		/* Enable RIO */
		icb->firmware_options[0] &= ~BIT_3;
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		icb->add_firmware_options[0] |= BIT_2;
		icb->response_accumulation_timer = 3;
		icb->interrupt_delay_timer = 5;

		vha->flags.process_response_queue = 1;
	} else {
		/* Enable ZIO. */
		if (!vha->flags.init_done) {
			ha->zio_mode = icb->add_firmware_options[0] &
			    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
			ha->zio_timer = icb->interrupt_delay_timer ?
			    icb->interrupt_delay_timer : 2;
		}
		icb->add_firmware_options[0] &=
		    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
		vha->flags.process_response_queue = 0;
		if (ha->zio_mode != QLA_ZIO_DISABLED) {
			ha->zio_mode = QLA_ZIO_MODE_6;

			ql_log(ql_log_info, vha, 0x0068,
			    "ZIO mode %d enabled; timer delay (%d us).\n",
			    ha->zio_mode, ha->zio_timer * 100);

			icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
			icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
			vha->flags.process_response_queue = 1;
		}
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0069,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
4745
4746 static void
qla2x00_rport_del(void * data)4747 qla2x00_rport_del(void *data)
4748 {
4749 fc_port_t *fcport = data;
4750 struct fc_rport *rport;
4751 unsigned long flags;
4752
4753 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
4754 rport = fcport->drport ? fcport->drport : fcport->rport;
4755 fcport->drport = NULL;
4756 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
4757 if (rport) {
4758 ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
4759 "%s %8phN. rport %p roles %x\n",
4760 __func__, fcport->port_name, rport,
4761 rport->roles);
4762
4763 fc_remote_port_delete(rport);
4764 }
4765 }
4766
/*
 * Move @fcport to @state and trace the transition.  The transition is not
 * logged when the previous state is 0 (fresh allocation, state not yet
 * assigned) or when the state is unchanged.
 */
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int prev_state = atomic_read(&fcport->state);

	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (!prev_state || prev_state == state)
		return;

	ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
	    "FCPort %8phC state transitioned from %s to %s - portid=%02x%02x%02x.\n",
	    fcport->port_name, port_state_str[prev_state],
	    port_state_str[state], fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
}
4783
4784 /**
4785 * qla2x00_alloc_fcport() - Allocate a generic fcport.
4786 * @vha: HA context
4787 * @flags: allocation flags
4788 *
4789 * Returns a pointer to the allocated fcport, or NULL, if none available.
4790 */
4791 fc_port_t *
qla2x00_alloc_fcport(scsi_qla_host_t * vha,gfp_t flags)4792 qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
4793 {
4794 fc_port_t *fcport;
4795
4796 fcport = kzalloc(sizeof(fc_port_t), flags);
4797 if (!fcport)
4798 return NULL;
4799
4800 fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
4801 sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
4802 flags);
4803 if (!fcport->ct_desc.ct_sns) {
4804 ql_log(ql_log_warn, vha, 0xd049,
4805 "Failed to allocate ct_sns request.\n");
4806 kfree(fcport);
4807 return NULL;
4808 }
4809
4810 /* Setup fcport template structure. */
4811 fcport->vha = vha;
4812 fcport->port_type = FCT_UNKNOWN;
4813 fcport->loop_id = FC_NO_LOOP_ID;
4814 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
4815 fcport->supported_classes = FC_COS_UNSPECIFIED;
4816 fcport->fp_speed = PORT_SPEED_UNKNOWN;
4817
4818 fcport->disc_state = DSC_DELETED;
4819 fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
4820 fcport->deleted = QLA_SESS_DELETED;
4821 fcport->login_retry = vha->hw->login_retry_count;
4822 fcport->chip_reset = vha->hw->base_qpair->chip_reset;
4823 fcport->logout_on_delete = 1;
4824
4825 if (!fcport->ct_desc.ct_sns) {
4826 ql_log(ql_log_warn, vha, 0xd049,
4827 "Failed to allocate ct_sns request.\n");
4828 kfree(fcport);
4829 return NULL;
4830 }
4831
4832 INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
4833 INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
4834 INIT_LIST_HEAD(&fcport->gnl_entry);
4835 INIT_LIST_HEAD(&fcport->list);
4836
4837 return fcport;
4838 }
4839
4840 void
qla2x00_free_fcport(fc_port_t * fcport)4841 qla2x00_free_fcport(fc_port_t *fcport)
4842 {
4843 if (fcport->ct_desc.ct_sns) {
4844 dma_free_coherent(&fcport->vha->hw->pdev->dev,
4845 sizeof(struct ct_sns_pkt), fcport->ct_desc.ct_sns,
4846 fcport->ct_desc.ct_sns_dma);
4847
4848 fcport->ct_desc.ct_sns = NULL;
4849 }
4850 list_del(&fcport->list);
4851 qla2x00_clear_loop_id(fcport);
4852 kfree(fcport);
4853 }
4854
4855 /*
4856 * qla2x00_configure_loop
4857 * Updates Fibre Channel Device Database with what is actually on loop.
4858 *
4859 * Input:
4860 * ha = adapter block pointer.
4861 *
4862 * Returns:
4863 * 0 = success.
4864 * 1 = error.
4865 * 2 = database was full and device was not configured.
4866 */
static int
qla2x00_configure_loop(scsi_qla_host_t *vha)
{
	int rval;
	unsigned long flags, save_flags;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;

	/* Get Initiator ID */
	if (test_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags)) {
		rval = qla2x00_configure_hba(vha);
		if (rval != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2013,
			    "Unable to configure HBA.\n");
			return (rval);
		}
	}

	/*
	 * Work on a local snapshot of dpc_flags; save_flags keeps the
	 * original so the update bits can be re-armed if a resync is
	 * requested while we run.  NOTE: 'flags' is reused below as the
	 * irq-flags word for the atio_lock.
	 */
	save_flags = flags = vha->dpc_flags;
	ql_dbg(ql_dbg_disc, vha, 0x2014,
	    "Configure loop -- dpc flags = 0x%lx.\n", flags);

	/*
	 * If we have both an RSCN and PORT UPDATE pending then handle them
	 * both at the same time.
	 */
	clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	clear_bit(RSCN_UPDATE, &vha->dpc_flags);

	qla2x00_get_data_rate(vha);

	/* Determine what we need to do */
	/* Map the current topology onto local-loop and/or fabric scans. */
	if (ha->current_topology == ISP_CFG_FL &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_F &&
	    (test_bit(LOCAL_LOOP_UPDATE, &flags))) {

		set_bit(RSCN_UPDATE, &flags);
		clear_bit(LOCAL_LOOP_UPDATE, &flags);

	} else if (ha->current_topology == ISP_CFG_N) {
		clear_bit(RSCN_UPDATE, &flags);
		if (qla_tgt_mode_enabled(vha)) {
			/* allow the other side to start the login */
			clear_bit(LOCAL_LOOP_UPDATE, &flags);
			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
		}
	} else if (ha->current_topology == ISP_CFG_NL) {
		clear_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	} else if (!vha->flags.online ||
	    (test_bit(ABORT_ISP_ACTIVE, &flags))) {
		/* Unknown/offline state: rescan everything. */
		set_bit(RSCN_UPDATE, &flags);
		set_bit(LOCAL_LOOP_UPDATE, &flags);
	}

	if (test_bit(LOCAL_LOOP_UPDATE, &flags)) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			ql_dbg(ql_dbg_disc, vha, 0x2015,
			    "Loop resync needed, failing.\n");
			rval = QLA_FUNCTION_FAILED;
		} else
			rval = qla2x00_configure_local_loop(vha);
	}

	if (rval == QLA_SUCCESS && test_bit(RSCN_UPDATE, &flags)) {
		if (LOOP_TRANSITION(vha)) {
			ql_dbg(ql_dbg_disc, vha, 0x2099,
			    "Needs RSCN update and loop transition.\n");
			rval = QLA_FUNCTION_FAILED;
		}
		else
			rval = qla2x00_configure_fabric(vha);
	}

	if (rval == QLA_SUCCESS) {
		if (atomic_read(&vha->loop_down_timer) ||
		    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
			rval = QLA_FUNCTION_FAILED;
		} else {
			atomic_set(&vha->loop_state, LOOP_READY);
			ql_dbg(ql_dbg_disc, vha, 0x2069,
			    "LOOP READY.\n");
			ha->flags.fw_init_done = 1;

			/*
			 * Process any ATIO queue entries that came in
			 * while we weren't online.
			 */
			if (qla_tgt_mode_enabled(vha) ||
			    qla_dual_mode_enabled(vha)) {
				spin_lock_irqsave(&ha->tgt.atio_lock, flags);
				qlt_24xx_process_atio_queue(vha, 0);
				spin_unlock_irqrestore(&ha->tgt.atio_lock,
				    flags);
			}
		}
	}

	if (rval) {
		ql_dbg(ql_dbg_disc, vha, 0x206a,
		    "%s *** FAILED ***.\n", __func__);
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x206b,
		    "%s: exiting normally.\n", __func__);
	}

	/* Restore state if a resync event occurred during processing */
	if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		if (test_bit(RSCN_UPDATE, &save_flags)) {
			set_bit(RSCN_UPDATE, &vha->dpc_flags);
		}
	}

	return (rval);
}
4989
4990 /*
4991 * qla2x00_configure_local_loop
4992 * Updates Fibre Channel Device Database with local loop devices.
4993 *
4994 * Input:
4995 * ha = adapter block pointer.
4996 *
4997 * Returns:
4998 * 0 = success.
4999 */
static int
qla2x00_configure_local_loop(scsi_qla_host_t *vha)
{
	int rval, rval2;
	int found_devs;
	int found;
	fc_port_t *fcport, *new_fcport;

	uint16_t index;
	uint16_t entries;
	struct gid_list_info *gid;
	uint16_t loop_id;
	uint8_t domain, area, al_pa;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Initiate N2N login. */
	if (N2N_TOPO(ha)) {
		if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
			/*
			 * Borrow the init_cb buffer to fetch the firmware's
			 * PLOGI ELS template, byte-swap it to big-endian
			 * and stash it for later N2N PLOGI payloads.
			 */
			u32 *bp, i, sz;

			memset(ha->init_cb, 0, ha->init_cb_size);
			sz = min_t(int, sizeof(struct els_plogi_payload),
			    ha->init_cb_size);
			rval = qla24xx_get_port_login_templ(vha,
			    ha->init_cb_dma, (void *)ha->init_cb, sz);
			if (rval == QLA_SUCCESS) {
				bp = (uint32_t *)ha->init_cb;
				for (i = 0; i < sz/4 ; i++, bp++)
					*bp = cpu_to_be32(*bp);

				memcpy(&ha->plogi_els_payld.data,
				    (void *)ha->init_cb,
				    sizeof(ha->plogi_els_payld.data));
				set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
			} else {
				ql_dbg(ql_dbg_init, vha, 0x00d1,
				    "PLOGI ELS param read fail.\n");
				goto skip_login;
			}
		}

		/* Kick off login on the first known N2N peer, if any. */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->n2n_flag) {
				qla24xx_fcport_handle_login(vha, fcport);
				return QLA_SUCCESS;
			}
		}
skip_login:
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		/* Schedule a bounded number of rescans. */
		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	}

	found_devs = 0;
	new_fcport = NULL;
	entries = MAX_FIBRE_DEVICES_LOOP;

	/* Get list of logged in devices. */
	memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
	    &entries);
	if (rval != QLA_SUCCESS)
		goto cleanup_allocation;

	ql_dbg(ql_dbg_disc, vha, 0x2011,
	    "Entries in ID list (%d).\n", entries);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
	    ha->gid_list, entries * sizeof(*ha->gid_list));

	/* An empty ID list may be transient -- retry a bounded number. */
	if (entries == 0) {
		spin_lock_irqsave(&vha->work_lock, flags);
		vha->scan.scan_retry++;
		spin_unlock_irqrestore(&vha->work_lock, flags);

		if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}
	} else {
		vha->scan.scan_retry = 0;
	}

	/* Mark all known ports as not-yet-seen in this scan. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->scan_state = QLA_FCPORT_SCAN;
	}

	/* Allocate temporary fcport for any new fcports discovered. */
	new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (new_fcport == NULL) {
		ql_log(ql_log_warn, vha, 0x2012,
		    "Memory allocation failed for fcport.\n");
		rval = QLA_MEMORY_ALLOC_FAILED;
		goto cleanup_allocation;
	}
	new_fcport->flags &= ~FCF_FABRIC_DEVICE;

	/* Add devices to port list. */
	gid = ha->gid_list;
	for (index = 0; index < entries; index++) {
		domain = gid->domain;
		area = gid->area;
		al_pa = gid->al_pa;
		/* ISP2100/2200 report 8-bit loop IDs; later chips 16-bit. */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			loop_id = gid->loop_id_2100;
		else
			loop_id = le16_to_cpu(gid->loop_id);
		gid = (void *)gid + ha->gid_list_info_size;

		/* Bypass reserved domain fields. */
		if ((domain & 0xf0) == 0xf0)
			continue;

		/* Bypass if not same domain and area of adapter. */
		if (area && domain && ((area != vha->d_id.b.area) ||
		    (domain != vha->d_id.b.domain)) &&
		    (ha->current_topology == ISP_CFG_NL))
			continue;


		/* Bypass invalid local loop ID. */
		if (loop_id > LAST_LOCAL_LOOP_ID)
			continue;

		memset(new_fcport->port_name, 0, WWN_SIZE);

		/* Fill in member data. */
		new_fcport->d_id.b.domain = domain;
		new_fcport->d_id.b.area = area;
		new_fcport->d_id.b.al_pa = al_pa;
		new_fcport->loop_id = loop_id;
		new_fcport->scan_state = QLA_FCPORT_FOUND;

		rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
		if (rval2 != QLA_SUCCESS) {
			ql_dbg(ql_dbg_disc, vha, 0x2097,
			    "Failed to retrieve fcport information "
			    "-- get_port_database=%x, loop_id=0x%04x.\n",
			    rval2, new_fcport->loop_id);
			/* Skip retry if N2N */
			if (ha->current_topology != ISP_CFG_N) {
				ql_dbg(ql_dbg_disc, vha, 0x2105,
				    "Scheduling resync.\n");
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				continue;
			}
		}

		spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
		/* Check for matching device in port list. */
		found = 0;
		fcport = NULL;
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (memcmp(new_fcport->port_name, fcport->port_name,
			    WWN_SIZE))
				continue;

			/* Same WWPN: refresh the existing entry in place. */
			fcport->flags &= ~FCF_FABRIC_DEVICE;
			fcport->loop_id = new_fcport->loop_id;
			fcport->port_type = new_fcport->port_type;
			fcport->d_id.b24 = new_fcport->d_id.b24;
			memcpy(fcport->node_name, new_fcport->node_name,
			    WWN_SIZE);
			fcport->scan_state = QLA_FCPORT_FOUND;
			found++;
			break;
		}

		if (!found) {
			/* New device, add to fcports list. */
			list_add_tail(&new_fcport->list, &vha->vp_fcports);

			/* Allocate a new replacement fcport. */
			fcport = new_fcport;

			/* Drop sess_lock around the GFP_KERNEL allocation. */
			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

			new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);

			if (new_fcport == NULL) {
				ql_log(ql_log_warn, vha, 0xd031,
				    "Failed to allocate memory for fcport.\n");
				rval = QLA_MEMORY_ALLOC_FAILED;
				goto cleanup_allocation;
			}
			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
			new_fcport->flags &= ~FCF_FABRIC_DEVICE;
		}

		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);

		/* Base iIDMA settings on HBA port speed. */
		fcport->fp_speed = ha->link_data_rate;

		found_devs++;
	}

	/*
	 * Second pass: ports still marked QLA_FCPORT_SCAN were not seen in
	 * this scan and get torn down; ports marked FOUND proceed to login.
	 */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
			break;

		if (fcport->scan_state == QLA_FCPORT_SCAN) {
			if ((qla_dual_mode_enabled(vha) ||
			    qla_ini_mode_enabled(vha)) &&
			    atomic_read(&fcport->state) == FCS_ONLINE) {
				qla2x00_mark_device_lost(vha, fcport,
					ql2xplogiabsentdevice, 0);
				if (fcport->loop_id != FC_NO_LOOP_ID &&
				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
				    fcport->port_type != FCT_INITIATOR &&
				    fcport->port_type != FCT_BROADCAST) {
					ql_dbg(ql_dbg_disc, vha, 0x20f0,
					    "%s %d %8phC post del sess\n",
					    __func__, __LINE__,
					    fcport->port_name);

					qlt_schedule_sess_for_deletion(fcport);
					continue;
				}
			}
		}

		if (fcport->scan_state == QLA_FCPORT_FOUND)
			qla24xx_fcport_handle_login(vha, fcport);
	}

cleanup_allocation:
	kfree(new_fcport);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_disc, vha, 0x2098,
		    "Configure local loop error exit: rval=%x.\n", rval);
	}

	return (rval);
}
5242
5243 static void
qla2x00_iidma_fcport(scsi_qla_host_t * vha,fc_port_t * fcport)5244 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
5245 {
5246 int rval;
5247 uint16_t mb[MAILBOX_REGISTER_COUNT];
5248 struct qla_hw_data *ha = vha->hw;
5249
5250 if (!IS_IIDMA_CAPABLE(ha))
5251 return;
5252
5253 if (atomic_read(&fcport->state) != FCS_ONLINE)
5254 return;
5255
5256 if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
5257 fcport->fp_speed > ha->link_data_rate ||
5258 !ha->flags.gpsc_supported)
5259 return;
5260
5261 rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
5262 mb);
5263 if (rval != QLA_SUCCESS) {
5264 ql_dbg(ql_dbg_disc, vha, 0x2004,
5265 "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
5266 fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
5267 } else {
5268 ql_dbg(ql_dbg_disc, vha, 0x2005,
5269 "iIDMA adjusted to %s GB/s (%X) on %8phN.\n",
5270 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
5271 fcport->fp_speed, fcport->port_name);
5272 }
5273 }
5274
/*
 * Worker body for QLA_EVT_IIDMA events: apply the iIDMA port speed and
 * refresh the FCP priority settings for @fcport.
 */
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	qla2x00_iidma_fcport(vha, fcport);
	qla24xx_update_fcport_fcp_prio(vha, fcport);
}
5280
/*
 * Queue a QLA_EVT_IIDMA work item for @fcport on @vha's work list.
 * Returns QLA_FUNCTION_FAILED if no work-event slot could be allocated,
 * otherwise the result of posting the work.
 */
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
	struct qla_work_evt *e = qla2x00_alloc_work(vha, QLA_EVT_IIDMA);

	if (!e)
		return QLA_FUNCTION_FAILED;

	e->u.fcport.fcport = fcport;
	return qla2x00_post_work(vha, e);
}
5292
/* qla2x00_reg_remote_port is reserved for Initiator Mode only.*/
static void
qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *rport;
	unsigned long flags;

	/* Port already online -- presumably already registered; skip.
	 * NOTE(review): relies on caller setting state after this call. */
	if (atomic_read(&fcport->state) == FCS_ONLINE)
		return;

	rport_ids.node_name = wwn_to_u64(fcport->node_name);
	rport_ids.port_name = wwn_to_u64(fcport->port_name);
	rport_ids.port_id = fcport->d_id.b.domain << 16 |
	    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
	/* Register with unknown roles first; real roles are set below
	 * via fc_remote_port_rolechg() once dd_data is wired up. */
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
	fcport->rport = rport = fc_remote_port_add(vha->host, 0, &rport_ids);
	if (!rport) {
		ql_log(ql_log_warn, vha, 0x2006,
		    "Unable to allocate fc remote port.\n");
		return;
	}

	/* Link the transport rport back to our fcport under the host lock. */
	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
	*((fc_port_t **)rport->dd_data) = fcport;
	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);

	rport->supported_classes = fcport->supported_classes;

	/* Translate driver port_type into FC transport roles. */
	rport_ids.roles = FC_PORT_ROLE_UNKNOWN;
	if (fcport->port_type == FCT_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
	if (fcport->port_type == FCT_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
	if (fcport->port_type & FCT_NVME_INITIATOR)
		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
	if (fcport->port_type & FCT_NVME_TARGET)
		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
	if (fcport->port_type & FCT_NVME_DISCOVERY)
		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;

	ql_dbg(ql_dbg_disc, vha, 0x20ee,
	    "%s %8phN. rport %p is %s mode\n",
	    __func__, fcport->port_name, rport,
	    (fcport->port_type == FCT_TARGET) ? "tgt" :
	    ((fcport->port_type & FCT_NVME) ? "nvme" : "ini"));

	fc_remote_port_rolechg(rport, rport_ids.roles);
}
5342
5343 /*
5344 * qla2x00_update_fcport
5345 * Updates device on list.
5346 *
5347 * Input:
5348 * ha = adapter block pointer.
5349 * fcport = port structure pointer.
5350 *
5351 * Return:
5352 * 0 - Success
5353 * BIT_0 - error
5354 *
5355 * Context:
5356 * Kernel context.
5357 */
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
	/* Reserved switch addresses are never registered. */
	if (IS_SW_RESV_ADDR(fcport->d_id))
		return;

	ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
	    __func__, fcport->port_name);

	/* Mark "registration in progress" and reset per-login state. */
	fcport->disc_state = DSC_UPD_FCPORT;
	fcport->login_retry = vha->hw->login_retry_count;
	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
	fcport->deleted = 0;
	fcport->logout_on_delete = 1;
	fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;

	switch (vha->hw->current_topology) {
	case ISP_CFG_N:
	case ISP_CFG_NL:
		/* N2N/loop topologies keep the FW-assigned N-Port handle. */
		fcport->keep_nport_handle = 1;
		break;
	default:
		break;
	}

	/* Apply iIDMA link-speed settings for this port. */
	qla2x00_iidma_fcport(vha, fcport);

	if (fcport->fc4f_nvme) {
		/* NVMe-capable port: register with the NVMe transport
		 * instead of the FC/target paths below and finish here. */
		qla_nvme_register_remote(vha, fcport);
		fcport->disc_state = DSC_LOGIN_COMPLETE;
		qla2x00_set_fcport_state(fcport, FCS_ONLINE);
		return;
	}

	qla24xx_update_fcport_fcp_prio(vha, fcport);

	/* Register with the FC transport and/or the target core,
	 * depending on the host's active mode. */
	switch (vha->host->active_mode) {
	case MODE_INITIATOR:
		qla2x00_reg_remote_port(vha, fcport);
		break;
	case MODE_TARGET:
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	case MODE_DUAL:
		qla2x00_reg_remote_port(vha, fcport);
		if (!vha->vha_tgt.qla_tgt->tgt_stop &&
			!vha->vha_tgt.qla_tgt->tgt_stopped)
			qlt_fc_port_added(vha, fcport);
		break;
	default:
		break;
	}

	qla2x00_set_fcport_state(fcport, FCS_ONLINE);

	if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
		if (fcport->id_changed) {
			/* Port ID changed: re-query the fabric port name. */
			fcport->id_changed = 0;
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gfpnid fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gfpnid_work(vha, fcport);
		} else {
			/* Query the port speed (GPSC) for iIDMA. */
			ql_dbg(ql_dbg_disc, vha, 0x20d7,
			    "%s %d %8phC post gpsc fcp_cnt %d\n",
			    __func__, __LINE__, fcport->port_name,
			    vha->fcport_count);
			qla24xx_post_gpsc_work(vha, fcport);
		}
	}

	fcport->disc_state = DSC_LOGIN_COMPLETE;
}
5434
qla_register_fcport_fn(struct work_struct * work)5435 void qla_register_fcport_fn(struct work_struct *work)
5436 {
5437 fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
5438 u32 rscn_gen = fcport->rscn_gen;
5439 u16 data[2];
5440
5441 if (IS_SW_RESV_ADDR(fcport->d_id))
5442 return;
5443
5444 qla2x00_update_fcport(fcport->vha, fcport);
5445
5446 if (rscn_gen != fcport->rscn_gen) {
5447 /* RSCN(s) came in while registration */
5448 switch (fcport->next_disc_state) {
5449 case DSC_DELETE_PEND:
5450 qlt_schedule_sess_for_deletion(fcport);
5451 break;
5452 case DSC_ADISC:
5453 data[0] = data[1] = 0;
5454 qla2x00_post_async_adisc_work(fcport->vha, fcport,
5455 data);
5456 break;
5457 default:
5458 break;
5459 }
5460 }
5461 }
5462
5463 /*
5464 * qla2x00_configure_fabric
5465 * Setup SNS devices with loop ID's.
5466 *
5467 * Input:
5468 * ha = adapter block pointer.
5469 *
5470 * Returns:
5471 * 0 = success.
5472 * BIT_0 = error
5473 */
5474 static int
qla2x00_configure_fabric(scsi_qla_host_t * vha)5475 qla2x00_configure_fabric(scsi_qla_host_t *vha)
5476 {
5477 int rval;
5478 fc_port_t *fcport;
5479 uint16_t mb[MAILBOX_REGISTER_COUNT];
5480 uint16_t loop_id;
5481 LIST_HEAD(new_fcports);
5482 struct qla_hw_data *ha = vha->hw;
5483 int discovery_gen;
5484
5485 /* If FL port exists, then SNS is present */
5486 if (IS_FWI2_CAPABLE(ha))
5487 loop_id = NPH_F_PORT;
5488 else
5489 loop_id = SNS_FL_PORT;
5490 rval = qla2x00_get_port_name(vha, loop_id, vha->fabric_node_name, 1);
5491 if (rval != QLA_SUCCESS) {
5492 ql_dbg(ql_dbg_disc, vha, 0x20a0,
5493 "MBX_GET_PORT_NAME failed, No FL Port.\n");
5494
5495 vha->device_flags &= ~SWITCH_FOUND;
5496 return (QLA_SUCCESS);
5497 }
5498 vha->device_flags |= SWITCH_FOUND;
5499
5500
5501 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
5502 rval = qla2x00_send_change_request(vha, 0x3, 0);
5503 if (rval != QLA_SUCCESS)
5504 ql_log(ql_log_warn, vha, 0x121,
5505 "Failed to enable receiving of RSCN requests: 0x%x.\n",
5506 rval);
5507 }
5508
5509
5510 do {
5511 qla2x00_mgmt_svr_login(vha);
5512
5513 /* FDMI support. */
5514 if (ql2xfdmienable &&
5515 test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags))
5516 qla2x00_fdmi_register(vha);
5517
5518 /* Ensure we are logged into the SNS. */
5519 loop_id = NPH_SNS_LID(ha);
5520 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
5521 0xfc, mb, BIT_1|BIT_0);
5522 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5523 ql_dbg(ql_dbg_disc, vha, 0x20a1,
5524 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x (%x).\n",
5525 loop_id, mb[0], mb[1], mb[2], mb[6], mb[7], rval);
5526 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5527 return rval;
5528 }
5529 if (test_and_clear_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags)) {
5530 if (qla2x00_rft_id(vha)) {
5531 /* EMPTY */
5532 ql_dbg(ql_dbg_disc, vha, 0x20a2,
5533 "Register FC-4 TYPE failed.\n");
5534 if (test_bit(LOOP_RESYNC_NEEDED,
5535 &vha->dpc_flags))
5536 break;
5537 }
5538 if (qla2x00_rff_id(vha, FC4_TYPE_FCP_SCSI)) {
5539 /* EMPTY */
5540 ql_dbg(ql_dbg_disc, vha, 0x209a,
5541 "Register FC-4 Features failed.\n");
5542 if (test_bit(LOOP_RESYNC_NEEDED,
5543 &vha->dpc_flags))
5544 break;
5545 }
5546 if (vha->flags.nvme_enabled) {
5547 if (qla2x00_rff_id(vha, FC_TYPE_NVME)) {
5548 ql_dbg(ql_dbg_disc, vha, 0x2049,
5549 "Register NVME FC Type Features failed.\n");
5550 }
5551 }
5552 if (qla2x00_rnn_id(vha)) {
5553 /* EMPTY */
5554 ql_dbg(ql_dbg_disc, vha, 0x2104,
5555 "Register Node Name failed.\n");
5556 if (test_bit(LOOP_RESYNC_NEEDED,
5557 &vha->dpc_flags))
5558 break;
5559 } else if (qla2x00_rsnn_nn(vha)) {
5560 /* EMPTY */
5561 ql_dbg(ql_dbg_disc, vha, 0x209b,
5562 "Register Symbolic Node Name failed.\n");
5563 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5564 break;
5565 }
5566 }
5567
5568
5569 /* Mark the time right before querying FW for connected ports.
5570 * This process is long, asynchronous and by the time it's done,
5571 * collected information might not be accurate anymore. E.g.
5572 * disconnected port might have re-connected and a brand new
5573 * session has been created. In this case session's generation
5574 * will be newer than discovery_gen. */
5575 qlt_do_generation_tick(vha, &discovery_gen);
5576
5577 if (USE_ASYNC_SCAN(ha)) {
5578 rval = qla24xx_async_gpnft(vha, FC4_TYPE_FCP_SCSI,
5579 NULL);
5580 if (rval)
5581 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5582 } else {
5583 list_for_each_entry(fcport, &vha->vp_fcports, list)
5584 fcport->scan_state = QLA_FCPORT_SCAN;
5585
5586 rval = qla2x00_find_all_fabric_devs(vha);
5587 }
5588 if (rval != QLA_SUCCESS)
5589 break;
5590 } while (0);
5591
5592 if (!vha->nvme_local_port && vha->flags.nvme_enabled)
5593 qla_nvme_register_hba(vha);
5594
5595 if (rval)
5596 ql_dbg(ql_dbg_disc, vha, 0x2068,
5597 "Configure fabric error exit rval=%d.\n", rval);
5598
5599 return (rval);
5600 }
5601
5602 /*
5603 * qla2x00_find_all_fabric_devs
5604 *
5605 * Input:
5606 * ha = adapter block pointer.
5607 * dev = database device entry pointer.
5608 *
5609 * Returns:
5610 * 0 = success.
5611 *
5612 * Context:
5613 * Kernel context.
5614 */
5615 static int
qla2x00_find_all_fabric_devs(scsi_qla_host_t * vha)5616 qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
5617 {
5618 int rval;
5619 uint16_t loop_id;
5620 fc_port_t *fcport, *new_fcport;
5621 int found;
5622
5623 sw_info_t *swl;
5624 int swl_idx;
5625 int first_dev, last_dev;
5626 port_id_t wrap = {}, nxt_d_id;
5627 struct qla_hw_data *ha = vha->hw;
5628 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
5629 unsigned long flags;
5630
5631 rval = QLA_SUCCESS;
5632
5633 /* Try GID_PT to get device list, else GAN. */
5634 if (!ha->swl)
5635 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
5636 GFP_KERNEL);
5637 swl = ha->swl;
5638 if (!swl) {
5639 /*EMPTY*/
5640 ql_dbg(ql_dbg_disc, vha, 0x209c,
5641 "GID_PT allocations failed, fallback on GA_NXT.\n");
5642 } else {
5643 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
5644 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
5645 swl = NULL;
5646 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5647 return rval;
5648 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
5649 swl = NULL;
5650 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5651 return rval;
5652 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
5653 swl = NULL;
5654 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5655 return rval;
5656 } else if (qla2x00_gfpn_id(vha, swl) != QLA_SUCCESS) {
5657 swl = NULL;
5658 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5659 return rval;
5660 }
5661
5662 /* If other queries succeeded probe for FC-4 type */
5663 if (swl) {
5664 qla2x00_gff_id(vha, swl);
5665 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5666 return rval;
5667 }
5668 }
5669 swl_idx = 0;
5670
5671 /* Allocate temporary fcport for any new fcports discovered. */
5672 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5673 if (new_fcport == NULL) {
5674 ql_log(ql_log_warn, vha, 0x209d,
5675 "Failed to allocate memory for fcport.\n");
5676 return (QLA_MEMORY_ALLOC_FAILED);
5677 }
5678 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5679 /* Set start port ID scan at adapter ID. */
5680 first_dev = 1;
5681 last_dev = 0;
5682
5683 /* Starting free loop ID. */
5684 loop_id = ha->min_external_loopid;
5685 for (; loop_id <= ha->max_loop_id; loop_id++) {
5686 if (qla2x00_is_reserved_id(vha, loop_id))
5687 continue;
5688
5689 if (ha->current_topology == ISP_CFG_FL &&
5690 (atomic_read(&vha->loop_down_timer) ||
5691 LOOP_TRANSITION(vha))) {
5692 atomic_set(&vha->loop_down_timer, 0);
5693 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5694 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5695 break;
5696 }
5697
5698 if (swl != NULL) {
5699 if (last_dev) {
5700 wrap.b24 = new_fcport->d_id.b24;
5701 } else {
5702 new_fcport->d_id.b24 = swl[swl_idx].d_id.b24;
5703 memcpy(new_fcport->node_name,
5704 swl[swl_idx].node_name, WWN_SIZE);
5705 memcpy(new_fcport->port_name,
5706 swl[swl_idx].port_name, WWN_SIZE);
5707 memcpy(new_fcport->fabric_port_name,
5708 swl[swl_idx].fabric_port_name, WWN_SIZE);
5709 new_fcport->fp_speed = swl[swl_idx].fp_speed;
5710 new_fcport->fc4_type = swl[swl_idx].fc4_type;
5711
5712 new_fcport->nvme_flag = 0;
5713 new_fcport->fc4f_nvme = 0;
5714 if (vha->flags.nvme_enabled &&
5715 swl[swl_idx].fc4f_nvme) {
5716 new_fcport->fc4f_nvme =
5717 swl[swl_idx].fc4f_nvme;
5718 ql_log(ql_log_info, vha, 0x2131,
5719 "FOUND: NVME port %8phC as FC Type 28h\n",
5720 new_fcport->port_name);
5721 }
5722
5723 if (swl[swl_idx].d_id.b.rsvd_1 != 0) {
5724 last_dev = 1;
5725 }
5726 swl_idx++;
5727 }
5728 } else {
5729 /* Send GA_NXT to the switch */
5730 rval = qla2x00_ga_nxt(vha, new_fcport);
5731 if (rval != QLA_SUCCESS) {
5732 ql_log(ql_log_warn, vha, 0x209e,
5733 "SNS scan failed -- assuming "
5734 "zero-entry result.\n");
5735 rval = QLA_SUCCESS;
5736 break;
5737 }
5738 }
5739
5740 /* If wrap on switch device list, exit. */
5741 if (first_dev) {
5742 wrap.b24 = new_fcport->d_id.b24;
5743 first_dev = 0;
5744 } else if (new_fcport->d_id.b24 == wrap.b24) {
5745 ql_dbg(ql_dbg_disc, vha, 0x209f,
5746 "Device wrap (%02x%02x%02x).\n",
5747 new_fcport->d_id.b.domain,
5748 new_fcport->d_id.b.area,
5749 new_fcport->d_id.b.al_pa);
5750 break;
5751 }
5752
5753 /* Bypass if same physical adapter. */
5754 if (new_fcport->d_id.b24 == base_vha->d_id.b24)
5755 continue;
5756
5757 /* Bypass virtual ports of the same host. */
5758 if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
5759 continue;
5760
5761 /* Bypass if same domain and area of adapter. */
5762 if (((new_fcport->d_id.b24 & 0xffff00) ==
5763 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
5764 ISP_CFG_FL)
5765 continue;
5766
5767 /* Bypass reserved domain fields. */
5768 if ((new_fcport->d_id.b.domain & 0xf0) == 0xf0)
5769 continue;
5770
5771 /* Bypass ports whose FCP-4 type is not FCP_SCSI */
5772 if (ql2xgffidenable &&
5773 (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
5774 new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
5775 continue;
5776
5777 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
5778
5779 /* Locate matching device in database. */
5780 found = 0;
5781 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5782 if (memcmp(new_fcport->port_name, fcport->port_name,
5783 WWN_SIZE))
5784 continue;
5785
5786 fcport->scan_state = QLA_FCPORT_FOUND;
5787
5788 found++;
5789
5790 /* Update port state. */
5791 memcpy(fcport->fabric_port_name,
5792 new_fcport->fabric_port_name, WWN_SIZE);
5793 fcport->fp_speed = new_fcport->fp_speed;
5794
5795 /*
5796 * If address the same and state FCS_ONLINE
5797 * (or in target mode), nothing changed.
5798 */
5799 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
5800 (atomic_read(&fcport->state) == FCS_ONLINE ||
5801 (vha->host->active_mode == MODE_TARGET))) {
5802 break;
5803 }
5804
5805 /*
5806 * If device was not a fabric device before.
5807 */
5808 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
5809 fcport->d_id.b24 = new_fcport->d_id.b24;
5810 qla2x00_clear_loop_id(fcport);
5811 fcport->flags |= (FCF_FABRIC_DEVICE |
5812 FCF_LOGIN_NEEDED);
5813 break;
5814 }
5815
5816 /*
5817 * Port ID changed or device was marked to be updated;
5818 * Log it out if still logged in and mark it for
5819 * relogin later.
5820 */
5821 if (qla_tgt_mode_enabled(base_vha)) {
5822 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
5823 "port changed FC ID, %8phC"
5824 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
5825 fcport->port_name,
5826 fcport->d_id.b.domain,
5827 fcport->d_id.b.area,
5828 fcport->d_id.b.al_pa,
5829 fcport->loop_id,
5830 new_fcport->d_id.b.domain,
5831 new_fcport->d_id.b.area,
5832 new_fcport->d_id.b.al_pa);
5833 fcport->d_id.b24 = new_fcport->d_id.b24;
5834 break;
5835 }
5836
5837 fcport->d_id.b24 = new_fcport->d_id.b24;
5838 fcport->flags |= FCF_LOGIN_NEEDED;
5839 break;
5840 }
5841
5842 if (fcport->fc4f_nvme) {
5843 if (fcport->disc_state == DSC_DELETE_PEND) {
5844 fcport->disc_state = DSC_GNL;
5845 vha->fcport_count--;
5846 fcport->login_succ = 0;
5847 }
5848 }
5849
5850 if (found) {
5851 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5852 continue;
5853 }
5854 /* If device was not in our fcports list, then add it. */
5855 new_fcport->scan_state = QLA_FCPORT_FOUND;
5856 list_add_tail(&new_fcport->list, &vha->vp_fcports);
5857
5858 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
5859
5860
5861 /* Allocate a new replacement fcport. */
5862 nxt_d_id.b24 = new_fcport->d_id.b24;
5863 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
5864 if (new_fcport == NULL) {
5865 ql_log(ql_log_warn, vha, 0xd032,
5866 "Memory allocation failed for fcport.\n");
5867 return (QLA_MEMORY_ALLOC_FAILED);
5868 }
5869 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
5870 new_fcport->d_id.b24 = nxt_d_id.b24;
5871 }
5872
5873 qla2x00_free_fcport(new_fcport);
5874
5875 /*
5876 * Logout all previous fabric dev marked lost, except FCP2 devices.
5877 */
5878 list_for_each_entry(fcport, &vha->vp_fcports, list) {
5879 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
5880 break;
5881
5882 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
5883 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
5884 continue;
5885
5886 if (fcport->scan_state == QLA_FCPORT_SCAN) {
5887 if ((qla_dual_mode_enabled(vha) ||
5888 qla_ini_mode_enabled(vha)) &&
5889 atomic_read(&fcport->state) == FCS_ONLINE) {
5890 qla2x00_mark_device_lost(vha, fcport,
5891 ql2xplogiabsentdevice, 0);
5892 if (fcport->loop_id != FC_NO_LOOP_ID &&
5893 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
5894 fcport->port_type != FCT_INITIATOR &&
5895 fcport->port_type != FCT_BROADCAST) {
5896 ql_dbg(ql_dbg_disc, vha, 0x20f0,
5897 "%s %d %8phC post del sess\n",
5898 __func__, __LINE__,
5899 fcport->port_name);
5900 qlt_schedule_sess_for_deletion(fcport);
5901 continue;
5902 }
5903 }
5904 }
5905
5906 if (fcport->scan_state == QLA_FCPORT_FOUND)
5907 qla24xx_fcport_handle_login(vha, fcport);
5908 }
5909 return (rval);
5910 }
5911
5912 /* FW does not set aside Loop id for MGMT Server/FFFFFAh */
5913 int
qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t * vha)5914 qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha)
5915 {
5916 int loop_id = FC_NO_LOOP_ID;
5917 int lid = NPH_MGMT_SERVER - vha->vp_idx;
5918 unsigned long flags;
5919 struct qla_hw_data *ha = vha->hw;
5920
5921 if (vha->vp_idx == 0) {
5922 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
5923 return NPH_MGMT_SERVER;
5924 }
5925
5926 /* pick id from high and work down to low */
5927 spin_lock_irqsave(&ha->vport_slock, flags);
5928 for (; lid > 0; lid--) {
5929 if (!test_bit(lid, vha->hw->loop_id_map)) {
5930 set_bit(lid, vha->hw->loop_id_map);
5931 loop_id = lid;
5932 break;
5933 }
5934 }
5935 spin_unlock_irqrestore(&ha->vport_slock, flags);
5936
5937 return loop_id;
5938 }
5939
/*
 * qla2x00_fabric_login
 *	Issue fabric login command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	device = pointer to FC device type structure.
 *
 * Returns:
 *      0 - Login successfully
 *      1 - Login failed
 *      2 - Initiator device
 *      3 - Fatal error
 */
int
qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *next_loopid)
{
	int	rval;
	int	retry;
	uint16_t tmp_loopid;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;

	retry = 0;
	tmp_loopid = 0;

	/* Retry until the login completes, fails fatally, or we run out
	 * of loop IDs; each iteration reacts to the mailbox status. */
	for (;;) {
		ql_dbg(ql_dbg_disc, vha, 0x2000,
		    "Trying Fabric Login w/loop id 0x%04x for port "
		    "%02x%02x%02x.\n",
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		/* Login fcport on switch. */
		rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mb, BIT_0);
		if (rval != QLA_SUCCESS) {
			return rval;
		}
		if (mb[0] == MBS_PORT_ID_USED) {
			/*
			 * Device has another loop ID.  The firmware team
			 * recommends the driver perform an implicit login with
			 * the specified ID again. The ID we just used is saved
			 * here so we return with an ID that can be tried by
			 * the next login.
			 */
			retry++;
			tmp_loopid = fcport->loop_id;
			fcport->loop_id = mb[1];

			ql_dbg(ql_dbg_disc, vha, 0x2001,
			    "Fabric Login: port in use - next loop "
			    "id=0x%04x, port id= %02x%02x%02x.\n",
			    fcport->loop_id, fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa);

		} else if (mb[0] == MBS_COMMAND_COMPLETE) {
			/*
			 * Login succeeded.
			 */
			if (retry) {
				/* A retry occurred before. */
				*next_loopid = tmp_loopid;
			} else {
				/*
				 * No retry occurred before. Just increment the
				 * ID value for next login.
				 */
				*next_loopid = (fcport->loop_id + 1);
			}

			/* mb[1] BIT_0 set means the port is an initiator. */
			if (mb[1] & BIT_0) {
				fcport->port_type = FCT_INITIATOR;
			} else {
				fcport->port_type = FCT_TARGET;
				/* mb[1] BIT_1: target supports FCP-2. */
				if (mb[1] & BIT_1) {
					fcport->flags |= FCF_FCP2_DEVICE;
				}
			}

			/* mb[10] reports the supported classes of service. */
			if (mb[10] & BIT_0)
				fcport->supported_classes |= FC_COS_CLASS2;
			if (mb[10] & BIT_1)
				fcport->supported_classes |= FC_COS_CLASS3;

			if (IS_FWI2_CAPABLE(ha)) {
				if (mb[10] & BIT_7)
					fcport->flags |=
					    FCF_CONF_COMP_SUPPORTED;
			}

			rval = QLA_SUCCESS;
			break;
		} else if (mb[0] == MBS_LOOP_ID_USED) {
			/*
			 * Loop ID already used, try next loop ID.
			 */
			fcport->loop_id++;
			rval = qla2x00_find_new_loop_id(vha, fcport);
			if (rval != QLA_SUCCESS) {
				/* Ran out of loop IDs to use */
				break;
			}
		} else if (mb[0] == MBS_COMMAND_ERROR) {
			/*
			 * Firmware possibly timed out during login. If NO
			 * retries are left to do then the device is declared
			 * dead.
			 */
			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_mark_device_lost(vha, fcport, 1, 0);

			rval = 1;
			break;
		} else {
			/*
			 * unrecoverable / not handled error
			 */
			ql_dbg(ql_dbg_disc, vha, 0x2002,
			    "Failed=%x port_id=%02x%02x%02x loop_id=%x "
			    "jiffies=%lx.\n", mb[0], fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    fcport->loop_id, jiffies);

			*next_loopid = fcport->loop_id;
			ha->isp_ops->fabric_logout(vha, fcport->loop_id,
			    fcport->d_id.b.domain, fcport->d_id.b.area,
			    fcport->d_id.b.al_pa);
			qla2x00_clear_loop_id(fcport);
			fcport->login_retry = 0;

			rval = 3;
			break;
		}
	}

	return (rval);
}
6084
6085 /*
6086 * qla2x00_local_device_login
6087 * Issue local device login command.
6088 *
6089 * Input:
6090 * ha = adapter block pointer.
6091 * loop_id = loop id of device to login to.
6092 *
 * Returns (note: no symbolic #defines exist for these codes):
6094 * 0 - Login successfully
6095 * 1 - Login failed
6096 * 3 - Fatal error
6097 */
6098 int
qla2x00_local_device_login(scsi_qla_host_t * vha,fc_port_t * fcport)6099 qla2x00_local_device_login(scsi_qla_host_t *vha, fc_port_t *fcport)
6100 {
6101 int rval;
6102 uint16_t mb[MAILBOX_REGISTER_COUNT];
6103
6104 memset(mb, 0, sizeof(mb));
6105 rval = qla2x00_login_local_device(vha, fcport, mb, BIT_0);
6106 if (rval == QLA_SUCCESS) {
6107 /* Interrogate mailbox registers for any errors */
6108 if (mb[0] == MBS_COMMAND_ERROR)
6109 rval = 1;
6110 else if (mb[0] == MBS_COMMAND_PARAMETER_ERROR)
6111 /* device not in PCB table */
6112 rval = 3;
6113 }
6114
6115 return (rval);
6116 }
6117
/*
 * qla2x00_loop_resync
 *	Resync with fibre channel devices.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	0 = success
 */
int
qla2x00_loop_resync(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	uint32_t wait_time;

	clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	if (vha->flags.online) {
		if (!(rval = qla2x00_fw_ready(vha))) {
			/* Wait at most MAX_TARGET RSCNs for a stable link. */
			wait_time = 256;
			do {
				if (!IS_QLAFX00(vha->hw)) {
					/*
					 * Issue a marker after FW becomes
					 * ready.
					 */
					qla2x00_marker(vha, vha->hw->base_qpair,
					    0, 0, MK_SYNC_ALL);
					vha->marker_needed = 0;
				}

				/* Remap devices on Loop. */
				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);

				if (IS_QLAFX00(vha->hw))
					qlafx00_configure_devices(vha);
				else
					qla2x00_configure_loop(vha);

				/* Repeat while new RSCNs re-arm
				 * LOOP_RESYNC_NEEDED, up to 256 passes,
				 * unless the loop went down or an ISP
				 * abort was requested. */
				wait_time--;
			} while (!atomic_read(&vha->loop_down_timer) &&
				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED,
				&vha->dpc_flags)));
		}
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		return (QLA_FUNCTION_FAILED);

	if (rval)
		ql_dbg(ql_dbg_disc, vha, 0x206c,
		    "%s *** FAILED ***.\n", __func__);

	return (rval);
}
6175
6176 /*
6177 * qla2x00_perform_loop_resync
6178 * Description: This function will set the appropriate flags and call
6179 * qla2x00_loop_resync. If successful loop will be resynced
6180 * Arguments : scsi_qla_host_t pointer
6181 * returm : Success or Failure
6182 */
6183
qla2x00_perform_loop_resync(scsi_qla_host_t * ha)6184 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
6185 {
6186 int32_t rval = 0;
6187
6188 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
6189 /*Configure the flags so that resync happens properly*/
6190 atomic_set(&ha->loop_down_timer, 0);
6191 if (!(ha->device_flags & DFLG_NO_CABLE)) {
6192 atomic_set(&ha->loop_state, LOOP_UP);
6193 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
6194 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
6195 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
6196
6197 rval = qla2x00_loop_resync(ha);
6198 } else
6199 atomic_set(&ha->loop_state, LOOP_DEAD);
6200
6201 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
6202 }
6203
6204 return rval;
6205 }
6206
/*
 * Walk every vport on the adapter and complete deferred rport removals
 * (ports with fcport->drport set) that were queued while locks were held.
 */
void
qla2x00_update_fcports(scsi_qla_host_t *base_vha)
{
	fc_port_t *fcport;
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = base_vha->hw;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Go with deferred removal of rport references. */
	list_for_each_entry(vha, &base_vha->hw->vp_list, list) {
		/* Pin the vport while the lock is dropped below. */
		atomic_inc(&vha->vref_count);
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (fcport->drport &&
			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
				/* NOTE(review): the spinlock is dropped
				 * around qla2x00_rport_del() — presumably
				 * because rport deletion may block; confirm
				 * list stability across the unlock. */
				spin_unlock_irqrestore(&ha->vport_slock, flags);
				qla2x00_rport_del(fcport);

				spin_lock_irqsave(&ha->vport_slock, flags);
			}
		}
		atomic_dec(&vha->vref_count);
		wake_up(&vha->vref_waitq);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}
6233
6234 /* Assumes idc_lock always held on entry */
6235 void
qla83xx_reset_ownership(scsi_qla_host_t * vha)6236 qla83xx_reset_ownership(scsi_qla_host_t *vha)
6237 {
6238 struct qla_hw_data *ha = vha->hw;
6239 uint32_t drv_presence, drv_presence_mask;
6240 uint32_t dev_part_info1, dev_part_info2, class_type;
6241 uint32_t class_type_mask = 0x3;
6242 uint16_t fcoe_other_function = 0xffff, i;
6243
6244 if (IS_QLA8044(ha)) {
6245 drv_presence = qla8044_rd_direct(vha,
6246 QLA8044_CRB_DRV_ACTIVE_INDEX);
6247 dev_part_info1 = qla8044_rd_direct(vha,
6248 QLA8044_CRB_DEV_PART_INFO_INDEX);
6249 dev_part_info2 = qla8044_rd_direct(vha,
6250 QLA8044_CRB_DEV_PART_INFO2);
6251 } else {
6252 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6253 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
6254 qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
6255 }
6256 for (i = 0; i < 8; i++) {
6257 class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
6258 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6259 (i != ha->portnum)) {
6260 fcoe_other_function = i;
6261 break;
6262 }
6263 }
6264 if (fcoe_other_function == 0xffff) {
6265 for (i = 0; i < 8; i++) {
6266 class_type = ((dev_part_info2 >> (i * 4)) &
6267 class_type_mask);
6268 if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
6269 ((i + 8) != ha->portnum)) {
6270 fcoe_other_function = i + 8;
6271 break;
6272 }
6273 }
6274 }
6275 /*
6276 * Prepare drv-presence mask based on fcoe functions present.
6277 * However consider only valid physical fcoe function numbers (0-15).
6278 */
6279 drv_presence_mask = ~((1 << (ha->portnum)) |
6280 ((fcoe_other_function == 0xffff) ?
6281 0 : (1 << (fcoe_other_function))));
6282
6283 /* We are the reset owner iff:
6284 * - No other protocol drivers present.
6285 * - This is the lowest among fcoe functions. */
6286 if (!(drv_presence & drv_presence_mask) &&
6287 (ha->portnum < fcoe_other_function)) {
6288 ql_dbg(ql_dbg_p3p, vha, 0xb07f,
6289 "This host is Reset owner.\n");
6290 ha->flags.nic_core_reset_owner = 1;
6291 }
6292 }
6293
6294 static int
__qla83xx_set_drv_ack(scsi_qla_host_t * vha)6295 __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
6296 {
6297 int rval = QLA_SUCCESS;
6298 struct qla_hw_data *ha = vha->hw;
6299 uint32_t drv_ack;
6300
6301 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6302 if (rval == QLA_SUCCESS) {
6303 drv_ack |= (1 << ha->portnum);
6304 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6305 }
6306
6307 return rval;
6308 }
6309
6310 static int
__qla83xx_clear_drv_ack(scsi_qla_host_t * vha)6311 __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
6312 {
6313 int rval = QLA_SUCCESS;
6314 struct qla_hw_data *ha = vha->hw;
6315 uint32_t drv_ack;
6316
6317 rval = qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
6318 if (rval == QLA_SUCCESS) {
6319 drv_ack &= ~(1 << ha->portnum);
6320 rval = qla83xx_wr_reg(vha, QLA83XX_IDC_DRIVER_ACK, drv_ack);
6321 }
6322
6323 return rval;
6324 }
6325
6326 static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)6327 qla83xx_dev_state_to_string(uint32_t dev_state)
6328 {
6329 switch (dev_state) {
6330 case QLA8XXX_DEV_COLD:
6331 return "COLD/RE-INIT";
6332 case QLA8XXX_DEV_INITIALIZING:
6333 return "INITIALIZING";
6334 case QLA8XXX_DEV_READY:
6335 return "READY";
6336 case QLA8XXX_DEV_NEED_RESET:
6337 return "NEED RESET";
6338 case QLA8XXX_DEV_NEED_QUIESCENT:
6339 return "NEED QUIESCENT";
6340 case QLA8XXX_DEV_FAILED:
6341 return "FAILED";
6342 case QLA8XXX_DEV_QUIESCENT:
6343 return "QUIESCENT";
6344 default:
6345 return "Unknown";
6346 }
6347 }
6348
6349 /* Assumes idc-lock always held on entry */
6350 void
qla83xx_idc_audit(scsi_qla_host_t * vha,int audit_type)6351 qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
6352 {
6353 struct qla_hw_data *ha = vha->hw;
6354 uint32_t idc_audit_reg = 0, duration_secs = 0;
6355
6356 switch (audit_type) {
6357 case IDC_AUDIT_TIMESTAMP:
6358 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
6359 idc_audit_reg = (ha->portnum) |
6360 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
6361 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6362 break;
6363
6364 case IDC_AUDIT_COMPLETION:
6365 duration_secs = ((jiffies_to_msecs(jiffies) -
6366 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
6367 idc_audit_reg = (ha->portnum) |
6368 (IDC_AUDIT_COMPLETION << 7) | (duration_secs << 8);
6369 qla83xx_wr_reg(vha, QLA83XX_IDC_AUDIT, idc_audit_reg);
6370 break;
6371
6372 default:
6373 ql_log(ql_log_warn, vha, 0xb078,
6374 "Invalid audit type specified.\n");
6375 break;
6376 }
6377 }
6378
/* Assumes idc_lock always held on entry */
static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	uint32_t idc_control, dev_state;

	/* Honor the "reset disabled" bit in the IDC control register. */
	__qla83xx_get_idc_control(vha, &idc_control);
	if ((idc_control & QLA83XX_IDC_RESET_DISABLED)) {
		ql_log(ql_log_info, vha, 0xb080,
		    "NIC Core reset has been disabled. idc-control=0x%x\n",
		    idc_control);
		return QLA_FUNCTION_FAILED;
	}

	/* Set NEED-RESET iff in READY state and we are the reset-owner */
	qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
	if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
		qla83xx_wr_reg(vha, QLA83XX_IDC_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		ql_log(ql_log_info, vha, 0xb056, "HW State: NEED RESET.\n");
		qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
	} else {
		const char *state = qla83xx_dev_state_to_string(dev_state);

		ql_log(ql_log_info, vha, 0xb057, "HW State: %s.\n", state);

		/* SV: XXX: Is timeout required here? */
		/* Wait for IDC state change READY -> NEED_RESET */
		while (dev_state == QLA8XXX_DEV_READY) {
			/* Release the idc-lock while polling so the
			 * reset owner can advance the state machine. */
			qla83xx_idc_unlock(vha, 0);
			msleep(200);
			qla83xx_idc_lock(vha, 0);
			qla83xx_rd_reg(vha, QLA83XX_IDC_DEV_STATE, &dev_state);
		}
	}

	/* Send IDC ack by writing to drv-ack register */
	__qla83xx_set_drv_ack(vha);

	return QLA_SUCCESS;
}
6421
/* Write the IDC control register; caller must hold the idc-lock. */
int
__qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
{
	return qla83xx_wr_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6427
/* Read the IDC control register; caller must hold the idc-lock. */
int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
	return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
6433
6434 static int
qla83xx_check_driver_presence(scsi_qla_host_t * vha)6435 qla83xx_check_driver_presence(scsi_qla_host_t *vha)
6436 {
6437 uint32_t drv_presence = 0;
6438 struct qla_hw_data *ha = vha->hw;
6439
6440 qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
6441 if (drv_presence & (1 << ha->portnum))
6442 return QLA_SUCCESS;
6443 else
6444 return QLA_TEST_FAILED;
6445 }
6446
/*
 * qla83xx_nic_core_reset
 *	Coordinate an 83xx NIC core reset through the IDC registers:
 *	mark NEED-RESET (or wait on the owner), then run the IDC state
 *	handler until the device reaches a terminal state.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */
int
qla83xx_nic_core_reset(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_p3p, vha, 0xb058,
	    "Entered %s().\n", __func__);

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0xb059,
		    "Device in unrecoverable FAILED state.\n");
		return QLA_FUNCTION_FAILED;
	}

	qla83xx_idc_lock(vha, 0);

	/* We may only take part if our bit is still in DRV-PRESENCE. */
	if (qla83xx_check_driver_presence(vha) != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0xb05a,
		    "Function=0x%x has been removed from IDC participation.\n",
		    ha->portnum);
		rval = QLA_FUNCTION_FAILED;
		goto exit;
	}

	qla83xx_reset_ownership(vha);

	rval = qla83xx_initiating_reset(vha);

	/*
	 * Perform reset if we are the reset-owner,
	 * else wait till IDC state changes to READY/FAILED.
	 */
	if (rval == QLA_SUCCESS) {
		rval = qla83xx_idc_state_handler(vha);

		if (rval == QLA_SUCCESS)
			ha->flags.nic_core_hung = 0;
		__qla83xx_clear_drv_ack(vha);
	}

exit:
	qla83xx_idc_unlock(vha, 0);

	ql_dbg(ql_dbg_p3p, vha, 0xb05b, "Exiting %s.\n", __func__);

	return rval;
}
6495
/*
 * qla2xxx_mctp_dump
 *	Capture an MCTP firmware dump into a lazily-allocated coherent
 *	DMA buffer; on port 0 additionally restart the NIC firmware.
 *
 * Returns QLA_SUCCESS when the dump (and, if attempted, the restart)
 * succeeded, else a failure status.
 */
int
qla2xxx_mctp_dump(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	int rval = QLA_FUNCTION_FAILED;

	if (!IS_MCTP_CAPABLE(ha)) {
		/* This message can be removed from the final version */
		ql_log(ql_log_info, vha, 0x506d,
		    "This board is not MCTP capable\n");
		return rval;
	}

	/* Allocate the dump buffer once; it is kept and reused. */
	if (!ha->mctp_dump) {
		ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
		    MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);

		if (!ha->mctp_dump) {
			ql_log(ql_log_warn, vha, 0x506e,
			    "Failed to allocate memory for mctp dump\n");
			return rval;
		}
	}

#define MCTP_DUMP_STR_ADDR 0x00000000
	/* Read MCTP_DUMP_SIZE bytes (transferred as dwords) from addr 0. */
	rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
	    MCTP_DUMP_STR_ADDR, MCTP_DUMP_SIZE/4);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x506f,
		    "Failed to capture mctp dump\n");
	} else {
		ql_log(ql_log_info, vha, 0x5070,
		    "Mctp dump capture for host (%ld/%p).\n",
		    vha->host_no, ha->mctp_dump);
		ha->mctp_dumped = 1;
	}

	/* Only port 0 restarts the NIC core, and only when no other
	 * reset handler is already active. */
	if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
		ha->flags.nic_core_reset_hdlr_active = 1;
		rval = qla83xx_restart_nic_firmware(vha);
		if (rval)
			/* NIC Core reset failed. */
			ql_log(ql_log_warn, vha, 0x5071,
			    "Failed to restart nic firmware\n");
		else
			ql_dbg(ql_dbg_p3p, vha, 0xb084,
			    "Restarted NIC firmware successfully.\n");
		ha->flags.nic_core_reset_hdlr_active = 0;
	}

	return rval;

}
6549
6550 /*
6551 * qla2x00_quiesce_io
6552 * Description: This function will block the new I/Os
6553 * Its not aborting any I/Os as context
6554 * is not destroyed during quiescence
6555 * Arguments: scsi_qla_host_t
6556 * return : void
6557 */
6558 void
qla2x00_quiesce_io(scsi_qla_host_t * vha)6559 qla2x00_quiesce_io(scsi_qla_host_t *vha)
6560 {
6561 struct qla_hw_data *ha = vha->hw;
6562 struct scsi_qla_host *vp;
6563
6564 ql_dbg(ql_dbg_dpc, vha, 0x401d,
6565 "Quiescing I/O - ha=%p.\n", ha);
6566
6567 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
6568 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
6569 atomic_set(&vha->loop_state, LOOP_DOWN);
6570 qla2x00_mark_all_devices_lost(vha, 0);
6571 list_for_each_entry(vp, &ha->vp_list, list)
6572 qla2x00_mark_all_devices_lost(vp, 0);
6573 } else {
6574 if (!atomic_read(&vha->loop_down_timer))
6575 atomic_set(&vha->loop_down_timer,
6576 LOOP_DOWN_TIME);
6577 }
6578 /* Wait for pending cmds to complete */
6579 WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST)
6580 != QLA_SUCCESS);
6581 }
6582
/*
 * qla2x00_abort_isp_cleanup
 *	Pre-reset teardown: quiesce mailbox traffic, reset the chip (except
 *	P3P/82xx, which only disables interrupts here), mark all remote
 *	ports lost on the base port and every vport, and flush outstanding
 *	commands back with DID_RESET.
 */
void
qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;
	fc_port_t *fcport;
	u16 i;

	/* For ISP82XX, driver waits for completion of the commands.
	 * online flag should be set.
	 */
	if (!(IS_P3P_TYPE(ha)))
		vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	vha->qla_stats.total_isp_aborts++;

	ql_log(ql_log_info, vha, 0x00af,
	    "Performing ISP error recovery - ha=%p.\n", ha);

	/* Tell in-flight mailbox commands to abandon their waits. */
	ha->flags.purge_mbox = 1;
	/* For ISP82XX, reset_chip is just disabling interrupts.
	 * Driver waits for the completion of the commands.
	 * the interrupts need to be enabled.
	 */
	if (!(IS_P3P_TYPE(ha)))
		ha->isp_ops->reset_chip(vha);

	ha->link_data_rate = PORT_SPEED_UNKNOWN;
	SAVE_TOPO(ha);
	ha->flags.rida_fmt2 = 0;
	ha->flags.n2n_ae = 0;
	ha->flags.lip_ae = 0;
	ha->current_topology = 0;
	ha->flags.fw_started = 0;
	ha->flags.fw_init_done = 0;
	/* Bump the chip-reset generation and propagate it to every queue
	 * pair so stale requests can be recognized after the reset. */
	ha->chip_reset++;
	ha->base_qpair->chip_reset = ha->chip_reset;
	for (i = 0; i < ha->max_qpairs; i++) {
		if (ha->queue_pair_map[i])
			ha->queue_pair_map[i]->chip_reset =
				ha->base_qpair->chip_reset;
	}

	/* purge MBox commands */
	if (atomic_read(&ha->num_pend_mbx_stage3)) {
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	/* Poll up to ~1s (50 x 20ms) for all mailbox stages to drain. */
	i = 0;
	while (atomic_read(&ha->num_pend_mbx_stage3) ||
	    atomic_read(&ha->num_pend_mbx_stage2) ||
	    atomic_read(&ha->num_pend_mbx_stage1)) {
		msleep(20);
		i++;
		if (i > 50)
			break;
	}
	ha->flags.purge_mbox = 0;

	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha, 0);

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			/* vref pins the vport while the lock is dropped. */
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_mark_all_devices_lost(vp, 0);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer,
			    LOOP_DOWN_TIME);
	}

	/* Clear all async request states across all VPs. */
	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
		fcport->scan_state = 0;
	}
	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		atomic_inc(&vp->vref_count);
		spin_unlock_irqrestore(&ha->vport_slock, flags);

		list_for_each_entry(fcport, &vp->vp_fcports, list)
			fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);

		spin_lock_irqsave(&ha->vport_slock, flags);
		atomic_dec(&vp->vref_count);
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	if (!ha->flags.eeh_busy) {
		/* Make sure for ISP 82XX IO DMA is complete */
		if (IS_P3P_TYPE(ha)) {
			qla82xx_chip_reset_cleanup(vha);
			ql_log(ql_log_info, vha, 0x00b4,
			    "Done chip reset cleanup.\n");

			/* Done waiting for pending commands.
			 * Reset the online flag.
			 */
			vha->flags.online = 0;
		}

		/* Requeue all commands in outstanding command list. */
		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
	}
	/* memory barrier */
	wmb();
}
6704
6705 /*
6706 * qla2x00_abort_isp
6707 * Resets ISP and aborts all outstanding commands.
6708 *
6709 * Input:
6710 * ha = adapter block pointer.
6711 *
6712 * Returns:
6713 * 0 = success
6714 */
6715 int
qla2x00_abort_isp(scsi_qla_host_t * vha)6716 qla2x00_abort_isp(scsi_qla_host_t *vha)
6717 {
6718 int rval;
6719 uint8_t status = 0;
6720 struct qla_hw_data *ha = vha->hw;
6721 struct scsi_qla_host *vp;
6722 struct req_que *req = ha->req_q_map[0];
6723 unsigned long flags;
6724
6725 if (vha->flags.online) {
6726 qla2x00_abort_isp_cleanup(vha);
6727
6728 if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
6729 ha->flags.chip_reset_done = 1;
6730 vha->flags.online = 1;
6731 status = 0;
6732 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6733 return status;
6734 }
6735
6736 if (IS_QLA8031(ha)) {
6737 ql_dbg(ql_dbg_p3p, vha, 0xb05c,
6738 "Clearing fcoe driver presence.\n");
6739 if (qla83xx_clear_drv_presence(vha) != QLA_SUCCESS)
6740 ql_dbg(ql_dbg_p3p, vha, 0xb073,
6741 "Error while clearing DRV-Presence.\n");
6742 }
6743
6744 if (unlikely(pci_channel_offline(ha->pdev) &&
6745 ha->flags.pci_channel_io_perm_failure)) {
6746 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6747 status = 0;
6748 return status;
6749 }
6750
6751 switch (vha->qlini_mode) {
6752 case QLA2XXX_INI_MODE_DISABLED:
6753 if (!qla_tgt_mode_enabled(vha))
6754 return 0;
6755 break;
6756 case QLA2XXX_INI_MODE_DUAL:
6757 if (!qla_dual_mode_enabled(vha))
6758 return 0;
6759 break;
6760 case QLA2XXX_INI_MODE_ENABLED:
6761 default:
6762 break;
6763 }
6764
6765 ha->isp_ops->get_flash_version(vha, req->ring);
6766
6767 ha->isp_ops->nvram_config(vha);
6768
6769 if (!qla2x00_restart_isp(vha)) {
6770 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6771
6772 if (!atomic_read(&vha->loop_down_timer)) {
6773 /*
6774 * Issue marker command only when we are going
6775 * to start the I/O .
6776 */
6777 vha->marker_needed = 1;
6778 }
6779
6780 vha->flags.online = 1;
6781
6782 ha->isp_ops->enable_intrs(ha);
6783
6784 ha->isp_abort_cnt = 0;
6785 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6786
6787 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
6788 qla2x00_get_fw_version(vha);
6789 if (ha->fce) {
6790 ha->flags.fce_enabled = 1;
6791 memset(ha->fce, 0,
6792 fce_calc_size(ha->fce_bufs));
6793 rval = qla2x00_enable_fce_trace(vha,
6794 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
6795 &ha->fce_bufs);
6796 if (rval) {
6797 ql_log(ql_log_warn, vha, 0x8033,
6798 "Unable to reinitialize FCE "
6799 "(%d).\n", rval);
6800 ha->flags.fce_enabled = 0;
6801 }
6802 }
6803
6804 if (ha->eft) {
6805 memset(ha->eft, 0, EFT_SIZE);
6806 rval = qla2x00_enable_eft_trace(vha,
6807 ha->eft_dma, EFT_NUM_BUFFERS);
6808 if (rval) {
6809 ql_log(ql_log_warn, vha, 0x8034,
6810 "Unable to reinitialize EFT "
6811 "(%d).\n", rval);
6812 }
6813 }
6814 } else { /* failed the ISP abort */
6815 vha->flags.online = 1;
6816 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
6817 if (ha->isp_abort_cnt == 0) {
6818 ql_log(ql_log_fatal, vha, 0x8035,
6819 "ISP error recover failed - "
6820 "board disabled.\n");
6821 /*
6822 * The next call disables the board
6823 * completely.
6824 */
6825 qla2x00_abort_isp_cleanup(vha);
6826 vha->flags.online = 0;
6827 clear_bit(ISP_ABORT_RETRY,
6828 &vha->dpc_flags);
6829 status = 0;
6830 } else { /* schedule another ISP abort */
6831 ha->isp_abort_cnt--;
6832 ql_dbg(ql_dbg_taskm, vha, 0x8020,
6833 "ISP abort - retry remaining %d.\n",
6834 ha->isp_abort_cnt);
6835 status = 1;
6836 }
6837 } else {
6838 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
6839 ql_dbg(ql_dbg_taskm, vha, 0x8021,
6840 "ISP error recovery - retrying (%d) "
6841 "more times.\n", ha->isp_abort_cnt);
6842 set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
6843 status = 1;
6844 }
6845 }
6846
6847 }
6848
6849 if (!status) {
6850 ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
6851 qla2x00_configure_hba(vha);
6852 spin_lock_irqsave(&ha->vport_slock, flags);
6853 list_for_each_entry(vp, &ha->vp_list, list) {
6854 if (vp->vp_idx) {
6855 atomic_inc(&vp->vref_count);
6856 spin_unlock_irqrestore(&ha->vport_slock, flags);
6857
6858 qla2x00_vp_abort_isp(vp);
6859
6860 spin_lock_irqsave(&ha->vport_slock, flags);
6861 atomic_dec(&vp->vref_count);
6862 }
6863 }
6864 spin_unlock_irqrestore(&ha->vport_slock, flags);
6865
6866 if (IS_QLA8031(ha)) {
6867 ql_dbg(ql_dbg_p3p, vha, 0xb05d,
6868 "Setting back fcoe driver presence.\n");
6869 if (qla83xx_set_drv_presence(vha) != QLA_SUCCESS)
6870 ql_dbg(ql_dbg_p3p, vha, 0xb074,
6871 "Error while setting DRV-Presence.\n");
6872 }
6873 } else {
6874 ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
6875 __func__);
6876 }
6877
6878 return(status);
6879 }
6880
6881 /*
6882 * qla2x00_restart_isp
6883 * restarts the ISP after a reset
6884 *
6885 * Input:
6886 * ha = adapter block pointer.
6887 *
6888 * Returns:
6889 * 0 = success
6890 */
6891 static int
qla2x00_restart_isp(scsi_qla_host_t * vha)6892 qla2x00_restart_isp(scsi_qla_host_t *vha)
6893 {
6894 int status = 0;
6895 struct qla_hw_data *ha = vha->hw;
6896
6897 /* If firmware needs to be loaded */
6898 if (qla2x00_isp_firmware(vha)) {
6899 vha->flags.online = 0;
6900 status = ha->isp_ops->chip_diag(vha);
6901 if (!status)
6902 status = qla2x00_setup_chip(vha);
6903 }
6904
6905 if (!status && !(status = qla2x00_init_rings(vha))) {
6906 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
6907 ha->flags.chip_reset_done = 1;
6908
6909 /* Initialize the queues in use */
6910 qla25xx_init_queues(ha);
6911
6912 status = qla2x00_fw_ready(vha);
6913 if (!status) {
6914 /* Issue a marker after FW becomes ready. */
6915 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
6916 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
6917 }
6918
6919 /* if no cable then assume it's good */
6920 if ((vha->device_flags & DFLG_NO_CABLE))
6921 status = 0;
6922 }
6923 return (status);
6924 }
6925
6926 static int
qla25xx_init_queues(struct qla_hw_data * ha)6927 qla25xx_init_queues(struct qla_hw_data *ha)
6928 {
6929 struct rsp_que *rsp = NULL;
6930 struct req_que *req = NULL;
6931 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6932 int ret = -1;
6933 int i;
6934
6935 for (i = 1; i < ha->max_rsp_queues; i++) {
6936 rsp = ha->rsp_q_map[i];
6937 if (rsp && test_bit(i, ha->rsp_qid_map)) {
6938 rsp->options &= ~BIT_0;
6939 ret = qla25xx_init_rsp_que(base_vha, rsp);
6940 if (ret != QLA_SUCCESS)
6941 ql_dbg(ql_dbg_init, base_vha, 0x00ff,
6942 "%s Rsp que: %d init failed.\n",
6943 __func__, rsp->id);
6944 else
6945 ql_dbg(ql_dbg_init, base_vha, 0x0100,
6946 "%s Rsp que: %d inited.\n",
6947 __func__, rsp->id);
6948 }
6949 }
6950 for (i = 1; i < ha->max_req_queues; i++) {
6951 req = ha->req_q_map[i];
6952 if (req && test_bit(i, ha->req_qid_map)) {
6953 /* Clear outstanding commands array. */
6954 req->options &= ~BIT_0;
6955 ret = qla25xx_init_req_que(base_vha, req);
6956 if (ret != QLA_SUCCESS)
6957 ql_dbg(ql_dbg_init, base_vha, 0x0101,
6958 "%s Req que: %d init failed.\n",
6959 __func__, req->id);
6960 else
6961 ql_dbg(ql_dbg_init, base_vha, 0x0102,
6962 "%s Req que: %d inited.\n",
6963 __func__, req->id);
6964 }
6965 }
6966 return ret;
6967 }
6968
6969 /*
6970 * qla2x00_reset_adapter
6971 * Reset adapter.
6972 *
6973 * Input:
6974 * ha = adapter block pointer.
6975 */
6976 int
qla2x00_reset_adapter(scsi_qla_host_t * vha)6977 qla2x00_reset_adapter(scsi_qla_host_t *vha)
6978 {
6979 unsigned long flags = 0;
6980 struct qla_hw_data *ha = vha->hw;
6981 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
6982
6983 vha->flags.online = 0;
6984 ha->isp_ops->disable_intrs(ha);
6985
6986 spin_lock_irqsave(&ha->hardware_lock, flags);
6987 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC);
6988 RD_REG_WORD(®->hccr); /* PCI Posting. */
6989 WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC);
6990 RD_REG_WORD(®->hccr); /* PCI Posting. */
6991 spin_unlock_irqrestore(&ha->hardware_lock, flags);
6992
6993 return QLA_SUCCESS;
6994 }
6995
6996 int
qla24xx_reset_adapter(scsi_qla_host_t * vha)6997 qla24xx_reset_adapter(scsi_qla_host_t *vha)
6998 {
6999 unsigned long flags = 0;
7000 struct qla_hw_data *ha = vha->hw;
7001 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7002 int rval = QLA_SUCCESS;
7003
7004 if (IS_P3P_TYPE(ha))
7005 return rval;
7006
7007 vha->flags.online = 0;
7008 ha->isp_ops->disable_intrs(ha);
7009
7010 spin_lock_irqsave(&ha->hardware_lock, flags);
7011 WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET);
7012 RD_REG_DWORD(®->hccr);
7013 WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE);
7014 RD_REG_DWORD(®->hccr);
7015 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7016
7017 if (IS_NOPOLLING_TYPE(ha))
7018 ha->isp_ops->enable_intrs(ha);
7019
7020 return rval;
7021 }
7022
7023 /* On sparc systems, obtain port and node WWN from firmware
7024 * properties.
7025 */
qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t * vha,struct nvram_24xx * nv)7026 static void qla24xx_nvram_wwn_from_ofw(scsi_qla_host_t *vha,
7027 struct nvram_24xx *nv)
7028 {
7029 #ifdef CONFIG_SPARC
7030 struct qla_hw_data *ha = vha->hw;
7031 struct pci_dev *pdev = ha->pdev;
7032 struct device_node *dp = pci_device_to_OF_node(pdev);
7033 const u8 *val;
7034 int len;
7035
7036 val = of_get_property(dp, "port-wwn", &len);
7037 if (val && len >= WWN_SIZE)
7038 memcpy(nv->port_name, val, WWN_SIZE);
7039
7040 val = of_get_property(dp, "node-wwn", &len);
7041 if (val && len >= WWN_SIZE)
7042 memcpy(nv->node_name, val, WWN_SIZE);
7043 #endif
7044 }
7045
/*
 * qla24xx_nvram_config
 *	Read and validate 24xx NVRAM, fall back to built-in defaults on a
 *	bad checksum/ID/version, then build the init control block (ICB)
 *	and derive the driver's operating parameters from it.
 *
 * Returns QLA_SUCCESS (0) on valid NVRAM, 1 when defaults were used.
 */
int
qla24xx_nvram_config(scsi_qla_host_t *vha)
{
	int rval;
	struct init_cb_24xx *icb;
	struct nvram_24xx *nv;
	uint32_t *dptr;
	uint8_t *dptr1, *dptr2;
	uint32_t chksum;
	uint16_t cnt;
	struct qla_hw_data *ha = vha->hw;

	rval = QLA_SUCCESS;
	icb = (struct init_cb_24xx *)ha->init_cb;
	nv = ha->nvram;

	/* Determine NVRAM starting address. */
	if (ha->port_no == 0) {
		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
		ha->vpd_base = FA_NVRAM_VPD0_ADDR;
	} else {
		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
		ha->vpd_base = FA_NVRAM_VPD1_ADDR;
	}

	ha->nvram_size = sizeof(*nv);
	ha->vpd_size = FA_NVRAM_VPD_SIZE;

	/* Get VPD data into cache */
	ha->vpd = ha->nvram + VPD_OFFSET;
	ha->isp_ops->read_nvram(vha, ha->vpd,
	    ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);

	/* Get NVRAM data into cache and calculate checksum. */
	dptr = (uint32_t *)nv;
	ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
	/* A valid image sums (as LE dwords) to zero. */
	for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
		chksum += le32_to_cpu(*dptr);

	ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a,
	    "Contents of NVRAM\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d,
	    nv, ha->nvram_size);

	/* Bad NVRAM data, set defaults parameters. */
	if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
	    le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
		/* Reset NVRAM data. */
		ql_log(ql_log_warn, vha, 0x006b,
		    "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
		    chksum, nv->id, nv->nvram_version);
		ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv));
		ql_log(ql_log_warn, vha, 0x006c,
		    "Falling back to functioning (yet invalid -- WWPN) "
		    "defaults.\n");

		/*
		 * Set default initialization control block.
		 */
		memset(nv, 0, ha->nvram_size);
		nv->nvram_version = cpu_to_le16(ICB_VERSION);
		nv->version = cpu_to_le16(ICB_VERSION);
		nv->frame_payload_size = 2048;
		nv->execution_throttle = cpu_to_le16(0xFFFF);
		nv->exchange_count = cpu_to_le16(0);
		nv->hard_address = cpu_to_le16(124);
		/* Default (invalid-but-functional) QLogic OUI WWNs; the
		 * low byte of port_name[1] varies by PCI function. */
		nv->port_name[0] = 0x21;
		nv->port_name[1] = 0x00 + ha->port_no + 1;
		nv->port_name[2] = 0x00;
		nv->port_name[3] = 0xe0;
		nv->port_name[4] = 0x8b;
		nv->port_name[5] = 0x1c;
		nv->port_name[6] = 0x55;
		nv->port_name[7] = 0x86;
		nv->node_name[0] = 0x20;
		nv->node_name[1] = 0x00;
		nv->node_name[2] = 0x00;
		nv->node_name[3] = 0xe0;
		nv->node_name[4] = 0x8b;
		nv->node_name[5] = 0x1c;
		nv->node_name[6] = 0x55;
		nv->node_name[7] = 0x86;
		/* On SPARC, prefer WWNs from OpenFirmware properties. */
		qla24xx_nvram_wwn_from_ofw(vha, nv);
		nv->login_retry_count = cpu_to_le16(8);
		nv->interrupt_delay_timer = cpu_to_le16(0);
		nv->login_timeout = cpu_to_le16(0);
		nv->firmware_options_1 =
		    cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
		nv->firmware_options_2 = cpu_to_le32(2 << 4);
		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
		nv->firmware_options_3 = cpu_to_le32(2 << 13);
		nv->host_p = cpu_to_le32(BIT_11|BIT_10);
		nv->efi_parameters = cpu_to_le32(0);
		nv->reset_delay = 5;
		nv->max_luns_per_target = cpu_to_le16(128);
		nv->port_down_retry_count = cpu_to_le16(30);
		nv->link_down_timeout = cpu_to_le16(30);

		rval = 1;
	}

	if (qla_tgt_mode_enabled(vha)) {
		/* Don't enable full login after initial LIP */
		nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
		/* Don't enable LIP full login for initiator */
		nv->host_p &= cpu_to_le32(~BIT_10);
	}

	qlt_24xx_config_nvram_stage1(vha, nv);

	/* Reset Initialization control block */
	memset(icb, 0, ha->init_cb_size);

	/* Copy 1st segment. */
	dptr1 = (uint8_t *)icb;
	dptr2 = (uint8_t *)&nv->version;
	cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
	while (cnt--)
		*dptr1++ = *dptr2++;

	icb->login_retry_count = nv->login_retry_count;
	icb->link_down_on_nos = nv->link_down_on_nos;

	/* Copy 2nd segment. */
	dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
	dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
	cnt = (uint8_t *)&icb->reserved_3 -
	    (uint8_t *)&icb->interrupt_delay_timer;
	while (cnt--)
		*dptr1++ = *dptr2++;
	ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
	/*
	 * Setup driver NVRAM options.
	 */
	qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
	    "QLA2462");

	qlt_24xx_config_nvram_stage2(vha, icb);

	if (nv->host_p & cpu_to_le32(BIT_15)) {
		/* Use alternate WWN? */
		memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
		memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
	}

	/* Prepare nodename */
	if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
		/*
		 * Firmware will apply the following mask if the nodename was
		 * not provided.
		 */
		memcpy(icb->node_name, icb->port_name, WWN_SIZE);
		icb->node_name[0] &= 0xF0;
	}

	/* Set host adapter parameters. */
	ha->flags.disable_risc_code_load = 0;
	ha->flags.enable_lip_reset = 0;
	ha->flags.enable_lip_full_login =
	    le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
	ha->flags.enable_target_reset =
	    le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
	ha->flags.enable_led_scheme = 0;
	ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;

	ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
	    (BIT_6 | BIT_5 | BIT_4)) >> 4;

	memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
	    sizeof(ha->fw_seriallink_options24));

	/* save HBA serial number */
	ha->serial0 = icb->port_name[5];
	ha->serial1 = icb->port_name[6];
	ha->serial2 = icb->port_name[7];
	memcpy(vha->node_name, icb->node_name, WWN_SIZE);
	memcpy(vha->port_name, icb->port_name, WWN_SIZE);

	icb->execution_throttle = cpu_to_le16(0xFFFF);

	ha->retry_count = le16_to_cpu(nv->login_retry_count);

	/* Set minimum login_timeout to 4 seconds. */
	if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
		nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
	if (le16_to_cpu(nv->login_timeout) < 4)
		nv->login_timeout = cpu_to_le16(4);
	ha->login_timeout = le16_to_cpu(nv->login_timeout);

	/* Set minimum RATOV to 100 tenths of a second. */
	ha->r_a_tov = 100;

	ha->loop_reset_delay = nv->reset_delay;

	/* Link Down Timeout = 0:
	 *
	 * 	When Port Down timer expires we will start returning
	 *	I/O's to OS with "DID_NO_CONNECT".
	 *
	 * Link Down Timeout != 0:
	 *
	 *	 The driver waits for the link to come up after link down
	 *	 before returning I/Os to OS with "DID_NO_CONNECT".
	 */
	if (le16_to_cpu(nv->link_down_timeout) == 0) {
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
	} else {
		ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
		ha->loop_down_abort_time =
		    (LOOP_DOWN_TIME - ha->link_down_timeout);
	}

	/* Need enough time to try and get the port back. */
	ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
	if (qlport_down_retry)
		ha->port_down_retry_count = qlport_down_retry;

	/* Set login_retry_count */
	ha->login_retry_count  = le16_to_cpu(nv->login_retry_count);
	if (ha->port_down_retry_count ==
	    le16_to_cpu(nv->port_down_retry_count) &&
	    ha->port_down_retry_count > 3)
		ha->login_retry_count = ha->port_down_retry_count;
	else if (ha->port_down_retry_count > (int)ha->login_retry_count)
		ha->login_retry_count = ha->port_down_retry_count;
	if (ql2xloginretrycount)
		ha->login_retry_count = ql2xloginretrycount;

	/* N2N: driver will initiate Login instead of FW */
	/* NOTE(review): BIT_8 is OR'ed into a little-endian field without
	 * cpu_to_le32() -- wrong bit on big-endian hosts; confirm. */
	icb->firmware_options_3 |= BIT_8;

	/* Enable ZIO. */
	if (!vha->flags.init_done) {
		ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
		    (BIT_3 | BIT_2 | BIT_1 | BIT_0);
		ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
		    le16_to_cpu(icb->interrupt_delay_timer) : 2;
	}
	icb->firmware_options_2 &= cpu_to_le32(
	    ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
	if (ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = QLA_ZIO_MODE_6;

		ql_log(ql_log_info, vha, 0x006f,
		    "ZIO mode %d enabled; timer delay (%d us).\n",
		    ha->zio_mode, ha->zio_timer * 100);

		icb->firmware_options_2 |= cpu_to_le32(
		    (uint32_t)ha->zio_mode);
		icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
	}

	if (rval) {
		ql_log(ql_log_warn, vha, 0x0070,
		    "NVRAM configuration failed.\n");
	}
	return (rval);
}
7305
/* Dump the fields of a flash image-status block to the init debug log. */
static void
qla27xx_print_image(struct scsi_qla_host *vha, char *name,
    struct qla27xx_image_status *image_status)
{
	ql_dbg(ql_dbg_init, vha, 0x018b,
	    "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
	    name, "status",
	    image_status->image_status_mask,
	    le16_to_cpu(image_status->generation),
	    image_status->ver_major,
	    image_status->ver_minor,
	    image_status->bitmap,
	    le32_to_cpu(image_status->checksum),
	    le32_to_cpu(image_status->signature));
}
7321
7322 static bool
qla28xx_check_aux_image_status_signature(struct qla27xx_image_status * image_status)7323 qla28xx_check_aux_image_status_signature(
7324 struct qla27xx_image_status *image_status)
7325 {
7326 ulong signature = le32_to_cpu(image_status->signature);
7327
7328 return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
7329 }
7330
7331 static bool
qla27xx_check_image_status_signature(struct qla27xx_image_status * image_status)7332 qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
7333 {
7334 ulong signature = le32_to_cpu(image_status->signature);
7335
7336 return
7337 signature != QLA27XX_IMG_STATUS_SIGN &&
7338 signature != QLA28XX_IMG_STATUS_SIGN;
7339 }
7340
7341 static ulong
qla27xx_image_status_checksum(struct qla27xx_image_status * image_status)7342 qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
7343 {
7344 uint32_t *p = (void *)image_status;
7345 uint n = sizeof(*image_status) / sizeof(*p);
7346 uint32_t sum = 0;
7347
7348 for ( ; n--; p++)
7349 sum += le32_to_cpup(p);
7350
7351 return sum;
7352 }
7353
7354 static inline uint
qla28xx_component_bitmask(struct qla27xx_image_status * aux,uint bitmask)7355 qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
7356 {
7357 return aux->bitmap & bitmask ?
7358 QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
7359 }
7360
/* Record, per auxiliary flash component, whether the primary or the
 * secondary region is active, based on the aux image-status bitmap. */
static void
qla28xx_component_status(
    struct active_regions *active_regions, struct qla27xx_image_status *aux)
{
	active_regions->aux.board_config =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);

	active_regions->aux.vpd_nvram =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);

	active_regions->aux.npiv_config_0_1 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);

	active_regions->aux.npiv_config_2_3 =
	    qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
}
7377
7378 static int
qla27xx_compare_image_generation(struct qla27xx_image_status * pri_image_status,struct qla27xx_image_status * sec_image_status)7379 qla27xx_compare_image_generation(
7380 struct qla27xx_image_status *pri_image_status,
7381 struct qla27xx_image_status *sec_image_status)
7382 {
7383 /* calculate generation delta as uint16 (this accounts for wrap) */
7384 int16_t delta =
7385 le16_to_cpu(pri_image_status->generation) -
7386 le16_to_cpu(sec_image_status->generation);
7387
7388 ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
7389
7390 return delta;
7391 }
7392
7393 void
qla28xx_get_aux_images(struct scsi_qla_host * vha,struct active_regions * active_regions)7394 qla28xx_get_aux_images(
7395 struct scsi_qla_host *vha, struct active_regions *active_regions)
7396 {
7397 struct qla_hw_data *ha = vha->hw;
7398 struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
7399 bool valid_pri_image = false, valid_sec_image = false;
7400 bool active_pri_image = false, active_sec_image = false;
7401
7402 if (!ha->flt_region_aux_img_status_pri) {
7403 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
7404 goto check_sec_image;
7405 }
7406
7407 qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
7408 ha->flt_region_aux_img_status_pri,
7409 sizeof(pri_aux_image_status) >> 2);
7410 qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
7411
7412 if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
7413 ql_dbg(ql_dbg_init, vha, 0x018b,
7414 "Primary aux image signature (%#x) not valid\n",
7415 le32_to_cpu(pri_aux_image_status.signature));
7416 goto check_sec_image;
7417 }
7418
7419 if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
7420 ql_dbg(ql_dbg_init, vha, 0x018c,
7421 "Primary aux image checksum failed\n");
7422 goto check_sec_image;
7423 }
7424
7425 valid_pri_image = true;
7426
7427 if (pri_aux_image_status.image_status_mask & 1) {
7428 ql_dbg(ql_dbg_init, vha, 0x018d,
7429 "Primary aux image is active\n");
7430 active_pri_image = true;
7431 }
7432
7433 check_sec_image:
7434 if (!ha->flt_region_aux_img_status_sec) {
7435 ql_dbg(ql_dbg_init, vha, 0x018a,
7436 "Secondary aux image not addressed\n");
7437 goto check_valid_image;
7438 }
7439
7440 qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
7441 ha->flt_region_aux_img_status_sec,
7442 sizeof(sec_aux_image_status) >> 2);
7443 qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
7444
7445 if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
7446 ql_dbg(ql_dbg_init, vha, 0x018b,
7447 "Secondary aux image signature (%#x) not valid\n",
7448 le32_to_cpu(sec_aux_image_status.signature));
7449 goto check_valid_image;
7450 }
7451
7452 if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
7453 ql_dbg(ql_dbg_init, vha, 0x018c,
7454 "Secondary aux image checksum failed\n");
7455 goto check_valid_image;
7456 }
7457
7458 valid_sec_image = true;
7459
7460 if (sec_aux_image_status.image_status_mask & 1) {
7461 ql_dbg(ql_dbg_init, vha, 0x018d,
7462 "Secondary aux image is active\n");
7463 active_sec_image = true;
7464 }
7465
7466 check_valid_image:
7467 if (valid_pri_image && active_pri_image &&
7468 valid_sec_image && active_sec_image) {
7469 if (qla27xx_compare_image_generation(&pri_aux_image_status,
7470 &sec_aux_image_status) >= 0) {
7471 qla28xx_component_status(active_regions,
7472 &pri_aux_image_status);
7473 } else {
7474 qla28xx_component_status(active_regions,
7475 &sec_aux_image_status);
7476 }
7477 } else if (valid_pri_image && active_pri_image) {
7478 qla28xx_component_status(active_regions, &pri_aux_image_status);
7479 } else if (valid_sec_image && active_sec_image) {
7480 qla28xx_component_status(active_regions, &sec_aux_image_status);
7481 }
7482
7483 ql_dbg(ql_dbg_init, vha, 0x018f,
7484 "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
7485 active_regions->aux.board_config,
7486 active_regions->aux.vpd_nvram,
7487 active_regions->aux.npiv_config_0_1,
7488 active_regions->aux.npiv_config_2_3);
7489 }
7490
7491 void
qla27xx_get_active_image(struct scsi_qla_host * vha,struct active_regions * active_regions)7492 qla27xx_get_active_image(struct scsi_qla_host *vha,
7493 struct active_regions *active_regions)
7494 {
7495 struct qla_hw_data *ha = vha->hw;
7496 struct qla27xx_image_status pri_image_status, sec_image_status;
7497 bool valid_pri_image = false, valid_sec_image = false;
7498 bool active_pri_image = false, active_sec_image = false;
7499
7500 if (!ha->flt_region_img_status_pri) {
7501 ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
7502 goto check_sec_image;
7503 }
7504
7505 if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
7506 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
7507 QLA_SUCCESS) {
7508 WARN_ON_ONCE(true);
7509 goto check_sec_image;
7510 }
7511 qla27xx_print_image(vha, "Primary image", &pri_image_status);
7512
7513 if (qla27xx_check_image_status_signature(&pri_image_status)) {
7514 ql_dbg(ql_dbg_init, vha, 0x018b,
7515 "Primary image signature (%#x) not valid\n",
7516 le32_to_cpu(pri_image_status.signature));
7517 goto check_sec_image;
7518 }
7519
7520 if (qla27xx_image_status_checksum(&pri_image_status)) {
7521 ql_dbg(ql_dbg_init, vha, 0x018c,
7522 "Primary image checksum failed\n");
7523 goto check_sec_image;
7524 }
7525
7526 valid_pri_image = true;
7527
7528 if (pri_image_status.image_status_mask & 1) {
7529 ql_dbg(ql_dbg_init, vha, 0x018d,
7530 "Primary image is active\n");
7531 active_pri_image = true;
7532 }
7533
7534 check_sec_image:
7535 if (!ha->flt_region_img_status_sec) {
7536 ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
7537 goto check_valid_image;
7538 }
7539
7540 qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
7541 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
7542 qla27xx_print_image(vha, "Secondary image", &sec_image_status);
7543
7544 if (qla27xx_check_image_status_signature(&sec_image_status)) {
7545 ql_dbg(ql_dbg_init, vha, 0x018b,
7546 "Secondary image signature (%#x) not valid\n",
7547 le32_to_cpu(sec_image_status.signature));
7548 goto check_valid_image;
7549 }
7550
7551 if (qla27xx_image_status_checksum(&sec_image_status)) {
7552 ql_dbg(ql_dbg_init, vha, 0x018c,
7553 "Secondary image checksum failed\n");
7554 goto check_valid_image;
7555 }
7556
7557 valid_sec_image = true;
7558
7559 if (sec_image_status.image_status_mask & 1) {
7560 ql_dbg(ql_dbg_init, vha, 0x018d,
7561 "Secondary image is active\n");
7562 active_sec_image = true;
7563 }
7564
7565 check_valid_image:
7566 if (valid_pri_image && active_pri_image)
7567 active_regions->global = QLA27XX_PRIMARY_IMAGE;
7568
7569 if (valid_sec_image && active_sec_image) {
7570 if (!active_regions->global ||
7571 qla27xx_compare_image_generation(
7572 &pri_image_status, &sec_image_status) < 0) {
7573 active_regions->global = QLA27XX_SECONDARY_IMAGE;
7574 }
7575 }
7576
7577 ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
7578 active_regions->global == QLA27XX_DEFAULT_IMAGE ?
7579 "default (boot/fw)" :
7580 active_regions->global == QLA27XX_PRIMARY_IMAGE ?
7581 "primary" :
7582 active_regions->global == QLA27XX_SECONDARY_IMAGE ?
7583 "secondary" : "invalid",
7584 active_regions->global);
7585 }
7586
bool qla24xx_risc_firmware_invalid(uint32_t *dword)
{
	/*
	 * Words 4-7 of a valid image header carry version data; blank or
	 * erased flash reads back as all zeroes or all ones there.
	 */
	uint32_t any_bit = dword[4] | dword[5] | dword[6] | dword[7];
	uint32_t all_bits = dword[4] & dword[5] & dword[6] & dword[7];

	return any_bit == 0 || all_bits == ~0U;
}
7593
/*
 * qla24xx_load_risc_flash() - Load RISC firmware from flash, plus the
 * firmware dump template(s) on ISP27xx/28xx parts.
 * @vha: host adapter
 * @srisc_addr: out - RISC address of the first (boot) segment
 * @faddr: flash offset (dword units) of the firmware image
 *
 * Each segment is staged through the request queue's DMA-able ring
 * buffer and written to RISC memory via qla2x00_load_ram().
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.  Dump-template problems
 * are deliberately non-fatal: the "failed" exit clears the current fwdt
 * entry and still returns QLA_SUCCESS.
 */
static int
qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
    uint32_t faddr)
{
	int rval;
	uint templates, segments, fragment;
	ulong i;
	uint j;
	ulong dlen;
	uint32_t *dcode;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "FW: Loading firmware from flash (%x).\n", faddr);

	/* Peek at the header; all-zero or all-one words 4-7 mean blank
	 * or erased flash, i.e. no firmware at this address. */
	dcode = (void *)req->ring;
	qla24xx_read_flash_data(vha, dcode, faddr, 8);
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x008c,
		    "Unable to verify the integrity of flash firmware "
		    "image.\n");
		ql_log(ql_log_fatal, vha, 0x008d,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);

		return QLA_FUNCTION_FAILED;
	}

	dcode = (void *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x008d,
		    "-> Loading segment %u...\n", j);
		/* Segment header: dwords 2/3 hold load address and size. */
		qla24xx_read_flash_data(vha, dcode, faddr, 10);
		risc_addr = be32_to_cpu(dcode[2]);
		risc_size = be32_to_cpu(dcode[3]);
		if (!*srisc_addr) {
			/* First segment: record the boot address and the
			 * attribute word (BIT_9 => two dump templates). */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(dcode[9]);
		}

		/* Transfer the segment in DMA-sized fragments. */
		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x008e,
			    "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
			    fragment, risc_addr, faddr, dlen);
			qla24xx_read_flash_data(vha, dcode, faddr, dlen);
			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(dcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008f,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			faddr += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* Dump templates only follow the firmware on ISP27xx/28xx. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		/* Drop any template left over from a previous load. */
		if (fwdt->template)
			vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		dcode = (void *)req->ring;
		qla24xx_read_flash_data(vha, dcode, faddr, 7);
		risc_size = be32_to_cpu(dcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0161,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, faddr, risc_size);
		/* 0 or 0xffffffff size: blank/erased flash, no template. */
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0162,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		faddr += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0163,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0164,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		/* NOTE(review): this read's result is not checked; the
		 * template validation below is the only guard -- confirm. */
		qla24xx_read_flash_data(vha, dcode, faddr, risc_size);

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0165,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		/* The template's self-declared size must fit the array. */
		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0166,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0167,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0168,
		    "-> fwdt%u loaded template ok\n", j);

		/* Advance past the template data and its checksum word. */
		faddr += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template failure is non-fatal; run without a dump template. */
	if (fwdt->template)
		vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
7739
7740 #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
7741
/*
 * qla2x00_load_risc() - Load RISC firmware on ISP2x00-family adapters
 * from a request-firmware blob.
 * @vha: host adapter
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * Walks the blob's segment table (blob->segs, zero-terminated), byte
 * swapping each 16-bit word through the request ring's DMA buffer and
 * writing it to RISC memory with qla2x00_load_ram().
 *
 * Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on a missing, short or
 * corrupt image, or on a load-ram failure.
 */
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	int i, fragment;
	uint16_t *wcode, *fwcode;
	uint32_t risc_addr, risc_size, fwclen, wlen, *seg;
	struct fw_blob *blob;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Load firmware blob. */
	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_info, vha, 0x0083,
		    "Firmware image unavailable.\n");
		ql_log(ql_log_info, vha, 0x0084,
		    "Firmware images can be retrieved from: "QLA_FW_URL ".\n");
		return QLA_FUNCTION_FAILED;
	}

	rval = QLA_SUCCESS;

	wcode = (uint16_t *)req->ring;
	*srisc_addr = 0;
	fwcode = (uint16_t *)blob->fw->data;
	fwclen = 0;

	/* Validate firmware image by checking version. */
	if (blob->fw->size < 8 * sizeof(uint16_t)) {
		ql_log(ql_log_fatal, vha, 0x0085,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		goto fail_fw_integrity;
	}
	/* Words 4-7 carry version data; all-zero or all-ones is junk. */
	for (i = 0; i < 4; i++)
		wcode[i] = be16_to_cpu(fwcode[i + 4]);
	if ((wcode[0] == 0xffff && wcode[1] == 0xffff && wcode[2] == 0xffff &&
	    wcode[3] == 0xffff) || (wcode[0] == 0 && wcode[1] == 0 &&
	    wcode[2] == 0 && wcode[3] == 0)) {
		ql_log(ql_log_fatal, vha, 0x0086,
		    "Unable to verify integrity of firmware image.\n");
		ql_log(ql_log_fatal, vha, 0x0087,
		    "Firmware data: %04x %04x %04x %04x.\n",
		    wcode[0], wcode[1], wcode[2], wcode[3]);
		goto fail_fw_integrity;
	}

	seg = blob->segs;
	/* The segment list ends at a zero load address. */
	while (*seg && rval == QLA_SUCCESS) {
		risc_addr = *seg;
		*srisc_addr = *srisc_addr == 0 ? *seg : *srisc_addr;
		risc_size = be16_to_cpu(fwcode[3]);

		/* Validate firmware image size. */
		fwclen += risc_size * sizeof(uint16_t);
		if (blob->fw->size < fwclen) {
			ql_log(ql_log_fatal, vha, 0x0088,
			    "Unable to verify integrity of firmware image "
			    "(%zd).\n", blob->fw->size);
			goto fail_fw_integrity;
		}

		/* Transfer the segment in DMA-sized fragments. */
		fragment = 0;
		while (risc_size > 0 && rval == QLA_SUCCESS) {
			wlen = (uint16_t)(ha->fw_transfer_size >> 1);
			if (wlen > risc_size)
				wlen = risc_size;
			ql_dbg(ql_dbg_init, vha, 0x0089,
			    "Loading risc segment@ risc addr %x number of "
			    "words 0x%x.\n", risc_addr, wlen);

			for (i = 0; i < wlen; i++)
				wcode[i] = swab16(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr,
			    wlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x008a,
				    "Failed to load segment %d of firmware.\n",
				    fragment);
				break;
			}

			fwcode += wlen;
			risc_addr += wlen;
			risc_size -= wlen;
			fragment++;
		}

		/* Next segment. */
		seg++;
	}
	return rval;

fail_fw_integrity:
	return QLA_FUNCTION_FAILED;
}
7840
/*
 * qla24xx_load_risc_blob() - Load ISP24xx+ RISC firmware, plus the
 * ISP27xx/28xx dump template(s), from a request-firmware blob.
 * @vha: host adapter
 * @srisc_addr: out - RISC address of the first (boot) segment
 *
 * Mirrors qla24xx_load_risc_flash(), but sources the image from
 * blob->fw->data instead of flash.
 *
 * Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.  Dump-template problems
 * are non-fatal: the "failed" exit clears the current fwdt entry and
 * still returns QLA_SUCCESS.
 */
static int
qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	uint templates, segments, fragment;
	uint32_t *dcode;
	ulong dlen;
	uint32_t risc_addr, risc_size, risc_attr = 0;
	ulong i;
	uint j;
	struct fw_blob *blob;
	uint32_t *fwcode;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	struct fwdt *fwdt = ha->fwdt;

	ql_dbg(ql_dbg_init, vha, 0x0090,
	    "-> FW: Loading via request-firmware.\n");

	blob = qla2x00_request_firmware(vha);
	if (!blob) {
		ql_log(ql_log_warn, vha, 0x0092,
		    "-> Firmware file not found.\n");

		return QLA_FUNCTION_FAILED;
	}

	/* All-zero or all-one header words mean a corrupt/empty image. */
	fwcode = (void *)blob->fw->data;
	dcode = fwcode;
	if (qla24xx_risc_firmware_invalid(dcode)) {
		ql_log(ql_log_fatal, vha, 0x0093,
		    "Unable to verify integrity of firmware image (%zd).\n",
		    blob->fw->size);
		ql_log(ql_log_fatal, vha, 0x0095,
		    "Firmware data: %08x %08x %08x %08x.\n",
		    dcode[0], dcode[1], dcode[2], dcode[3]);
		return QLA_FUNCTION_FAILED;
	}

	dcode = (void *)req->ring;
	*srisc_addr = 0;
	segments = FA_RISC_CODE_SEGMENTS;
	for (j = 0; j < segments; j++) {
		ql_dbg(ql_dbg_init, vha, 0x0096,
		    "-> Loading segment %u...\n", j);
		/* Segment header: dwords 2/3 hold load address and size. */
		risc_addr = be32_to_cpu(fwcode[2]);
		risc_size = be32_to_cpu(fwcode[3]);

		if (!*srisc_addr) {
			/* First segment: record the boot address and the
			 * attribute word (BIT_9 => two dump templates). */
			*srisc_addr = risc_addr;
			risc_attr = be32_to_cpu(fwcode[9]);
		}

		/* Transfer the segment in DMA-sized fragments. */
		dlen = ha->fw_transfer_size >> 2;
		for (fragment = 0; risc_size; fragment++) {
			if (dlen > risc_size)
				dlen = risc_size;

			ql_dbg(ql_dbg_init, vha, 0x0097,
			    "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n",
			    fragment, risc_addr,
			    (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data),
			    dlen);

			for (i = 0; i < dlen; i++)
				dcode[i] = swab32(fwcode[i]);

			rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
			if (rval) {
				ql_log(ql_log_fatal, vha, 0x0098,
				    "-> Failed load firmware fragment %u.\n",
				    fragment);
				return QLA_FUNCTION_FAILED;
			}

			fwcode += dlen;
			risc_addr += dlen;
			risc_size -= dlen;
		}
	}

	/* Dump templates only follow the firmware on ISP27xx/28xx. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return QLA_SUCCESS;

	templates = (risc_attr & BIT_9) ? 2 : 1;
	ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
	for (j = 0; j < templates; j++, fwdt++) {
		/* Drop any template left over from a previous load. */
		if (fwdt->template)
			vfree(fwdt->template);
		fwdt->template = NULL;
		fwdt->length = 0;

		risc_size = be32_to_cpu(fwcode[2]);
		ql_dbg(ql_dbg_init, vha, 0x0171,
		    "-> fwdt%u template array at %#x (%#x dwords)\n",
		    j, (uint32_t)((void *)fwcode - (void *)blob->fw->data),
		    risc_size);
		/* 0 or 0xffffffff size: no template present. */
		if (!risc_size || !~risc_size) {
			ql_dbg(ql_dbg_init, vha, 0x0172,
			    "-> fwdt%u failed to read array\n", j);
			goto failed;
		}

		/* skip header and ignore checksum */
		fwcode += 7;
		risc_size -= 8;

		ql_dbg(ql_dbg_init, vha, 0x0173,
		    "-> fwdt%u template allocate template %#x words...\n",
		    j, risc_size);
		fwdt->template = vmalloc(risc_size * sizeof(*dcode));
		if (!fwdt->template) {
			ql_log(ql_log_warn, vha, 0x0174,
			    "-> fwdt%u failed allocate template.\n", j);
			goto failed;
		}

		dcode = fwdt->template;
		for (i = 0; i < risc_size; i++)
			dcode[i] = fwcode[i];

		if (!qla27xx_fwdt_template_valid(dcode)) {
			ql_log(ql_log_warn, vha, 0x0175,
			    "-> fwdt%u failed template validate\n", j);
			goto failed;
		}

		/* The template's self-declared size must fit the array. */
		dlen = qla27xx_fwdt_template_size(dcode);
		ql_dbg(ql_dbg_init, vha, 0x0176,
		    "-> fwdt%u template size %#lx bytes (%#lx words)\n",
		    j, dlen, dlen / sizeof(*dcode));
		if (dlen > risc_size * sizeof(*dcode)) {
			ql_log(ql_log_warn, vha, 0x0177,
			    "-> fwdt%u template exceeds array (%-lu bytes)\n",
			    j, dlen - risc_size * sizeof(*dcode));
			goto failed;
		}

		fwdt->length = dlen;
		ql_dbg(ql_dbg_init, vha, 0x0178,
		    "-> fwdt%u loaded template ok\n", j);

		/* Advance past the template data and its checksum word. */
		fwcode += risc_size + 1;
	}

	return QLA_SUCCESS;

failed:
	/* Template failure is non-fatal; run without a dump template. */
	if (fwdt->template)
		vfree(fwdt->template);
	fwdt->template = NULL;
	fwdt->length = 0;

	return QLA_SUCCESS;
}
7996
7997 int
qla24xx_load_risc(scsi_qla_host_t * vha,uint32_t * srisc_addr)7998 qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
7999 {
8000 int rval;
8001
8002 if (ql2xfwloadbin == 1)
8003 return qla81xx_load_risc(vha, srisc_addr);
8004
8005 /*
8006 * FW Load priority:
8007 * 1) Firmware via request-firmware interface (.bin file).
8008 * 2) Firmware residing in flash.
8009 */
8010 rval = qla24xx_load_risc_blob(vha, srisc_addr);
8011 if (rval == QLA_SUCCESS)
8012 return rval;
8013
8014 return qla24xx_load_risc_flash(vha, srisc_addr,
8015 vha->hw->flt_region_fw);
8016 }
8017
/*
 * qla81xx_load_risc() - Firmware load with flash-first priority and a
 * golden-firmware fallback.
 * @vha: host adapter
 * @srisc_addr: out - RISC address of the first segment loaded
 *
 * On ISP27xx/28xx the secondary flash image is tried first when the
 * image-status blocks mark it active.  If the golden firmware ends up
 * running, ha->flags.running_gold_fw is set so the rest of the driver
 * knows a flash update is needed.
 */
int
qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
	int rval;
	struct qla_hw_data *ha = vha->hw;
	struct active_regions active_regions = { };

	/* ql2xfwloadbin == 2: skip flash, go straight to the blob. */
	if (ql2xfwloadbin == 2)
		goto try_blob_fw;

	/* FW Load priority:
	 * 1) Firmware residing in flash.
	 * 2) Firmware via request-firmware interface (.bin file).
	 * 3) Golden-Firmware residing in flash -- (limited operation).
	 */

	/* Only ISP27xx/28xx have a selectable secondary image. */
	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto try_primary_fw;

	qla27xx_get_active_image(vha, &active_regions);

	if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
		goto try_primary_fw;

	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading secondary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
	if (!rval)
		return rval;

try_primary_fw:
	ql_dbg(ql_dbg_init, vha, 0x008b,
	    "Loading primary firmware image.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
	if (!rval)
		return rval;

try_blob_fw:
	/* Blob succeeded, or there is no golden image to fall back to. */
	rval = qla24xx_load_risc_blob(vha, srisc_addr);
	if (!rval || !ha->flt_region_gold_fw)
		return rval;

	ql_log(ql_log_info, vha, 0x0099,
	    "Attempting to fallback to golden firmware.\n");
	rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
	if (rval)
		return rval;

	/* Golden firmware is running: flag the needed flash update. */
	ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n");
	ha->flags.running_gold_fw = 1;
	return rval;
}
8070
/*
 * qla2x00_try_to_stop_firmware() - Ask the firmware to stop, retrying
 * with a chip reset and re-initialization between attempts.
 * @vha: host adapter
 *
 * No-op unless the adapter is FWI2-capable, has firmware loaded and
 * started, and the PCI channel is usable.  Timeout and invalid-command
 * results are treated as final (no retry).  Always marks the firmware
 * as stopped and clears fw_init_done on exit.
 */
void
qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
{
	int ret, retries;
	struct qla_hw_data *ha = vha->hw;

	if (ha->flags.pci_channel_io_perm_failure)
		return;
	if (!IS_FWI2_CAPABLE(ha))
		return;
	if (!ha->fw_major_version)
		return;
	if (!ha->flags.fw_started)
		return;

	ret = qla2x00_stop_firmware(vha);
	/* Up to 5 retries; a failed chip-diag or setup-chip skips the
	 * stop attempt but still consumes a retry via continue. */
	for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
	    ret != QLA_INVALID_COMMAND && retries ; retries--) {
		ha->isp_ops->reset_chip(vha);
		if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
			continue;
		if (qla2x00_setup_chip(vha) != QLA_SUCCESS)
			continue;
		ql_log(ql_log_info, vha, 0x8015,
		    "Attempting retry of stop-firmware command.\n");
		ret = qla2x00_stop_firmware(vha);
	}

	QLA_FW_STOPPED(ha);
	ha->flags.fw_init_done = 0;
}
8102
/*
 * qla24xx_configure_vhba() - Bring up a virtual (NPIV) port.
 * @vha: virtual port; must not be the base physical port
 *
 * Waits for firmware readiness on the base port, sends a sync marker,
 * logs the vport into the SNS (fabric name server, D_ID 0xFFFFFC) and
 * kicks off a loop resync on the base host.
 *
 * Returns QLA_SUCCESS, QLA_FUNCTION_FAILED on SNS login failure, or
 * -EINVAL if called on the physical (vp_idx 0) port.
 */
int
qla24xx_configure_vhba(scsi_qla_host_t *vha)
{
	int rval = QLA_SUCCESS;
	int rval2;
	uint16_t mb[MAILBOX_REGISTER_COUNT];
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* Only meaningful for NPIV ports, never the base port. */
	if (!vha->vp_idx)
		return -EINVAL;

	rval = qla2x00_fw_ready(base_vha);

	if (rval == QLA_SUCCESS) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
	}

	vha->flags.management_server_logged_in = 0;

	/* Login to SNS first */
	rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
	    BIT_1);
	if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
		if (rval2 == QLA_MEMORY_ALLOC_FAILED)
			ql_dbg(ql_dbg_init, vha, 0x0120,
			    "Failed SNS login: loop_id=%x, rval2=%d\n",
			    NPH_SNS, rval2);
		else
			ql_dbg(ql_dbg_init, vha, 0x0103,
			    "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
			    "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
			    NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
		return (QLA_FUNCTION_FAILED);
	}

	/* SNS login succeeded: declare the link up and resync. */
	atomic_set(&vha->loop_down_timer, 0);
	atomic_set(&vha->loop_state, LOOP_UP);
	set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
	set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
	rval = qla2x00_loop_resync(base_vha);

	return rval;
}
8148
8149 /* 84XX Support **************************************************************/
8150
/*
 * One ISP84xx chip can back multiple host functions; the shared chip
 * state objects live on this list, keyed by PCI bus (see
 * qla84xx_get_chip()) and protected by qla_cs84xx_mutex.
 */
static LIST_HEAD(qla_cs84xx_list);
static DEFINE_MUTEX(qla_cs84xx_mutex);
8153
8154 static struct qla_chip_state_84xx *
qla84xx_get_chip(struct scsi_qla_host * vha)8155 qla84xx_get_chip(struct scsi_qla_host *vha)
8156 {
8157 struct qla_chip_state_84xx *cs84xx;
8158 struct qla_hw_data *ha = vha->hw;
8159
8160 mutex_lock(&qla_cs84xx_mutex);
8161
8162 /* Find any shared 84xx chip. */
8163 list_for_each_entry(cs84xx, &qla_cs84xx_list, list) {
8164 if (cs84xx->bus == ha->pdev->bus) {
8165 kref_get(&cs84xx->kref);
8166 goto done;
8167 }
8168 }
8169
8170 cs84xx = kzalloc(sizeof(*cs84xx), GFP_KERNEL);
8171 if (!cs84xx)
8172 goto done;
8173
8174 kref_init(&cs84xx->kref);
8175 spin_lock_init(&cs84xx->access_lock);
8176 mutex_init(&cs84xx->fw_update_mutex);
8177 cs84xx->bus = ha->pdev->bus;
8178
8179 list_add_tail(&cs84xx->list, &qla_cs84xx_list);
8180 done:
8181 mutex_unlock(&qla_cs84xx_mutex);
8182 return cs84xx;
8183 }
8184
8185 static void
__qla84xx_chip_release(struct kref * kref)8186 __qla84xx_chip_release(struct kref *kref)
8187 {
8188 struct qla_chip_state_84xx *cs84xx =
8189 container_of(kref, struct qla_chip_state_84xx, kref);
8190
8191 mutex_lock(&qla_cs84xx_mutex);
8192 list_del(&cs84xx->list);
8193 mutex_unlock(&qla_cs84xx_mutex);
8194 kfree(cs84xx);
8195 }
8196
8197 void
qla84xx_put_chip(struct scsi_qla_host * vha)8198 qla84xx_put_chip(struct scsi_qla_host *vha)
8199 {
8200 struct qla_hw_data *ha = vha->hw;
8201
8202 if (ha->cs84xx)
8203 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
8204 }
8205
8206 static int
qla84xx_init_chip(scsi_qla_host_t * vha)8207 qla84xx_init_chip(scsi_qla_host_t *vha)
8208 {
8209 int rval;
8210 uint16_t status[2];
8211 struct qla_hw_data *ha = vha->hw;
8212
8213 mutex_lock(&ha->cs84xx->fw_update_mutex);
8214
8215 rval = qla84xx_verify_chip(vha, status);
8216
8217 mutex_unlock(&ha->cs84xx->fw_update_mutex);
8218
8219 return rval != QLA_SUCCESS || status[0] ? QLA_FUNCTION_FAILED :
8220 QLA_SUCCESS;
8221 }
8222
8223 /* 81XX Support **************************************************************/
8224
8225 int
qla81xx_nvram_config(scsi_qla_host_t * vha)8226 qla81xx_nvram_config(scsi_qla_host_t *vha)
8227 {
8228 int rval;
8229 struct init_cb_81xx *icb;
8230 struct nvram_81xx *nv;
8231 uint32_t *dptr;
8232 uint8_t *dptr1, *dptr2;
8233 uint32_t chksum;
8234 uint16_t cnt;
8235 struct qla_hw_data *ha = vha->hw;
8236 uint32_t faddr;
8237 struct active_regions active_regions = { };
8238
8239 rval = QLA_SUCCESS;
8240 icb = (struct init_cb_81xx *)ha->init_cb;
8241 nv = ha->nvram;
8242
8243 /* Determine NVRAM starting address. */
8244 ha->nvram_size = sizeof(*nv);
8245 ha->vpd_size = FA_NVRAM_VPD_SIZE;
8246 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
8247 ha->vpd_size = FA_VPD_SIZE_82XX;
8248
8249 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
8250 qla28xx_get_aux_images(vha, &active_regions);
8251
8252 /* Get VPD data into cache */
8253 ha->vpd = ha->nvram + VPD_OFFSET;
8254
8255 faddr = ha->flt_region_vpd;
8256 if (IS_QLA28XX(ha)) {
8257 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8258 faddr = ha->flt_region_vpd_sec;
8259 ql_dbg(ql_dbg_init, vha, 0x0110,
8260 "Loading %s nvram image.\n",
8261 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8262 "primary" : "secondary");
8263 }
8264 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
8265
8266 /* Get NVRAM data into cache and calculate checksum. */
8267 faddr = ha->flt_region_nvram;
8268 if (IS_QLA28XX(ha)) {
8269 if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
8270 faddr = ha->flt_region_nvram_sec;
8271 }
8272 ql_dbg(ql_dbg_init, vha, 0x0110,
8273 "Loading %s nvram image.\n",
8274 active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
8275 "primary" : "secondary");
8276 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
8277
8278 dptr = (uint32_t *)nv;
8279 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
8280 chksum += le32_to_cpu(*dptr);
8281
8282 ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111,
8283 "Contents of NVRAM:\n");
8284 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112,
8285 nv, ha->nvram_size);
8286
8287 /* Bad NVRAM data, set defaults parameters. */
8288 if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
8289 le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
8290 /* Reset NVRAM data. */
8291 ql_log(ql_log_info, vha, 0x0073,
8292 "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n",
8293 chksum, nv->id, le16_to_cpu(nv->nvram_version));
8294 ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv));
8295 ql_log(ql_log_info, vha, 0x0074,
8296 "Falling back to functioning (yet invalid -- WWPN) "
8297 "defaults.\n");
8298
8299 /*
8300 * Set default initialization control block.
8301 */
8302 memset(nv, 0, ha->nvram_size);
8303 nv->nvram_version = cpu_to_le16(ICB_VERSION);
8304 nv->version = cpu_to_le16(ICB_VERSION);
8305 nv->frame_payload_size = 2048;
8306 nv->execution_throttle = cpu_to_le16(0xFFFF);
8307 nv->exchange_count = cpu_to_le16(0);
8308 nv->port_name[0] = 0x21;
8309 nv->port_name[1] = 0x00 + ha->port_no + 1;
8310 nv->port_name[2] = 0x00;
8311 nv->port_name[3] = 0xe0;
8312 nv->port_name[4] = 0x8b;
8313 nv->port_name[5] = 0x1c;
8314 nv->port_name[6] = 0x55;
8315 nv->port_name[7] = 0x86;
8316 nv->node_name[0] = 0x20;
8317 nv->node_name[1] = 0x00;
8318 nv->node_name[2] = 0x00;
8319 nv->node_name[3] = 0xe0;
8320 nv->node_name[4] = 0x8b;
8321 nv->node_name[5] = 0x1c;
8322 nv->node_name[6] = 0x55;
8323 nv->node_name[7] = 0x86;
8324 nv->login_retry_count = cpu_to_le16(8);
8325 nv->interrupt_delay_timer = cpu_to_le16(0);
8326 nv->login_timeout = cpu_to_le16(0);
8327 nv->firmware_options_1 =
8328 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
8329 nv->firmware_options_2 = cpu_to_le32(2 << 4);
8330 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
8331 nv->firmware_options_3 = cpu_to_le32(2 << 13);
8332 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
8333 nv->efi_parameters = cpu_to_le32(0);
8334 nv->reset_delay = 5;
8335 nv->max_luns_per_target = cpu_to_le16(128);
8336 nv->port_down_retry_count = cpu_to_le16(30);
8337 nv->link_down_timeout = cpu_to_le16(180);
8338 nv->enode_mac[0] = 0x00;
8339 nv->enode_mac[1] = 0xC0;
8340 nv->enode_mac[2] = 0xDD;
8341 nv->enode_mac[3] = 0x04;
8342 nv->enode_mac[4] = 0x05;
8343 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
8344
8345 rval = 1;
8346 }
8347
8348 if (IS_T10_PI_CAPABLE(ha))
8349 nv->frame_payload_size &= ~7;
8350
8351 qlt_81xx_config_nvram_stage1(vha, nv);
8352
8353 /* Reset Initialization control block */
8354 memset(icb, 0, ha->init_cb_size);
8355
8356 /* Copy 1st segment. */
8357 dptr1 = (uint8_t *)icb;
8358 dptr2 = (uint8_t *)&nv->version;
8359 cnt = (uint8_t *)&icb->response_q_inpointer - (uint8_t *)&icb->version;
8360 while (cnt--)
8361 *dptr1++ = *dptr2++;
8362
8363 icb->login_retry_count = nv->login_retry_count;
8364
8365 /* Copy 2nd segment. */
8366 dptr1 = (uint8_t *)&icb->interrupt_delay_timer;
8367 dptr2 = (uint8_t *)&nv->interrupt_delay_timer;
8368 cnt = (uint8_t *)&icb->reserved_5 -
8369 (uint8_t *)&icb->interrupt_delay_timer;
8370 while (cnt--)
8371 *dptr1++ = *dptr2++;
8372
8373 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
8374 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
8375 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
8376 icb->enode_mac[0] = 0x00;
8377 icb->enode_mac[1] = 0xC0;
8378 icb->enode_mac[2] = 0xDD;
8379 icb->enode_mac[3] = 0x04;
8380 icb->enode_mac[4] = 0x05;
8381 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
8382 }
8383
8384 /* Use extended-initialization control block. */
8385 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
8386 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
8387 /*
8388 * Setup driver NVRAM options.
8389 */
8390 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
8391 "QLE8XXX");
8392
8393 qlt_81xx_config_nvram_stage2(vha, icb);
8394
8395 /* Use alternate WWN? */
8396 if (nv->host_p & cpu_to_le32(BIT_15)) {
8397 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
8398 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
8399 }
8400
8401 /* Prepare nodename */
8402 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
8403 /*
8404 * Firmware will apply the following mask if the nodename was
8405 * not provided.
8406 */
8407 memcpy(icb->node_name, icb->port_name, WWN_SIZE);
8408 icb->node_name[0] &= 0xF0;
8409 }
8410
8411 /* Set host adapter parameters. */
8412 ha->flags.disable_risc_code_load = 0;
8413 ha->flags.enable_lip_reset = 0;
8414 ha->flags.enable_lip_full_login =
8415 le32_to_cpu(nv->host_p) & BIT_10 ? 1 : 0;
8416 ha->flags.enable_target_reset =
8417 le32_to_cpu(nv->host_p) & BIT_11 ? 1 : 0;
8418 ha->flags.enable_led_scheme = 0;
8419 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
8420
8421 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8422 (BIT_6 | BIT_5 | BIT_4)) >> 4;
8423
8424 /* save HBA serial number */
8425 ha->serial0 = icb->port_name[5];
8426 ha->serial1 = icb->port_name[6];
8427 ha->serial2 = icb->port_name[7];
8428 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
8429 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
8430
8431 icb->execution_throttle = cpu_to_le16(0xFFFF);
8432
8433 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8434
8435 /* Set minimum login_timeout to 4 seconds. */
8436 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
8437 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
8438 if (le16_to_cpu(nv->login_timeout) < 4)
8439 nv->login_timeout = cpu_to_le16(4);
8440 ha->login_timeout = le16_to_cpu(nv->login_timeout);
8441
8442 /* Set minimum RATOV to 100 tenths of a second. */
8443 ha->r_a_tov = 100;
8444
8445 ha->loop_reset_delay = nv->reset_delay;
8446
8447 /* Link Down Timeout = 0:
8448 *
8449 * When Port Down timer expires we will start returning
8450 * I/O's to OS with "DID_NO_CONNECT".
8451 *
8452 * Link Down Timeout != 0:
8453 *
8454 * The driver waits for the link to come up after link down
8455 * before returning I/Os to OS with "DID_NO_CONNECT".
8456 */
8457 if (le16_to_cpu(nv->link_down_timeout) == 0) {
8458 ha->loop_down_abort_time =
8459 (LOOP_DOWN_TIME - LOOP_DOWN_TIMEOUT);
8460 } else {
8461 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8462 ha->loop_down_abort_time =
8463 (LOOP_DOWN_TIME - ha->link_down_timeout);
8464 }
8465
8466 /* Need enough time to try and get the port back. */
8467 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8468 if (qlport_down_retry)
8469 ha->port_down_retry_count = qlport_down_retry;
8470
8471 /* Set login_retry_count */
8472 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8473 if (ha->port_down_retry_count ==
8474 le16_to_cpu(nv->port_down_retry_count) &&
8475 ha->port_down_retry_count > 3)
8476 ha->login_retry_count = ha->port_down_retry_count;
8477 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8478 ha->login_retry_count = ha->port_down_retry_count;
8479 if (ql2xloginretrycount)
8480 ha->login_retry_count = ql2xloginretrycount;
8481
8482 /* if not running MSI-X we need handshaking on interrupts */
8483 if (!vha->hw->flags.msix_enabled &&
8484 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
8485 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
8486
8487 /* Enable ZIO. */
8488 if (!vha->flags.init_done) {
8489 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8490 (BIT_3 | BIT_2 | BIT_1 | BIT_0);
8491 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8492 le16_to_cpu(icb->interrupt_delay_timer) : 2;
8493 }
8494 icb->firmware_options_2 &= cpu_to_le32(
8495 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
8496 vha->flags.process_response_queue = 0;
8497 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8498 ha->zio_mode = QLA_ZIO_MODE_6;
8499
8500 ql_log(ql_log_info, vha, 0x0075,
8501 "ZIO mode %d enabled; timer delay (%d us).\n",
8502 ha->zio_mode,
8503 ha->zio_timer * 100);
8504
8505 icb->firmware_options_2 |= cpu_to_le32(
8506 (uint32_t)ha->zio_mode);
8507 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
8508 vha->flags.process_response_queue = 1;
8509 }
8510
8511 /* enable RIDA Format2 */
8512 icb->firmware_options_3 |= BIT_0;
8513
8514 /* N2N: driver will initiate Login instead of FW */
8515 icb->firmware_options_3 |= BIT_8;
8516
8517 if (rval) {
8518 ql_log(ql_log_warn, vha, 0x0076,
8519 "NVRAM configuration failed.\n");
8520 }
8521 return (rval);
8522 }
8523
/*
 * qla82xx_restart_isp - bring an ISP82xx back up after a chip reset.
 * @vha: base (physical-port) SCSI host.
 *
 * Re-initializes the rings, waits for firmware readiness, re-enables
 * interrupts and the FCE/EFT trace buffers, then propagates the ISP
 * abort to every virtual port.
 *
 * Returns 0 on success, non-zero driver/firmware status otherwise.
 */
int
qla82xx_restart_isp(scsi_qla_host_t *vha)
{
	int status, rval;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *vp;
	unsigned long flags;

	status = qla2x00_init_rings(vha);
	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		ha->flags.chip_reset_done = 1;

		status = qla2x00_fw_ready(vha);
		if (!status) {
			/* Issue a marker after FW becomes ready. */
			qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
			vha->flags.online = 1;
			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		}

		/* if no cable then assume it's good */
		if ((vha->device_flags & DFLG_NO_CABLE))
			status = 0;
	}

	if (!status) {
		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		if (!atomic_read(&vha->loop_down_timer)) {
			/*
			 * Issue marker command only when we are going
			 * to start the I/O .
			 */
			vha->marker_needed = 1;
		}

		ha->isp_ops->enable_intrs(ha);

		ha->isp_abort_cnt = 0;
		clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);

		/* Update the firmware version */
		status = qla82xx_check_md_needed(vha);

		if (ha->fce) {
			/* Re-arm the FCE trace buffer the reset disabled. */
			ha->flags.fce_enabled = 1;
			memset(ha->fce, 0,
			    fce_calc_size(ha->fce_bufs));
			rval = qla2x00_enable_fce_trace(vha,
			    ha->fce_dma, ha->fce_bufs, ha->fce_mb,
			    &ha->fce_bufs);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8001,
				    "Unable to reinitialize FCE (%d).\n",
				    rval);
				ha->flags.fce_enabled = 0;
			}
		}

		if (ha->eft) {
			/* Re-arm the extended firmware trace buffer. */
			memset(ha->eft, 0, EFT_SIZE);
			rval = qla2x00_enable_eft_trace(vha,
			    ha->eft_dma, EFT_NUM_BUFFERS);
			if (rval) {
				ql_log(ql_log_warn, vha, 0x8010,
				    "Unable to reinitialize EFT (%d).\n",
				    rval);
			}
		}
	}

	if (!status) {
		ql_dbg(ql_dbg_taskm, vha, 0x8011,
		    "qla82xx_restart_isp succeeded.\n");

		spin_lock_irqsave(&ha->vport_slock, flags);
		list_for_each_entry(vp, &ha->vp_list, list) {
			if (vp->vp_idx) {
				/*
				 * Hold a vref so the vport cannot be torn
				 * down while vport_slock is dropped around
				 * the (sleeping) abort call.
				 */
				atomic_inc(&vp->vref_count);
				spin_unlock_irqrestore(&ha->vport_slock, flags);

				qla2x00_vp_abort_isp(vp);

				spin_lock_irqsave(&ha->vport_slock, flags);
				atomic_dec(&vp->vref_count);
			}
		}
		spin_unlock_irqrestore(&ha->vport_slock, flags);

	} else {
		ql_log(ql_log_warn, vha, 0x8016,
		    "qla82xx_restart_isp **** FAILED ****.\n");
	}

	return status;
}
8621
8622 void
qla81xx_update_fw_options(scsi_qla_host_t * vha)8623 qla81xx_update_fw_options(scsi_qla_host_t *vha)
8624 {
8625 struct qla_hw_data *ha = vha->hw;
8626
8627 /* Hold status IOCBs until ABTS response received. */
8628 if (ql2xfwholdabts)
8629 ha->fw_options[3] |= BIT_12;
8630
8631 /* Set Retry FLOGI in case of P2P connection */
8632 if (ha->operating_mode == P2P) {
8633 ha->fw_options[2] |= BIT_3;
8634 ql_dbg(ql_dbg_disc, vha, 0x2103,
8635 "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n",
8636 __func__, ha->fw_options[2]);
8637 }
8638
8639 /* Move PUREX, ABTS RX & RIDA to ATIOQ */
8640 if (ql2xmvasynctoatio) {
8641 if (qla_tgt_mode_enabled(vha) ||
8642 qla_dual_mode_enabled(vha))
8643 ha->fw_options[2] |= BIT_11;
8644 else
8645 ha->fw_options[2] &= ~BIT_11;
8646 }
8647
8648 if (qla_tgt_mode_enabled(vha) ||
8649 qla_dual_mode_enabled(vha)) {
8650 /* FW auto send SCSI status during */
8651 ha->fw_options[1] |= BIT_8;
8652 ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8;
8653
8654 /* FW perform Exchange validation */
8655 ha->fw_options[2] |= BIT_4;
8656 } else {
8657 ha->fw_options[1] &= ~BIT_8;
8658 ha->fw_options[10] &= 0x00ff;
8659
8660 ha->fw_options[2] &= ~BIT_4;
8661 }
8662
8663 if (ql2xetsenable) {
8664 /* Enable ETS Burst. */
8665 memset(ha->fw_options, 0, sizeof(ha->fw_options));
8666 ha->fw_options[2] |= BIT_9;
8667 }
8668
8669 ql_dbg(ql_dbg_init, vha, 0x00e9,
8670 "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n",
8671 __func__, ha->fw_options[1], ha->fw_options[2],
8672 ha->fw_options[3], vha->host->active_mode);
8673
8674 qla2x00_set_fw_options(vha, ha->fw_options);
8675 }
8676
8677 /*
8678 * qla24xx_get_fcp_prio
8679 * Gets the fcp cmd priority value for the logged in port.
8680 * Looks for a match of the port descriptors within
8681 * each of the fcp prio config entries. If a match is found,
8682 * the tag (priority) value is returned.
8683 *
8684 * Input:
8685 * vha = scsi host structure pointer.
8686 * fcport = port structure pointer.
8687 *
8688 * Return:
8689 * non-zero (if found)
8690 * -1 (if not found)
8691 *
8692 * Context:
8693 * Kernel context
8694 */
8695 static int
qla24xx_get_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)8696 qla24xx_get_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8697 {
8698 int i, entries;
8699 uint8_t pid_match, wwn_match;
8700 int priority;
8701 uint32_t pid1, pid2;
8702 uint64_t wwn1, wwn2;
8703 struct qla_fcp_prio_entry *pri_entry;
8704 struct qla_hw_data *ha = vha->hw;
8705
8706 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
8707 return -1;
8708
8709 priority = -1;
8710 entries = ha->fcp_prio_cfg->num_entries;
8711 pri_entry = &ha->fcp_prio_cfg->entry[0];
8712
8713 for (i = 0; i < entries; i++) {
8714 pid_match = wwn_match = 0;
8715
8716 if (!(pri_entry->flags & FCP_PRIO_ENTRY_VALID)) {
8717 pri_entry++;
8718 continue;
8719 }
8720
8721 /* check source pid for a match */
8722 if (pri_entry->flags & FCP_PRIO_ENTRY_SPID_VALID) {
8723 pid1 = pri_entry->src_pid & INVALID_PORT_ID;
8724 pid2 = vha->d_id.b24 & INVALID_PORT_ID;
8725 if (pid1 == INVALID_PORT_ID)
8726 pid_match++;
8727 else if (pid1 == pid2)
8728 pid_match++;
8729 }
8730
8731 /* check destination pid for a match */
8732 if (pri_entry->flags & FCP_PRIO_ENTRY_DPID_VALID) {
8733 pid1 = pri_entry->dst_pid & INVALID_PORT_ID;
8734 pid2 = fcport->d_id.b24 & INVALID_PORT_ID;
8735 if (pid1 == INVALID_PORT_ID)
8736 pid_match++;
8737 else if (pid1 == pid2)
8738 pid_match++;
8739 }
8740
8741 /* check source WWN for a match */
8742 if (pri_entry->flags & FCP_PRIO_ENTRY_SWWN_VALID) {
8743 wwn1 = wwn_to_u64(vha->port_name);
8744 wwn2 = wwn_to_u64(pri_entry->src_wwpn);
8745 if (wwn2 == (uint64_t)-1)
8746 wwn_match++;
8747 else if (wwn1 == wwn2)
8748 wwn_match++;
8749 }
8750
8751 /* check destination WWN for a match */
8752 if (pri_entry->flags & FCP_PRIO_ENTRY_DWWN_VALID) {
8753 wwn1 = wwn_to_u64(fcport->port_name);
8754 wwn2 = wwn_to_u64(pri_entry->dst_wwpn);
8755 if (wwn2 == (uint64_t)-1)
8756 wwn_match++;
8757 else if (wwn1 == wwn2)
8758 wwn_match++;
8759 }
8760
8761 if (pid_match == 2 || wwn_match == 2) {
8762 /* Found a matching entry */
8763 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
8764 priority = pri_entry->tag;
8765 break;
8766 }
8767
8768 pri_entry++;
8769 }
8770
8771 return priority;
8772 }
8773
8774 /*
8775 * qla24xx_update_fcport_fcp_prio
8776 * Activates fcp priority for the logged in fc port
8777 *
8778 * Input:
8779 * vha = scsi host structure pointer.
8780 * fcp = port structure pointer.
8781 *
8782 * Return:
8783 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8784 *
8785 * Context:
8786 * Kernel context.
8787 */
8788 int
qla24xx_update_fcport_fcp_prio(scsi_qla_host_t * vha,fc_port_t * fcport)8789 qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
8790 {
8791 int ret;
8792 int priority;
8793 uint16_t mb[5];
8794
8795 if (fcport->port_type != FCT_TARGET ||
8796 fcport->loop_id == FC_NO_LOOP_ID)
8797 return QLA_FUNCTION_FAILED;
8798
8799 priority = qla24xx_get_fcp_prio(vha, fcport);
8800 if (priority < 0)
8801 return QLA_FUNCTION_FAILED;
8802
8803 if (IS_P3P_TYPE(vha->hw)) {
8804 fcport->fcp_prio = priority & 0xf;
8805 return QLA_SUCCESS;
8806 }
8807
8808 ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
8809 if (ret == QLA_SUCCESS) {
8810 if (fcport->fcp_prio != priority)
8811 ql_dbg(ql_dbg_user, vha, 0x709e,
8812 "Updated FCP_CMND priority - value=%d loop_id=%d "
8813 "port_id=%02x%02x%02x.\n", priority,
8814 fcport->loop_id, fcport->d_id.b.domain,
8815 fcport->d_id.b.area, fcport->d_id.b.al_pa);
8816 fcport->fcp_prio = priority & 0xf;
8817 } else
8818 ql_dbg(ql_dbg_user, vha, 0x704f,
8819 "Unable to update FCP_CMND priority - ret=0x%x for "
8820 "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
8821 fcport->d_id.b.domain, fcport->d_id.b.area,
8822 fcport->d_id.b.al_pa);
8823 return ret;
8824 }
8825
8826 /*
8827 * qla24xx_update_all_fcp_prio
8828 * Activates fcp priority for all the logged in ports
8829 *
8830 * Input:
8831 * ha = adapter block pointer.
8832 *
8833 * Return:
8834 * QLA_SUCCESS or QLA_FUNCTION_FAILED
8835 *
8836 * Context:
8837 * Kernel context.
8838 */
8839 int
qla24xx_update_all_fcp_prio(scsi_qla_host_t * vha)8840 qla24xx_update_all_fcp_prio(scsi_qla_host_t *vha)
8841 {
8842 int ret;
8843 fc_port_t *fcport;
8844
8845 ret = QLA_FUNCTION_FAILED;
8846 /* We need to set priority for all logged in ports */
8847 list_for_each_entry(fcport, &vha->vp_fcports, list)
8848 ret = qla24xx_update_fcport_fcp_prio(vha, fcport);
8849
8850 return ret;
8851 }
8852
qla2xxx_create_qpair(struct scsi_qla_host * vha,int qos,int vp_idx,bool startqp)8853 struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
8854 int vp_idx, bool startqp)
8855 {
8856 int rsp_id = 0;
8857 int req_id = 0;
8858 int i;
8859 struct qla_hw_data *ha = vha->hw;
8860 uint16_t qpair_id = 0;
8861 struct qla_qpair *qpair = NULL;
8862 struct qla_msix_entry *msix;
8863
8864 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
8865 ql_log(ql_log_warn, vha, 0x00181,
8866 "FW/Driver is not multi-queue capable.\n");
8867 return NULL;
8868 }
8869
8870 if (ql2xmqsupport || ql2xnvmeenable) {
8871 qpair = kzalloc(sizeof(struct qla_qpair), GFP_KERNEL);
8872 if (qpair == NULL) {
8873 ql_log(ql_log_warn, vha, 0x0182,
8874 "Failed to allocate memory for queue pair.\n");
8875 return NULL;
8876 }
8877
8878 qpair->hw = vha->hw;
8879 qpair->vha = vha;
8880 qpair->qp_lock_ptr = &qpair->qp_lock;
8881 spin_lock_init(&qpair->qp_lock);
8882 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
8883
8884 /* Assign available que pair id */
8885 mutex_lock(&ha->mq_lock);
8886 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
8887 if (ha->num_qpairs >= ha->max_qpairs) {
8888 mutex_unlock(&ha->mq_lock);
8889 ql_log(ql_log_warn, vha, 0x0183,
8890 "No resources to create additional q pair.\n");
8891 goto fail_qid_map;
8892 }
8893 ha->num_qpairs++;
8894 set_bit(qpair_id, ha->qpair_qid_map);
8895 ha->queue_pair_map[qpair_id] = qpair;
8896 qpair->id = qpair_id;
8897 qpair->vp_idx = vp_idx;
8898 qpair->fw_started = ha->flags.fw_started;
8899 INIT_LIST_HEAD(&qpair->hints_list);
8900 qpair->chip_reset = ha->base_qpair->chip_reset;
8901 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
8902 qpair->enable_explicit_conf =
8903 ha->base_qpair->enable_explicit_conf;
8904
8905 for (i = 0; i < ha->msix_count; i++) {
8906 msix = &ha->msix_entries[i];
8907 if (msix->in_use)
8908 continue;
8909 qpair->msix = msix;
8910 ql_dbg(ql_dbg_multiq, vha, 0xc00f,
8911 "Vector %x selected for qpair\n", msix->vector);
8912 break;
8913 }
8914 if (!qpair->msix) {
8915 ql_log(ql_log_warn, vha, 0x0184,
8916 "Out of MSI-X vectors!.\n");
8917 goto fail_msix;
8918 }
8919
8920 qpair->msix->in_use = 1;
8921 list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
8922 qpair->pdev = ha->pdev;
8923 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
8924 qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
8925
8926 mutex_unlock(&ha->mq_lock);
8927
8928 /* Create response queue first */
8929 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
8930 if (!rsp_id) {
8931 ql_log(ql_log_warn, vha, 0x0185,
8932 "Failed to create response queue.\n");
8933 goto fail_rsp;
8934 }
8935
8936 qpair->rsp = ha->rsp_q_map[rsp_id];
8937
8938 /* Create request queue */
8939 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
8940 startqp);
8941 if (!req_id) {
8942 ql_log(ql_log_warn, vha, 0x0186,
8943 "Failed to create request queue.\n");
8944 goto fail_req;
8945 }
8946
8947 qpair->req = ha->req_q_map[req_id];
8948 qpair->rsp->req = qpair->req;
8949 qpair->rsp->qpair = qpair;
8950 /* init qpair to this cpu. Will adjust at run time. */
8951 qla_cpu_update(qpair, smp_processor_id());
8952
8953 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
8954 if (ha->fw_attributes & BIT_4)
8955 qpair->difdix_supported = 1;
8956 }
8957
8958 qpair->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
8959 if (!qpair->srb_mempool) {
8960 ql_log(ql_log_warn, vha, 0xd036,
8961 "Failed to create srb mempool for qpair %d\n",
8962 qpair->id);
8963 goto fail_mempool;
8964 }
8965
8966 /* Mark as online */
8967 qpair->online = 1;
8968
8969 if (!vha->flags.qpairs_available)
8970 vha->flags.qpairs_available = 1;
8971
8972 ql_dbg(ql_dbg_multiq, vha, 0xc00d,
8973 "Request/Response queue pair created, id %d\n",
8974 qpair->id);
8975 ql_dbg(ql_dbg_init, vha, 0x0187,
8976 "Request/Response queue pair created, id %d\n",
8977 qpair->id);
8978 }
8979 return qpair;
8980
8981 fail_mempool:
8982 fail_req:
8983 qla25xx_delete_rsp_que(vha, qpair->rsp);
8984 fail_rsp:
8985 mutex_lock(&ha->mq_lock);
8986 qpair->msix->in_use = 0;
8987 list_del(&qpair->qp_list_elem);
8988 if (list_empty(&vha->qp_list))
8989 vha->flags.qpairs_available = 0;
8990 fail_msix:
8991 ha->queue_pair_map[qpair_id] = NULL;
8992 clear_bit(qpair_id, ha->qpair_qid_map);
8993 ha->num_qpairs--;
8994 mutex_unlock(&ha->mq_lock);
8995 fail_qid_map:
8996 kfree(qpair);
8997 return NULL;
8998 }
8999
qla2xxx_delete_qpair(struct scsi_qla_host * vha,struct qla_qpair * qpair)9000 int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
9001 {
9002 int ret = QLA_FUNCTION_FAILED;
9003 struct qla_hw_data *ha = qpair->hw;
9004
9005 qpair->delete_in_progress = 1;
9006 while (atomic_read(&qpair->ref_count))
9007 msleep(500);
9008
9009 ret = qla25xx_delete_req_que(vha, qpair->req);
9010 if (ret != QLA_SUCCESS)
9011 goto fail;
9012
9013 ret = qla25xx_delete_rsp_que(vha, qpair->rsp);
9014 if (ret != QLA_SUCCESS)
9015 goto fail;
9016
9017 mutex_lock(&ha->mq_lock);
9018 ha->queue_pair_map[qpair->id] = NULL;
9019 clear_bit(qpair->id, ha->qpair_qid_map);
9020 ha->num_qpairs--;
9021 list_del(&qpair->qp_list_elem);
9022 if (list_empty(&vha->qp_list)) {
9023 vha->flags.qpairs_available = 0;
9024 vha->flags.qpairs_req_created = 0;
9025 vha->flags.qpairs_rsp_created = 0;
9026 }
9027 mempool_destroy(qpair->srb_mempool);
9028 kfree(qpair);
9029 mutex_unlock(&ha->mq_lock);
9030
9031 return QLA_SUCCESS;
9032 fail:
9033 return ret;
9034 }
9035