1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4   * Copyright (c) 2014- QLogic Corporation.
5   * All rights reserved
6   * www.qlogic.com
7   *
8   * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
9   */
10  
11  #include "bfad_drv.h"
12  #include "bfad_im.h"
13  #include "bfa_plog.h"
14  #include "bfa_cs.h"
15  #include "bfa_modules.h"
16  
17  BFA_TRC_FILE(HAL, FCXP);
18  
19  /*
20   * LPS related definitions
21   */
22  #define BFA_LPS_MIN_LPORTS      (1)
23  #define BFA_LPS_MAX_LPORTS      (256)
24  
25  /*
26   * Maximum Vports supported per physical port or vf.
27   */
28  #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
29  #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
30  
31  
32  /*
33   * FC PORT related definitions
34   */
35  /*
36   * The port is considered disabled if corresponding physical port or IOC are
37   * disabled explicitly
38   */
39  #define BFA_PORT_IS_DISABLED(bfa) \
40  	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
41  	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
42  
43  /*
44   * BFA port state machine events
45   */
46  enum bfa_fcport_sm_event {
47  	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
48  	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
49  	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
50  	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
51  	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
52  	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
53  	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkup down	*/
54  	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
55  	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
56  	BFA_FCPORT_SM_DPORTENABLE = 10, /*  enable dport      */
57  	BFA_FCPORT_SM_DPORTDISABLE = 11,/*  disable dport     */
58  	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguratin */
59  	BFA_FCPORT_SM_DDPORTENABLE  = 13,	/* enable ddport	*/
60  	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport	*/
61  };
62  
63  /*
64   * BFA port link notification state machine events
65   */
66  
67  enum bfa_fcport_ln_sm_event {
68  	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
69  	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
70  	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
71  };
72  
/*
 * RPORT related definitions
 */
/*
 * Deliver the rport-offline completion: call the FCS callback directly
 * when bfa->fcs is set, otherwise defer it via bfa_cb_queue().
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));      \
	}								\
} while (0)

/*
 * Deliver the rport-online completion: call the FCS callback directly
 * when bfa->fcs is set, otherwise defer it via bfa_cb_queue().
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);      \
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				  __bfa_cb_rport_online, (__rp));      \
		}							\
} while (0)
93  
94  /*
95   * forward declarations FCXP related functions
96   */
97  static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
98  static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
99  				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
100  static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
101  				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
102  static void	bfa_fcxp_qresume(void *cbarg);
103  static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
104  				struct bfi_fcxp_send_req_s *send_req);
105  
106  /*
107   * forward declarations for LPS functions
108   */
109  static void bfa_lps_login_rsp(struct bfa_s *bfa,
110  				struct bfi_lps_login_rsp_s *rsp);
111  static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
112  static void bfa_lps_logout_rsp(struct bfa_s *bfa,
113  				struct bfi_lps_logout_rsp_s *rsp);
114  static void bfa_lps_reqq_resume(void *lps_arg);
115  static void bfa_lps_free(struct bfa_lps_s *lps);
116  static void bfa_lps_send_login(struct bfa_lps_s *lps);
117  static void bfa_lps_send_logout(struct bfa_lps_s *lps);
118  static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
119  static void bfa_lps_login_comp(struct bfa_lps_s *lps);
120  static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
121  static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
122  
123  /*
124   * forward declaration for LPS state machine
125   */
126  static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
127  static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
128  static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
129  					event);
130  static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
131  static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
132  					enum bfa_lps_event event);
133  static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
134  static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
135  					event);
136  
137  /*
138   * forward declaration for FC Port functions
139   */
140  static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
141  static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
142  static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
143  static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
144  static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
145  static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
146  static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
147  			enum bfa_port_linkstate event, bfa_boolean_t trunk);
148  static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
149  				enum bfa_port_linkstate event);
150  static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
151  static void bfa_fcport_stats_get_timeout(void *cbarg);
152  static void bfa_fcport_stats_clr_timeout(void *cbarg);
153  static void bfa_trunk_iocdisable(struct bfa_s *bfa);
154  
155  /*
156   * forward declaration for FC PORT state machine
157   */
158  static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
159  					enum bfa_fcport_sm_event event);
160  static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
161  					enum bfa_fcport_sm_event event);
162  static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
163  					enum bfa_fcport_sm_event event);
164  static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
165  					enum bfa_fcport_sm_event event);
166  static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
167  					enum bfa_fcport_sm_event event);
168  static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
169  					enum bfa_fcport_sm_event event);
170  static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
171  					enum bfa_fcport_sm_event event);
172  static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
173  					enum bfa_fcport_sm_event event);
174  static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
175  					enum bfa_fcport_sm_event event);
176  static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
177  					enum bfa_fcport_sm_event event);
178  static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
179  					enum bfa_fcport_sm_event event);
180  static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
181  					enum bfa_fcport_sm_event event);
182  static void	bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
183  					enum bfa_fcport_sm_event event);
184  static void     bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
185  					enum bfa_fcport_sm_event event);
186  static void	bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
187  					enum bfa_fcport_sm_event event);
188  
189  static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
190  					enum bfa_fcport_ln_sm_event event);
191  static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
192  					enum bfa_fcport_ln_sm_event event);
193  static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
194  					enum bfa_fcport_ln_sm_event event);
195  static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
196  					enum bfa_fcport_ln_sm_event event);
197  static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
198  					enum bfa_fcport_ln_sm_event event);
199  static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
200  					enum bfa_fcport_ln_sm_event event);
201  static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
202  					enum bfa_fcport_ln_sm_event event);
203  
/*
 * Map each fc port state-machine handler to its externally visible
 * BFA_PORT_ST_* state (note: both iocdown and iocfail report IOCDOWN).
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
221  
222  
223  /*
224   * forward declaration for RPORT related functions
225   */
226  static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
227  static void		bfa_rport_free(struct bfa_rport_s *rport);
228  static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
229  static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
230  static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
231  static void		__bfa_cb_rport_online(void *cbarg,
232  						bfa_boolean_t complete);
233  static void		__bfa_cb_rport_offline(void *cbarg,
234  						bfa_boolean_t complete);
235  
236  /*
237   * forward declaration for RPORT state machine
238   */
239  static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
240  					enum bfa_rport_event event);
241  static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
242  					enum bfa_rport_event event);
243  static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
244  					enum bfa_rport_event event);
245  static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
246  					enum bfa_rport_event event);
247  static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
248  					enum bfa_rport_event event);
249  static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
250  					enum bfa_rport_event event);
251  static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
252  					enum bfa_rport_event event);
253  static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
254  					enum bfa_rport_event event);
255  static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
256  					enum bfa_rport_event event);
257  static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
258  					enum bfa_rport_event event);
259  static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
260  					enum bfa_rport_event event);
261  static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
262  					enum bfa_rport_event event);
263  static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
264  					enum bfa_rport_event event);
265  
266  /*
267   * PLOG related definitions
268   */
269  static int
plkd_validate_logrec(struct bfa_plog_rec_s * pl_rec)270  plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
271  {
272  	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
273  		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
274  		return 1;
275  
276  	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
277  		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
278  		return 1;
279  
280  	return 0;
281  }
282  
/*
 * Append one record to the circular port log.
 *
 * No-op when logging is disabled; a malformed record triggers WARN_ON
 * and is dropped.  When the ring is full the oldest record is
 * overwritten (head advances together with tail).
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* timestamp the stored copy, then advance the ring tail */
	pl_recp->tv = ktime_get_real_seconds();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* ring wrapped: drop the oldest record by advancing head too */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
309  
310  void
bfa_plog_init(struct bfa_plog_s * plog)311  bfa_plog_init(struct bfa_plog_s *plog)
312  {
313  	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
314  
315  	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
316  	plog->head = plog->tail = 0;
317  	plog->plog_enabled = 1;
318  }
319  
320  void
bfa_plog_str(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,char * log_str)321  bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
322  		enum bfa_plog_eid event,
323  		u16 misc, char *log_str)
324  {
325  	struct bfa_plog_rec_s  lp;
326  
327  	if (plog->plog_enabled) {
328  		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
329  		lp.mid = mid;
330  		lp.eid = event;
331  		lp.log_type = BFA_PL_LOG_TYPE_STRING;
332  		lp.misc = misc;
333  		strscpy(lp.log_entry.string_log, log_str,
334  			BFA_PL_STRING_LOG_SZ);
335  		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
336  		bfa_plog_add(plog, &lp);
337  	}
338  }
339  
340  void
bfa_plog_intarr(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,u32 * intarr,u32 num_ints)341  bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
342  		enum bfa_plog_eid event,
343  		u16 misc, u32 *intarr, u32 num_ints)
344  {
345  	struct bfa_plog_rec_s  lp;
346  	u32 i;
347  
348  	if (num_ints > BFA_PL_INT_LOG_SZ)
349  		num_ints = BFA_PL_INT_LOG_SZ;
350  
351  	if (plog->plog_enabled) {
352  		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
353  		lp.mid = mid;
354  		lp.eid = event;
355  		lp.log_type = BFA_PL_LOG_TYPE_INT;
356  		lp.misc = misc;
357  
358  		for (i = 0; i < num_ints; i++)
359  			lp.log_entry.int_log[i] = intarr[i];
360  
361  		lp.log_num_ints = (u8) num_ints;
362  
363  		bfa_plog_add(plog, &lp);
364  	}
365  }
366  
367  void
bfa_plog_fchdr(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,struct fchs_s * fchdr)368  bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
369  			enum bfa_plog_eid event,
370  			u16 misc, struct fchs_s *fchdr)
371  {
372  	u32	*tmp_int = (u32 *) fchdr;
373  	u32	ints[BFA_PL_INT_LOG_SZ];
374  
375  	if (plog->plog_enabled) {
376  		ints[0] = tmp_int[0];
377  		ints[1] = tmp_int[1];
378  		ints[2] = tmp_int[4];
379  
380  		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
381  	}
382  }
383  
384  void
bfa_plog_fchdr_and_pl(struct bfa_plog_s * plog,enum bfa_plog_mid mid,enum bfa_plog_eid event,u16 misc,struct fchs_s * fchdr,u32 pld_w0)385  bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
386  		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
387  		      u32 pld_w0)
388  {
389  	u32	*tmp_int = (u32 *) fchdr;
390  	u32	ints[BFA_PL_INT_LOG_SZ];
391  
392  	if (plog->plog_enabled) {
393  		ints[0] = tmp_int[0];
394  		ints[1] = tmp_int[1];
395  		ints[2] = tmp_int[4];
396  		ints[3] = pld_w0;
397  
398  		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
399  	}
400  }
401  
402  
403  /*
404   *  fcxp_pvt BFA FCXP private functions
405   */
406  
407  static void
claim_fcxps_mem(struct bfa_fcxp_mod_s * mod)408  claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
409  {
410  	u16	i;
411  	struct bfa_fcxp_s *fcxp;
412  
413  	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
414  	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
415  
416  	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
417  	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
418  	INIT_LIST_HEAD(&mod->fcxp_active_q);
419  	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
420  	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
421  
422  	mod->fcxp_list = fcxp;
423  
424  	for (i = 0; i < mod->num_fcxps; i++) {
425  		fcxp->fcxp_mod = mod;
426  		fcxp->fcxp_tag = i;
427  
428  		if (i < (mod->num_fcxps / 2)) {
429  			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
430  			fcxp->req_rsp = BFA_TRUE;
431  		} else {
432  			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
433  			fcxp->req_rsp = BFA_FALSE;
434  		}
435  
436  		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
437  		fcxp->reqq_waiting = BFA_FALSE;
438  
439  		fcxp = fcxp + 1;
440  	}
441  
442  	bfa_mem_kva_curp(mod) = (void *)fcxp;
443  }
444  
/*
 * Register the DMA and KVA memory requirements of the FCXP module,
 * sized by the configured number of fcxp requests.
 */
void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/*
	 * Per-fcxp DMA payload = request buffer + response buffer; in
	 * min-config mode the response buffer is the small (IBUF) size.
	 */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory: spread the payload buffers across the DMA segments */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			/* full segment; remainder goes to later segments */
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory: one bfa_fcxp_s per configured request */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
482  
483  void
bfa_fcxp_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_pcidev_s * pcidev)484  bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
485  		struct bfa_pcidev_s *pcidev)
486  {
487  	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
488  
489  	mod->bfa = bfa;
490  	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
491  
492  	/*
493  	 * Initialize FCXP request and response payload sizes.
494  	 */
495  	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
496  	if (!cfg->drvcfg.min_cfg)
497  		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
498  
499  	INIT_LIST_HEAD(&mod->req_wait_q);
500  	INIT_LIST_HEAD(&mod->rsp_wait_q);
501  
502  	claim_fcxps_mem(mod);
503  }
504  
/*
 * IOC disable/failure handler for the FCXP module.
 *
 * Returns unused fcxps to their free lists and completes every active
 * fcxp with BFA_STATUS_IOC_FAILURE: immediately when there is no
 * caller context, otherwise deferred through the hal callback queue.
 */
void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head	      *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* no caller context: complete and free right away */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* defer completion via the hal callback queue */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
				     __bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
529  
530  static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s * fm,bfa_boolean_t req)531  bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
532  {
533  	struct bfa_fcxp_s *fcxp;
534  
535  	if (req)
536  		bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
537  	else
538  		bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
539  
540  	if (fcxp)
541  		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
542  
543  	return fcxp;
544  }
545  
546  static void
bfa_fcxp_init_reqrsp(struct bfa_fcxp_s * fcxp,struct bfa_s * bfa,u8 * use_ibuf,u32 * nr_sgles,bfa_fcxp_get_sgaddr_t * r_sga_cbfn,bfa_fcxp_get_sglen_t * r_sglen_cbfn,struct list_head * r_sgpg_q,int n_sgles,bfa_fcxp_get_sgaddr_t sga_cbfn,bfa_fcxp_get_sglen_t sglen_cbfn)547  bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
548  	       struct bfa_s *bfa,
549  	       u8 *use_ibuf,
550  	       u32 *nr_sgles,
551  	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
552  	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
553  	       struct list_head *r_sgpg_q,
554  	       int n_sgles,
555  	       bfa_fcxp_get_sgaddr_t sga_cbfn,
556  	       bfa_fcxp_get_sglen_t sglen_cbfn)
557  {
558  
559  	WARN_ON(bfa == NULL);
560  
561  	bfa_trc(bfa, fcxp->fcxp_tag);
562  
563  	if (n_sgles == 0) {
564  		*use_ibuf = 1;
565  	} else {
566  		WARN_ON(*sga_cbfn == NULL);
567  		WARN_ON(*sglen_cbfn == NULL);
568  
569  		*use_ibuf = 0;
570  		*r_sga_cbfn = sga_cbfn;
571  		*r_sglen_cbfn = sglen_cbfn;
572  
573  		*nr_sgles = n_sgles;
574  
575  		/*
576  		 * alloc required sgpgs
577  		 */
578  		if (n_sgles > BFI_SGE_INLINE)
579  			WARN_ON(1);
580  	}
581  
582  }
583  
/*
 * Initialize a freshly allocated fcxp: record the caller context and
 * configure the request- and response-direction SG state via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request-direction SG setup */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response-direction SG setup */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
610  
/*
 * Return an fcxp to the module.
 *
 * If an allocation waiter of the matching kind (req/rsp) is queued,
 * the fcxp is re-initialized and handed directly to that waiter;
 * otherwise it is moved from the active list back to its free list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* dequeue a waiter from the queue matching this fcxp's kind */
	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		/* re-initialize with the waiter's parameters and hand over */
		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* no waiter: must still be on the active list; move to free list */
	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
642  
/*
 * Completion callback that deliberately ignores the result of a
 * discarded fcxp exchange.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
650  
651  static void
__bfa_fcxp_send_cbfn(void * cbarg,bfa_boolean_t complete)652  __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
653  {
654  	struct bfa_fcxp_s *fcxp = cbarg;
655  
656  	if (complete) {
657  		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
658  				fcxp->rsp_status, fcxp->rsp_len,
659  				fcxp->residue_len, &fcxp->rsp_fchs);
660  	} else {
661  		bfa_fcxp_free(fcxp);
662  	}
663  }
664  
/*
 * Firmware response handler for an FCXP send request.
 *
 * Converts wire-endian fields, looks the fcxp up by tag and either
 * invokes the completion callback immediately (no caller context) or
 * stashes the result and defers delivery via the hal callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 *	 is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* stash the result; __bfa_fcxp_send_cbfn delivers it */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
714  
715  static void
hal_fcxp_tx_plog(struct bfa_s * bfa,u32 reqlen,struct bfa_fcxp_s * fcxp,struct fchs_s * fchs)716  hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
717  		 struct fchs_s *fchs)
718  {
719  	/*
720  	 * TODO: TX ox_id
721  	 */
722  	if (reqlen > 0) {
723  		if (fcxp->use_ireqbuf) {
724  			u32	pld_w0 =
725  				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
726  
727  			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
728  					BFA_PL_EID_TX,
729  					reqlen + sizeof(struct fchs_s), fchs,
730  					pld_w0);
731  		} else {
732  			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
733  					BFA_PL_EID_TX,
734  					reqlen + sizeof(struct fchs_s),
735  					fchs);
736  		}
737  	} else {
738  		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
739  			       reqlen + sizeof(struct fchs_s), fchs);
740  	}
741  }
742  
743  static void
hal_fcxp_rx_plog(struct bfa_s * bfa,struct bfa_fcxp_s * fcxp,struct bfi_fcxp_send_rsp_s * fcxp_rsp)744  hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
745  		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
746  {
747  	if (fcxp_rsp->rsp_len > 0) {
748  		if (fcxp->use_irspbuf) {
749  			u32	pld_w0 =
750  				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
751  
752  			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
753  					      BFA_PL_EID_RX,
754  					      (u16) fcxp_rsp->rsp_len,
755  					      &fcxp_rsp->fchs, pld_w0);
756  		} else {
757  			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
758  				       BFA_PL_EID_RX,
759  				       (u16) fcxp_rsp->rsp_len,
760  				       &fcxp_rsp->fchs);
761  		}
762  	} else {
763  		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
764  			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
765  	}
766  }
767  
/*
 * Handler to resume sending an fcxp when space becomes available in
 * the CPE (request) queue; registered via bfa_reqq_winit() in
 * claim_fcxps_mem().
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): send_req is used without a NULL check — presumably
	 * bfa_reqq_next() cannot fail here since this runs only after
	 * queue space was signalled; confirm against bfa_reqq semantics.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
782  
783  /*
784   * Queue fcxp send request to foimrware.
785   */
786  static void
bfa_fcxp_queue(struct bfa_fcxp_s * fcxp,struct bfi_fcxp_send_req_s * send_req)787  bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
788  {
789  	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
790  	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
791  	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
792  	struct bfa_rport_s		*rport = reqi->bfa_rport;
793  
794  	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
795  		    bfa_fn_lpu(bfa));
796  
797  	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
798  	if (rport) {
799  		send_req->rport_fw_hndl = rport->fw_handle;
800  		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
801  		if (send_req->max_frmsz == 0)
802  			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
803  	} else {
804  		send_req->rport_fw_hndl = 0;
805  		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
806  	}
807  
808  	send_req->vf_id = cpu_to_be16(reqi->vf_id);
809  	send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
810  	send_req->class = reqi->class;
811  	send_req->rsp_timeout = rspi->rsp_timeout;
812  	send_req->cts = reqi->cts;
813  	send_req->fchs = reqi->fchs;
814  
815  	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
816  	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
817  
818  	/*
819  	 * setup req sgles
820  	 */
821  	if (fcxp->use_ireqbuf == 1) {
822  		bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
823  					BFA_FCXP_REQ_PLD_PA(fcxp));
824  	} else {
825  		if (fcxp->nreq_sgles > 0) {
826  			WARN_ON(fcxp->nreq_sgles != 1);
827  			bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
828  				fcxp->req_sga_cbfn(fcxp->caller, 0));
829  		} else {
830  			WARN_ON(reqi->req_tot_len != 0);
831  			bfa_alen_set(&send_req->rsp_alen, 0, 0);
832  		}
833  	}
834  
835  	/*
836  	 * setup rsp sgles
837  	 */
838  	if (fcxp->use_irspbuf == 1) {
839  		WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
840  
841  		bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
842  					BFA_FCXP_RSP_PLD_PA(fcxp));
843  	} else {
844  		if (fcxp->nrsp_sgles > 0) {
845  			WARN_ON(fcxp->nrsp_sgles != 1);
846  			bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
847  				fcxp->rsp_sga_cbfn(fcxp->caller, 0));
848  
849  		} else {
850  			WARN_ON(rspi->rsp_maxlen != 0);
851  			bfa_alen_set(&send_req->rsp_alen, 0, 0);
852  		}
853  	}
854  
855  	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
856  
857  	bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
858  
859  	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
860  	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
861  }
862  
863  /*
864   * Allocate an FCXP instance to send a response or to send a request
865   * that has a response. Request/response buffers are allocated by caller.
866   *
867   * @param[in]	bfa		BFA bfa instance
868   * @param[in]	nreq_sgles	Number of SG elements required for request
869   *				buffer. 0, if fcxp internal buffers are	used.
870   *				Use bfa_fcxp_get_reqbuf() to get the
871   *				internal req buffer.
872   * @param[in]	req_sgles	SG elements describing request buffer. Will be
873   *				copied in by BFA and hence can be freed on
874   *				return from this function.
875   * @param[in]	get_req_sga	function ptr to be called to get a request SG
876   *				Address (given the sge index).
877   * @param[in]	get_req_sglen	function ptr to be called to get a request SG
878   *				len (given the sge index).
879   * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
880   *				Address (given the sge index).
881   * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
882   *				len (given the sge index).
883   * @param[in]	req		Allocated FCXP is used to send req or rsp?
884   *				request - BFA_TRUE, response - BFA_FALSE
885   *
886   * @return FCXP instance. NULL on failure.
887   */
888  struct bfa_fcxp_s *
bfa_fcxp_req_rsp_alloc(void * caller,struct bfa_s * bfa,int nreq_sgles,int nrsp_sgles,bfa_fcxp_get_sgaddr_t req_sga_cbfn,bfa_fcxp_get_sglen_t req_sglen_cbfn,bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,bfa_fcxp_get_sglen_t rsp_sglen_cbfn,bfa_boolean_t req)889  bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
890  		int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
891  		bfa_fcxp_get_sglen_t req_sglen_cbfn,
892  		bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
893  		bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
894  {
895  	struct bfa_fcxp_s *fcxp = NULL;
896  
897  	WARN_ON(bfa == NULL);
898  
899  	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
900  	if (fcxp == NULL)
901  		return NULL;
902  
903  	bfa_trc(bfa, fcxp->fcxp_tag);
904  
905  	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
906  			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
907  
908  	return fcxp;
909  }
910  
911  /*
912   * Get the internal request buffer pointer
913   *
914   * @param[in]	fcxp	BFA fcxp pointer
915   *
916   * @return		pointer to the internal request buffer
917   */
918  void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s * fcxp)919  bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
920  {
921  	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
922  	void	*reqbuf;
923  
924  	WARN_ON(fcxp->use_ireqbuf != 1);
925  	reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
926  				mod->req_pld_sz + mod->rsp_pld_sz);
927  	return reqbuf;
928  }
929  
930  u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s * fcxp)931  bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
932  {
933  	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
934  
935  	return mod->req_pld_sz;
936  }
937  
938  /*
939   * Get the internal response buffer pointer
940   *
941   * @param[in]	fcxp	BFA fcxp pointer
942   *
943   * @return		pointer to the internal request buffer
944   */
945  void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s * fcxp)946  bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
947  {
948  	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
949  	void	*fcxp_buf;
950  
951  	WARN_ON(fcxp->use_irspbuf != 1);
952  
953  	fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
954  				mod->req_pld_sz + mod->rsp_pld_sz);
955  
956  	/* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
957  	return ((u8 *) fcxp_buf) + mod->req_pld_sz;
958  }
959  
960  /*
961   * Free the BFA FCXP
962   *
963   * @param[in]	fcxp			BFA fcxp pointer
964   *
965   * @return		void
966   */
967  void
bfa_fcxp_free(struct bfa_fcxp_s * fcxp)968  bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
969  {
970  	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
971  
972  	WARN_ON(fcxp == NULL);
973  	bfa_trc(mod->bfa, fcxp->fcxp_tag);
974  	bfa_fcxp_put(fcxp);
975  }
976  
977  /*
978   * Send a FCXP request
979   *
980   * @param[in]	fcxp	BFA fcxp pointer
981   * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
982   * @param[in]	vf_id	virtual Fabric ID
983   * @param[in]	lp_tag	lport tag
984   * @param[in]	cts	use Continuous sequence
985   * @param[in]	cos	fc Class of Service
986   * @param[in]	reqlen	request length, does not include FCHS length
987   * @param[in]	fchs	fc Header Pointer. The header content will be copied
988   *			in by BFA.
989   *
990   * @param[in]	cbfn	call back function to be called on receiving
991   *								the response
992   * @param[in]	cbarg	arg for cbfn
993   * @param[in]	rsp_timeout
994   *			response timeout
995   *
996   * @return		bfa_status_t
997   */
998  void
bfa_fcxp_send(struct bfa_fcxp_s * fcxp,struct bfa_rport_s * rport,u16 vf_id,u8 lp_tag,bfa_boolean_t cts,enum fc_cos cos,u32 reqlen,struct fchs_s * fchs,bfa_cb_fcxp_send_t cbfn,void * cbarg,u32 rsp_maxlen,u8 rsp_timeout)999  bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1000  	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1001  	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1002  	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1003  {
1004  	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
1005  	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
1006  	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
1007  	struct bfi_fcxp_send_req_s	*send_req;
1008  
1009  	bfa_trc(bfa, fcxp->fcxp_tag);
1010  
1011  	/*
1012  	 * setup request/response info
1013  	 */
1014  	reqi->bfa_rport = rport;
1015  	reqi->vf_id = vf_id;
1016  	reqi->lp_tag = lp_tag;
1017  	reqi->class = cos;
1018  	rspi->rsp_timeout = rsp_timeout;
1019  	reqi->cts = cts;
1020  	reqi->fchs = *fchs;
1021  	reqi->req_tot_len = reqlen;
1022  	rspi->rsp_maxlen = rsp_maxlen;
1023  	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1024  	fcxp->send_cbarg = cbarg;
1025  
1026  	/*
1027  	 * If no room in CPE queue, wait for space in request queue
1028  	 */
1029  	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1030  	if (!send_req) {
1031  		bfa_trc(bfa, fcxp->fcxp_tag);
1032  		fcxp->reqq_waiting = BFA_TRUE;
1033  		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1034  		return;
1035  	}
1036  
1037  	bfa_fcxp_queue(fcxp, send_req);
1038  }
1039  
1040  /*
1041   * Abort a BFA FCXP
1042   *
1043   * @param[in]	fcxp	BFA fcxp pointer
1044   *
1045   * @return		void
1046   */
1047  bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s * fcxp)1048  bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1049  {
1050  	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1051  	WARN_ON(1);
1052  	return BFA_STATUS_OK;
1053  }
1054  
1055  void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s * bfa,struct bfa_fcxp_wqe_s * wqe,bfa_fcxp_alloc_cbfn_t alloc_cbfn,void * alloc_cbarg,void * caller,int nreq_sgles,int nrsp_sgles,bfa_fcxp_get_sgaddr_t req_sga_cbfn,bfa_fcxp_get_sglen_t req_sglen_cbfn,bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,bfa_fcxp_get_sglen_t rsp_sglen_cbfn,bfa_boolean_t req)1056  bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
1057  	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1058  	       void *caller, int nreq_sgles,
1059  	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1060  	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
1061  	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
1062  	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
1063  {
1064  	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1065  
1066  	if (req)
1067  		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1068  	else
1069  		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
1070  
1071  	wqe->alloc_cbfn = alloc_cbfn;
1072  	wqe->alloc_cbarg = alloc_cbarg;
1073  	wqe->caller = caller;
1074  	wqe->bfa = bfa;
1075  	wqe->nreq_sgles = nreq_sgles;
1076  	wqe->nrsp_sgles = nrsp_sgles;
1077  	wqe->req_sga_cbfn = req_sga_cbfn;
1078  	wqe->req_sglen_cbfn = req_sglen_cbfn;
1079  	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1080  	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1081  
1082  	if (req)
1083  		list_add_tail(&wqe->qe, &mod->req_wait_q);
1084  	else
1085  		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
1086  }
1087  
1088  void
bfa_fcxp_walloc_cancel(struct bfa_s * bfa,struct bfa_fcxp_wqe_s * wqe)1089  bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1090  {
1091  	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1092  
1093  	WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1094  		!bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
1095  	list_del(&wqe->qe);
1096  }
1097  
1098  void
bfa_fcxp_discard(struct bfa_fcxp_s * fcxp)1099  bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1100  {
1101  	/*
1102  	 * If waiting for room in request queue, cancel reqq wait
1103  	 * and free fcxp.
1104  	 */
1105  	if (fcxp->reqq_waiting) {
1106  		fcxp->reqq_waiting = BFA_FALSE;
1107  		bfa_reqq_wcancel(&fcxp->reqq_wqe);
1108  		bfa_fcxp_free(fcxp);
1109  		return;
1110  	}
1111  
1112  	fcxp->send_cbfn = bfa_fcxp_null_comp;
1113  }
1114  
1115  void
bfa_fcxp_isr(struct bfa_s * bfa,struct bfi_msg_s * msg)1116  bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1117  {
1118  	switch (msg->mhdr.msg_id) {
1119  	case BFI_FCXP_I2H_SEND_RSP:
1120  		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1121  		break;
1122  
1123  	default:
1124  		bfa_trc(bfa, msg->mhdr.msg_id);
1125  		WARN_ON(1);
1126  	}
1127  }
1128  
1129  u32
bfa_fcxp_get_maxrsp(struct bfa_s * bfa)1130  bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1131  {
1132  	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1133  
1134  	return mod->rsp_pld_sz;
1135  }
1136  
1137  void
bfa_fcxp_res_recfg(struct bfa_s * bfa,u16 num_fcxp_fw)1138  bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1139  {
1140  	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
1141  	struct list_head	*qe;
1142  	int	i;
1143  
1144  	for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1145  		if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1146  			bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1147  			list_add_tail(qe, &mod->fcxp_req_unused_q);
1148  		} else {
1149  			bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1150  			list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1151  		}
1152  	}
1153  }
1154  
1155  /*
1156   *  BFA LPS state machine functions
1157   */
1158  
1159  /*
1160   * Init state -- no login
1161   */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/*
		 * Send the login now if there is request-queue space;
		 * otherwise park in loginwait until space opens up.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in: complete the logout immediately */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Return the lps tag to the free pool */
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing logged in yet; nothing to tear down */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually send out the timeout.
		 * Just ignore.
		 */
		break;
	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1216  
1217  /*
1218   * login is in progress -- awaiting response from firmware
1219   */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by bfa_lps_login_rsp() */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* Login rejected or timed out: back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller, for success and failure alike */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * PID is already recorded in lps->lp_pid (set by
		 * bfa_lps_set_n2n_pid()); it is pushed to FW from the
		 * FWRSP handling above once the login completes.
		 */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1270  
1271  /*
1272   * login pending - awaiting space in request queue
1273   */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request-queue space is available: send the login now */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the pending login; drop the reqq wait element */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1304  
1305  /*
1306   * login complete
1307   */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/*
		 * Send the logout now if there is request-queue space;
		 * otherwise park in logowait until space opens up.
		 */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		/* Fabric cleared the virtual link: drop back to init */
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Push the N2N-assigned PID to FW, waiting if reqq is full */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1353  
1354  /*
1355   * login complete
1356   */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the pending N2N pid-set */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/*
		 * The wait element is already queued from entering this
		 * state; logowait reuses it to send the logout on resume.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Drop the pending pid-set along with the wait element */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1395  
1396  /*
1397   * logout in progress - awaiting firmware response
1398   */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* Logout finished (or port went away): notify the caller */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		/* Deleted mid-logout: no completion callback is made */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1420  
1421  /*
1422   * logout pending -- awaiting space in request queue
1423   */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Request-queue space available: send the logout now */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the pending logout; drop the reqq wait element */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1446  
1447  
1448  
1449  /*
1450   *  lps_pvt BFA LPS private functions
1451   */
1452  
1453  /*
1454   * return memory requirement
1455   */
1456  void
bfa_lps_meminfo(struct bfa_iocfc_cfg_s * cfg,struct bfa_meminfo_s * minfo,struct bfa_s * bfa)1457  bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1458  		struct bfa_s *bfa)
1459  {
1460  	struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1461  
1462  	if (cfg->drvcfg.min_cfg)
1463  		bfa_mem_kva_setup(minfo, lps_kva,
1464  			sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
1465  	else
1466  		bfa_mem_kva_setup(minfo, lps_kva,
1467  			sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
1468  }
1469  
1470  /*
1471   * bfa module attach at initialization time
1472   */
1473  void
bfa_lps_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_pcidev_s * pcidev)1474  bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1475  	struct bfa_pcidev_s *pcidev)
1476  {
1477  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1478  	struct bfa_lps_s	*lps;
1479  	int			i;
1480  
1481  	mod->num_lps = BFA_LPS_MAX_LPORTS;
1482  	if (cfg->drvcfg.min_cfg)
1483  		mod->num_lps = BFA_LPS_MIN_LPORTS;
1484  	else
1485  		mod->num_lps = BFA_LPS_MAX_LPORTS;
1486  	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
1487  
1488  	bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
1489  
1490  	INIT_LIST_HEAD(&mod->lps_free_q);
1491  	INIT_LIST_HEAD(&mod->lps_active_q);
1492  	INIT_LIST_HEAD(&mod->lps_login_q);
1493  
1494  	for (i = 0; i < mod->num_lps; i++, lps++) {
1495  		lps->bfa	= bfa;
1496  		lps->bfa_tag	= (u8) i;
1497  		lps->reqq	= BFA_REQQ_LPS;
1498  		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1499  		list_add_tail(&lps->qe, &mod->lps_free_q);
1500  	}
1501  }
1502  
1503  /*
1504   * IOC in disabled state -- consider all lps offline
1505   */
void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head		*qe, *qen;

	/* Take every active lport offline */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* Logins in flight will never see a FW response now */
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* Merge the (now offline) login queue back into the active queue */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1523  
1524  /*
1525   * Firmware login response
1526   */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	/* Cache fields from the FW response according to the status */
	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fw_tag	= rsp->fw_tag;
		lps->fport	= rsp->f_port;
		/* Only trust the FW-reported PID on fabric (F-port) logins */
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en	= rsp->npiv_en;
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Keep the LS_RJT reason/explanation for the caller */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		/* ext_status carries the count of follow-on failed logins */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Move from the login queue back to the active queue, then notify */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1579  
static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	/*
	 * Fail up to 'count' logins queued after first_lps with
	 * first_lps' status; each is moved back to the active queue
	 * and completed via an FWRSP event.
	 */
	qe = bfa_q_next(first_lps);

	while (count && qe) {
		/* Save the successor: the FWRSP handling unlinks qe */
		qe_next = bfa_q_next(qe);
		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}
1604  
1605  /*
1606   * Firmware logout response
1607   */
1608  static void
bfa_lps_logout_rsp(struct bfa_s * bfa,struct bfi_lps_logout_rsp_s * rsp)1609  bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1610  {
1611  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1612  	struct bfa_lps_s	*lps;
1613  
1614  	WARN_ON(rsp->bfa_tag >= mod->num_lps);
1615  	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
1616  
1617  	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1618  }
1619  
1620  /*
1621   * Firmware received a Clear virtual link request (for FCoE)
1622   */
1623  static void
bfa_lps_rx_cvl_event(struct bfa_s * bfa,struct bfi_lps_cvl_event_s * cvl)1624  bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1625  {
1626  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1627  	struct bfa_lps_s	*lps;
1628  
1629  	lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
1630  
1631  	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1632  }
1633  
1634  /*
1635   * Space is available in request queue, resume queueing request to firmware.
1636   */
1637  static void
bfa_lps_reqq_resume(void * lps_arg)1638  bfa_lps_reqq_resume(void *lps_arg)
1639  {
1640  	struct bfa_lps_s	*lps = lps_arg;
1641  
1642  	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1643  }
1644  
1645  /*
1646   * lps is freed -- triggered by vport delete
1647   */
1648  static void
bfa_lps_free(struct bfa_lps_s * lps)1649  bfa_lps_free(struct bfa_lps_s *lps)
1650  {
1651  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
1652  
1653  	lps->lp_pid = 0;
1654  	list_del(&lps->qe);
1655  	list_add_tail(&lps->qe, &mod->lps_free_q);
1656  }
1657  
1658  /*
1659   * send login request to firmware
1660   */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	/* The state machine verified queue space before calling us */
	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	/* Populate the login request from the cached lps parameters */
	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	/* Park on the login queue until the FW response arrives */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}
1685  
1686  /*
1687   * send logout request to firmware
1688   */
1689  static void
bfa_lps_send_logout(struct bfa_lps_s * lps)1690  bfa_lps_send_logout(struct bfa_lps_s *lps)
1691  {
1692  	struct bfi_lps_logout_req_s *m;
1693  
1694  	m = bfa_reqq_next(lps->bfa, lps->reqq);
1695  	WARN_ON(!m);
1696  
1697  	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1698  		bfa_fn_lpu(lps->bfa));
1699  
1700  	m->fw_tag = lps->fw_tag;
1701  	m->port_name = lps->pwwn;
1702  	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1703  }
1704  
1705  /*
1706   * send n2n pid set request to firmware
1707   */
1708  static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s * lps)1709  bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1710  {
1711  	struct bfi_lps_n2n_pid_req_s *m;
1712  
1713  	m = bfa_reqq_next(lps->bfa, lps->reqq);
1714  	WARN_ON(!m);
1715  
1716  	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
1717  		bfa_fn_lpu(lps->bfa));
1718  
1719  	m->fw_tag = lps->fw_tag;
1720  	m->lp_pid = lps->lp_pid;
1721  	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1722  }
1723  
1724  /*
1725   * Indirect login completion handler for non-fcs
1726   */
1727  static void
bfa_lps_login_comp_cb(void * arg,bfa_boolean_t complete)1728  bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1729  {
1730  	struct bfa_lps_s *lps	= arg;
1731  
1732  	if (!complete)
1733  		return;
1734  
1735  	if (lps->fdisc)
1736  		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1737  	else
1738  		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1739  }
1740  
1741  /*
1742   * Login completion handler -- direct call for fcs, queue for others
1743   */
1744  static void
bfa_lps_login_comp(struct bfa_lps_s * lps)1745  bfa_lps_login_comp(struct bfa_lps_s *lps)
1746  {
1747  	if (!lps->bfa->fcs) {
1748  		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1749  			lps);
1750  		return;
1751  	}
1752  
1753  	if (lps->fdisc)
1754  		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1755  	else
1756  		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1757  }
1758  
1759  /*
1760   * Indirect logout completion handler for non-fcs
1761   */
1762  static void
bfa_lps_logout_comp_cb(void * arg,bfa_boolean_t complete)1763  bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1764  {
1765  	struct bfa_lps_s *lps	= arg;
1766  
1767  	if (!complete)
1768  		return;
1769  
1770  	if (lps->fdisc)
1771  		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1772  	else
1773  		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
1774  }
1775  
1776  /*
1777   * Logout completion handler -- direct call for fcs, queue for others
1778   */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	/* Non-fcs callers get the completion via the callback queue */
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	/*
	 * NOTE(review): unlike bfa_lps_logout_comp_cb(), this direct fcs
	 * path notifies only FDISC logouts; a FLOGI logout completes
	 * silently here -- confirm this asymmetry is intentional.
	 */
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
1790  
1791  /*
1792   * Clear virtual link completion handler for non-fcs
1793   */
1794  static void
bfa_lps_cvl_event_cb(void * arg,bfa_boolean_t complete)1795  bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1796  {
1797  	struct bfa_lps_s *lps	= arg;
1798  
1799  	if (!complete)
1800  		return;
1801  
1802  	/* Clear virtual link to base port will result in link down */
1803  	if (lps->fdisc)
1804  		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1805  }
1806  
1807  /*
1808   * Received Clear virtual link event --direct call for fcs,
1809   * queue for others
1810   */
1811  static void
bfa_lps_cvl_event(struct bfa_lps_s * lps)1812  bfa_lps_cvl_event(struct bfa_lps_s *lps)
1813  {
1814  	if (!lps->bfa->fcs) {
1815  		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1816  			lps);
1817  		return;
1818  	}
1819  
1820  	/* Clear virtual link to base port will result in link down */
1821  	if (lps->fdisc)
1822  		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1823  }
1824  
1825  
1826  
1827  /*
1828   *  lps_public BFA LPS public functions
1829   */
1830  
1831  u32
bfa_lps_get_max_vport(struct bfa_s * bfa)1832  bfa_lps_get_max_vport(struct bfa_s *bfa)
1833  {
1834  	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1835  		return BFA_LPS_MAX_VPORTS_SUPP_CT;
1836  	else
1837  		return BFA_LPS_MAX_VPORTS_SUPP_CB;
1838  }
1839  
1840  /*
1841   * Allocate a lport srvice tag.
1842   */
1843  struct bfa_lps_s  *
bfa_lps_alloc(struct bfa_s * bfa)1844  bfa_lps_alloc(struct bfa_s *bfa)
1845  {
1846  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1847  	struct bfa_lps_s	*lps = NULL;
1848  
1849  	bfa_q_deq(&mod->lps_free_q, &lps);
1850  
1851  	if (lps == NULL)
1852  		return NULL;
1853  
1854  	list_add_tail(&lps->qe, &mod->lps_active_q);
1855  
1856  	bfa_sm_set_state(lps, bfa_lps_sm_init);
1857  	return lps;
1858  }
1859  
1860  /*
1861   * Free lport service tag. This can be called anytime after an alloc.
1862   * No need to wait for any pending login/logout completions.
1863   */
1864  void
bfa_lps_delete(struct bfa_lps_s * lps)1865  bfa_lps_delete(struct bfa_lps_s *lps)
1866  {
1867  	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1868  }
1869  
1870  /*
1871   * Initiate a lport login.
1872   */
1873  void
bfa_lps_flogi(struct bfa_lps_s * lps,void * uarg,u8 alpa,u16 pdusz,wwn_t pwwn,wwn_t nwwn,bfa_boolean_t auth_en)1874  bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1875  	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1876  {
1877  	lps->uarg	= uarg;
1878  	lps->alpa	= alpa;
1879  	lps->pdusz	= pdusz;
1880  	lps->pwwn	= pwwn;
1881  	lps->nwwn	= nwwn;
1882  	lps->fdisc	= BFA_FALSE;
1883  	lps->auth_en	= auth_en;
1884  	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1885  }
1886  
1887  /*
1888   * Initiate a lport fdisc login.
1889   */
1890  void
bfa_lps_fdisc(struct bfa_lps_s * lps,void * uarg,u16 pdusz,wwn_t pwwn,wwn_t nwwn)1891  bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1892  	wwn_t nwwn)
1893  {
1894  	lps->uarg	= uarg;
1895  	lps->alpa	= 0;
1896  	lps->pdusz	= pdusz;
1897  	lps->pwwn	= pwwn;
1898  	lps->nwwn	= nwwn;
1899  	lps->fdisc	= BFA_TRUE;
1900  	lps->auth_en	= BFA_FALSE;
1901  	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1902  }
1903  
1904  
1905  /*
1906   * Initiate a lport FDSIC logout.
1907   */
1908  void
bfa_lps_fdisclogo(struct bfa_lps_s * lps)1909  bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1910  {
1911  	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1912  }
1913  
1914  u8
bfa_lps_get_fwtag(struct bfa_s * bfa,u8 lp_tag)1915  bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1916  {
1917  	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
1918  
1919  	return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1920  }
1921  
1922  /*
1923   * Return lport services tag given the pid
1924   */
1925  u8
bfa_lps_get_tag_from_pid(struct bfa_s * bfa,u32 pid)1926  bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1927  {
1928  	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
1929  	struct bfa_lps_s	*lps;
1930  	int			i;
1931  
1932  	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1933  		if (lps->lp_pid == pid)
1934  			return lps->bfa_tag;
1935  	}
1936  
1937  	/* Return base port tag anyway */
1938  	return 0;
1939  }
1940  
1941  
/*
 * return port id assigned to the base lport
 */
u32
bfa_lps_get_base_pid(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);

	/* Tag 0 is the base (physical) lport. */
	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
}
1952  
/*
 * Set PID in case of n2n (which is assigned during PLOGI)
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* Record the peer-assigned PID, then let the state machine react. */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1965  
/*
 * LPS firmware message class handler.
 * Dispatches firmware-to-host LPS messages by message id.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	/* View the generic message through the LPS i2h union. */
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		/* CVL: presumably FCoE clear-virtual-link event — verify */
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message id: trace and warn, nothing to free. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1995  
/*
 * Post an asynchronous event notification (AEN) for a port event.
 * Best effort: silently drops the event if no AEN entry is available.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	/*
	 * aen_entry is passed without '&' yet tested afterwards, so
	 * bfad_get_aen_entry must be a macro that assigns it (may be NULL).
	 */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
2013  
2014  /*
2015   * FC PORT state machine functions
2016   */
/*
 * State: uninitialized. Initial port state before BFA is started.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		/* Presumably makes firmware apply the flash-based port
		 * configuration on this first enable — TODO confirm. */
		fcport->use_flash_cfg = BFA_TRUE;

		/* Go to enabling if the request was queued, else wait for
		 * request-queue space. */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2064  
/*
 * State: port enable requested, waiting for request-queue space to send
 * the enable message to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Cancel the pending queue-wait before stopping. */
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2128  
/*
 * State: enable request has been sent to firmware; waiting for the
 * firmware response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged (or link still down): port is now
		 * enabled but the link is down. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* A linkup callback must have been registered by now. */
		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2190  
/*
 * State: port is enabled, link is down.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC mode (FCoE): log FIP FCF discovery outcome. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2282  
/*
 * State: port is enabled and the link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		/* Link goes down as part of the disable: notify + log both
		 * the offline and the disable. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Expected offline (port/IOC disabled) vs. unexpected loss
		 * of fabric connectivity: log severity differs. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2381  
/*
 * State: port disable requested, waiting for request-queue space to send
 * the disable message to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable while a disable is still queued: remember both —
		 * the toggling state sends disable then enable on resume. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2432  
/*
 * State: a disable followed by an enable was requested while waiting for
 * request-queue space; on resume both messages are sent back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the pending disable, then the enable; final state
		 * depends on whether the enable could be queued. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable already pending; nothing to do. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable: back to plain disable-wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2479  
/*
 * State: disable request has been sent to firmware; waiting for the
 * firmware response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2534  
/*
 * State: port is disabled (explicitly, or persistently configured so).
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
						enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* Enter diagnostic (dport) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		/* Enter dynamic diagnostic (ddport) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2591  
/*
 * State: port state machine has been stopped; only a fresh START can
 * bring the port back.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		;
	}
}
2614  
/*
 * Port is enabled. IOC is down/failed.
 * A new START (after IOC recovery) re-enables the port.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2640  
/*
 * Port is disabled. IOC is down/failed.
 * START returns to disabled; ENABLE records the intent by moving to
 * iocdown so the port comes up once the IOC recovers.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			 enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all events.
		 */
		;
	}
}
2666  
/*
 * State: port is operating as a dport (diagnostic port).
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leaving dport mode returns the port to disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2698  
/*
 * State: port is operating as a ddport (dynamic diagnostic port).
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		/* Leaving ddport mode returns the port to disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2732  
/*
 * State: port is held down due to FAA misconfiguration; only disable,
 * stop and hardware-failure events are acted upon.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			    enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2775  
/*
 * Link state is down
 */
static void
bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Queue a linkup callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2795  
/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* A linkup arrived before the down callback completed;
		 * remember it as pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered: settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2818  
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up was cancelled by a new down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered: now queue the pending linkup. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2842  
/*
 * Link state is up
 */
static void
bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Queue a linkdown callback; wait for its notification. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2862  
/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* A linkdown arrived before the up callback completed;
		 * remember it as pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered: settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2885  
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
		enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Another linkup: now both a down and an up are pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered: queue the pending linkdown. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2909  
/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up was cancelled by a new down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered: queue the pending linkdown; the
		 * pending up remains recorded in the next state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2933  
/*
 * Deferred link-event callback.  On completion, deliver the saved event to
 * the registered handler; on cancellation, just unblock the LN state machine.
 */
static void
__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_ln_s *ln = cbarg;

	if (complete)
		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
	else
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
}
2944  
2945  /*
2946   * Send SCN notification to upper layers.
2947   * trunk - false if caller is fcport to ignore fcport event in trunked mode
2948   */
2949  static void
bfa_fcport_scn(struct bfa_fcport_s * fcport,enum bfa_port_linkstate event,bfa_boolean_t trunk)2950  bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2951  	bfa_boolean_t trunk)
2952  {
2953  	if (fcport->cfg.trunked && !trunk)
2954  		return;
2955  
2956  	switch (event) {
2957  	case BFA_PORT_LINKUP:
2958  		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2959  		break;
2960  	case BFA_PORT_LINKDOWN:
2961  		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2962  		break;
2963  	default:
2964  		WARN_ON(1);
2965  	}
2966  }
2967  
/*
 * Deliver a link state notification to the upper layer.  When FCS is
 * attached the callback runs inline and the LN state machine is advanced
 * immediately; otherwise delivery is deferred through __bfa_cb_fcport_event().
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		/* remember the event for the deferred callback */
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
2982  
2983  #define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2984  							BFA_CACHELINE_SZ))
2985  
/*
 * Report the DMA memory needed by the fcport module (statistics buffer).
 * @cfg is unused here; it is part of the common per-module meminfo signature.
 */
void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		   struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
2994  
/*
 * Request-queue resume callback: CQ space is available again, restart the
 * port state machine.
 */
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
3002  
/*
 * Claim the DMA area reserved for firmware port statistics.
 * Note: stats and stats_kva both refer to the same virtual address; stats
 * is simply the typed view of the buffer.
 */
static void
bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
{
	struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;

	fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
	fcport->stats_pa  = bfa_mem_dma_phys(fcport_dma);
	fcport->stats = (union bfa_fcport_stats_u *)
				bfa_mem_dma_virt(fcport_dma);
}
3013  
3014  /*
3015   * Memory initialization.
3016   */
3017  void
bfa_fcport_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_pcidev_s * pcidev)3018  bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
3019  		struct bfa_pcidev_s *pcidev)
3020  {
3021  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3022  	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
3023  	struct bfa_fcport_ln_s *ln = &fcport->ln;
3024  
3025  	fcport->bfa = bfa;
3026  	ln->fcport = fcport;
3027  
3028  	bfa_fcport_mem_claim(fcport);
3029  
3030  	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
3031  	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
3032  
3033  	/*
3034  	 * initialize time stamp for stats reset
3035  	 */
3036  	fcport->stats_reset_time = ktime_get_seconds();
3037  	fcport->stats_dma_ready = BFA_FALSE;
3038  
3039  	/*
3040  	 * initialize and set default configuration
3041  	 */
3042  	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
3043  	port_cfg->speed = BFA_PORT_SPEED_AUTO;
3044  	port_cfg->trunked = BFA_FALSE;
3045  	port_cfg->maxfrsize = 0;
3046  
3047  	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3048  	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
3049  	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
3050  	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;
3051  
3052  	fcport->fec_state = BFA_FEC_OFFLINE;
3053  
3054  	INIT_LIST_HEAD(&fcport->stats_pending_q);
3055  	INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3056  
3057  	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
3058  }
3059  
/*
 * Kick off the port state machine.
 */
void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
3065  
3066  /*
3067   * Called when IOC failure is detected.
3068   */
3069  void
bfa_fcport_iocdisable(struct bfa_s * bfa)3070  bfa_fcport_iocdisable(struct bfa_s *bfa)
3071  {
3072  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3073  
3074  	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3075  	bfa_trunk_iocdisable(bfa);
3076  }
3077  
3078  /*
3079   * Update loop info in fcport for SCN online
3080   */
3081  static void
bfa_fcport_update_loop_info(struct bfa_fcport_s * fcport,struct bfa_fcport_loop_info_s * loop_info)3082  bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3083  			struct bfa_fcport_loop_info_s *loop_info)
3084  {
3085  	fcport->myalpa = loop_info->myalpa;
3086  	fcport->alpabm_valid =
3087  			loop_info->alpabm_val;
3088  	memcpy(fcport->alpabm.alpa_bm,
3089  			loop_info->alpabm.alpa_bm,
3090  			sizeof(struct fc_alpabm_s));
3091  }
3092  
/*
 * Capture link attributes from the firmware link-up event: speed, topology,
 * QoS, BB credit recovery, FEC and trunk state.  Loop topology carries only
 * ALPA info and returns early.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3130  
/*
 * Reset link attributes to their link-down defaults.
 */
static void
bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
{
	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
	fcport->fec_state = BFA_FEC_OFFLINE;
}
3138  
3139  /*
3140   * Send port enable message to firmware.
3141   */
3142  static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s * fcport)3143  bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3144  {
3145  	struct bfi_fcport_enable_req_s *m;
3146  
3147  	/*
3148  	 * Increment message tag before queue check, so that responses to old
3149  	 * requests are discarded.
3150  	 */
3151  	fcport->msgtag++;
3152  
3153  	/*
3154  	 * check for room in queue to send request now
3155  	 */
3156  	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3157  	if (!m) {
3158  		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3159  							&fcport->reqq_wait);
3160  		return BFA_FALSE;
3161  	}
3162  
3163  	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
3164  			bfa_fn_lpu(fcport->bfa));
3165  	m->nwwn = fcport->nwwn;
3166  	m->pwwn = fcport->pwwn;
3167  	m->port_cfg = fcport->cfg;
3168  	m->msgtag = fcport->msgtag;
3169  	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
3170  	m->use_flash_cfg = fcport->use_flash_cfg;
3171  	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3172  	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3173  	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3174  
3175  	/*
3176  	 * queue I/O message to firmware
3177  	 */
3178  	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3179  	return BFA_TRUE;
3180  }
3181  
3182  /*
3183   * Send port disable message to firmware.
3184   */
3185  static	bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s * fcport)3186  bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3187  {
3188  	struct bfi_fcport_req_s *m;
3189  
3190  	/*
3191  	 * Increment message tag before queue check, so that responses to old
3192  	 * requests are discarded.
3193  	 */
3194  	fcport->msgtag++;
3195  
3196  	/*
3197  	 * check for room in queue to send request now
3198  	 */
3199  	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3200  	if (!m) {
3201  		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3202  							&fcport->reqq_wait);
3203  		return BFA_FALSE;
3204  	}
3205  
3206  	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
3207  			bfa_fn_lpu(fcport->bfa));
3208  	m->msgtag = fcport->msgtag;
3209  
3210  	/*
3211  	 * queue I/O message to firmware
3212  	 */
3213  	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
3214  
3215  	return BFA_TRUE;
3216  }
3217  
/*
 * Copy the port/node world wide names from the IOC attributes.
 */
static void
bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
{
	fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
	fcport->nwwn = fcport->bfa->ioc.attr->nwwn;

	bfa_trc(fcport->bfa, fcport->pwwn);
	bfa_trc(fcport->bfa, fcport->nwwn);
}
3227  
3228  static void
bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s * d,struct bfa_qos_stats_s * s)3229  bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3230  	struct bfa_qos_stats_s *s)
3231  {
3232  	u32	*dip = (u32 *) d;
3233  	__be32	*sip = (__be32 *) s;
3234  	int		i;
3235  
3236  	/* Now swap the 32 bit fields */
3237  	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3238  		dip[i] = be32_to_cpu(sip[i]);
3239  }
3240  
/*
 * Convert FCoE statistics from firmware (big-endian) to host order.  The
 * counters are 64-bit wide, so on little-endian hosts the two 32-bit halves
 * of each counter must also be exchanged, in addition to the per-word
 * byte swap.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32	*dip = (u32 *) d;
	__be32	*sip = (__be32 *) s;
	int		i;

	/* step by two: each pair of 32-bit words is one 64-bit counter */
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
	     i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3260  
/*
 * Completion for a statistics-get request: copy (and byte-swap) the DMA'd
 * stats into each pending requester's buffer and signal its callback.  On
 * cancellation, just drop the pending queue.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		time64_t time = ktime_get_seconds();

		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* seconds since last stats reset */
					ret->fcoe.secs_reset =
						time - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3297  
/*
 * Statistics-get request timed out: cancel any pending queue wait and
 * complete all waiters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
3313  
/*
 * Issue a statistics-get request to firmware.  If the request queue is
 * full, arm a queue-resume wait that re-enters this function.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3337  
/*
 * Completion for a statistics-clear request: restart the stats-reset
 * timestamp and signal every pending requester.  On cancellation, drop
 * the pending queue.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		/*
		 * re-initialize time stamp for stats reset
		 */
		fcport->stats_reset_time = ktime_get_seconds();
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3362  
/*
 * Statistics-clear request timed out: cancel any pending queue wait and
 * complete all waiters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
3378  
/*
 * Issue a statistics-clear request to firmware.  If the request queue is
 * full, arm a queue-resume wait that re-enters this function.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
						&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3402  
3403  /*
3404   * Handle trunk SCN event from firmware.
3405   */
3406  static void
bfa_trunk_scn(struct bfa_fcport_s * fcport,struct bfi_fcport_trunk_scn_s * scn)3407  bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3408  {
3409  	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3410  	struct bfi_fcport_trunk_link_s *tlink;
3411  	struct bfa_trunk_link_attr_s *lattr;
3412  	enum bfa_trunk_state state_prev;
3413  	int i;
3414  	int link_bm = 0;
3415  
3416  	bfa_trc(fcport->bfa, fcport->cfg.trunked);
3417  	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3418  		   scn->trunk_state != BFA_TRUNK_OFFLINE);
3419  
3420  	bfa_trc(fcport->bfa, trunk->attr.state);
3421  	bfa_trc(fcport->bfa, scn->trunk_state);
3422  	bfa_trc(fcport->bfa, scn->trunk_speed);
3423  
3424  	/*
3425  	 * Save off new state for trunk attribute query
3426  	 */
3427  	state_prev = trunk->attr.state;
3428  	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3429  		trunk->attr.state = scn->trunk_state;
3430  	trunk->attr.speed = scn->trunk_speed;
3431  	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3432  		lattr = &trunk->attr.link_attr[i];
3433  		tlink = &scn->tlink[i];
3434  
3435  		lattr->link_state = tlink->state;
3436  		lattr->trunk_wwn  = tlink->trunk_wwn;
3437  		lattr->fctl	  = tlink->fctl;
3438  		lattr->speed	  = tlink->speed;
3439  		lattr->deskew	  = be32_to_cpu(tlink->deskew);
3440  
3441  		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3442  			fcport->speed	 = tlink->speed;
3443  			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3444  			link_bm |= 1 << i;
3445  		}
3446  
3447  		bfa_trc(fcport->bfa, lattr->link_state);
3448  		bfa_trc(fcport->bfa, lattr->trunk_wwn);
3449  		bfa_trc(fcport->bfa, lattr->fctl);
3450  		bfa_trc(fcport->bfa, lattr->speed);
3451  		bfa_trc(fcport->bfa, lattr->deskew);
3452  	}
3453  
3454  	switch (link_bm) {
3455  	case 3:
3456  		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3457  			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3458  		break;
3459  	case 2:
3460  		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3461  			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3462  		break;
3463  	case 1:
3464  		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3465  			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3466  		break;
3467  	default:
3468  		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3469  			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3470  	}
3471  
3472  	/*
3473  	 * Notify upper layers if trunk state changed.
3474  	 */
3475  	if ((state_prev != trunk->attr.state) ||
3476  		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3477  		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3478  			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3479  	}
3480  }
3481  
/*
 * IOC went down: if trunking is configured, notify upper layers of link
 * down and reset all cached trunk/link attributes to their offline values.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
						BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
						BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
						BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3509  
3510  /*
3511   * Called to initialize port attributes
3512   */
3513  void
bfa_fcport_init(struct bfa_s * bfa)3514  bfa_fcport_init(struct bfa_s *bfa)
3515  {
3516  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3517  
3518  	/*
3519  	 * Initialize port attributes from IOC hardware data.
3520  	 */
3521  	bfa_fcport_set_wwns(fcport);
3522  	if (fcport->cfg.maxfrsize == 0)
3523  		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3524  	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3525  	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3526  
3527  	if (bfa_fcport_is_pbcdisabled(bfa))
3528  		bfa->modules.port.pbc_disabled = BFA_TRUE;
3529  
3530  	WARN_ON(!fcport->cfg.maxfrsize);
3531  	WARN_ON(!fcport->cfg.rx_bbcredit);
3532  	WARN_ON(!fcport->speed_sup);
3533  }
3534  
3535  /*
3536   * Firmware message handler.
3537   */
3538  void
bfa_fcport_isr(struct bfa_s * bfa,struct bfi_msg_s * msg)3539  bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3540  {
3541  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3542  	union bfi_fcport_i2h_msg_u i2hmsg;
3543  
3544  	i2hmsg.msg = msg;
3545  	fcport->event_arg.i2hmsg = i2hmsg;
3546  
3547  	bfa_trc(bfa, msg->mhdr.msg_id);
3548  	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3549  
3550  	switch (msg->mhdr.msg_id) {
3551  	case BFI_FCPORT_I2H_ENABLE_RSP:
3552  		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3553  
3554  			fcport->stats_dma_ready = BFA_TRUE;
3555  			if (fcport->use_flash_cfg) {
3556  				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3557  				fcport->cfg.maxfrsize =
3558  					cpu_to_be16(fcport->cfg.maxfrsize);
3559  				fcport->cfg.path_tov =
3560  					cpu_to_be16(fcport->cfg.path_tov);
3561  				fcport->cfg.q_depth =
3562  					cpu_to_be16(fcport->cfg.q_depth);
3563  
3564  				if (fcport->cfg.trunked)
3565  					fcport->trunk.attr.state =
3566  						BFA_TRUNK_OFFLINE;
3567  				else
3568  					fcport->trunk.attr.state =
3569  						BFA_TRUNK_DISABLED;
3570  				fcport->qos_attr.qos_bw =
3571  					i2hmsg.penable_rsp->port_cfg.qos_bw;
3572  				fcport->use_flash_cfg = BFA_FALSE;
3573  			}
3574  
3575  			if (fcport->cfg.qos_enabled)
3576  				fcport->qos_attr.state = BFA_QOS_OFFLINE;
3577  			else
3578  				fcport->qos_attr.state = BFA_QOS_DISABLED;
3579  
3580  			fcport->qos_attr.qos_bw_op =
3581  					i2hmsg.penable_rsp->port_cfg.qos_bw;
3582  
3583  			if (fcport->cfg.bb_cr_enabled)
3584  				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3585  			else
3586  				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3587  
3588  			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3589  		}
3590  		break;
3591  
3592  	case BFI_FCPORT_I2H_DISABLE_RSP:
3593  		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3594  			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3595  		break;
3596  
3597  	case BFI_FCPORT_I2H_EVENT:
3598  		if (fcport->cfg.bb_cr_enabled)
3599  			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
3600  		else
3601  			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;
3602  
3603  		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3604  			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3605  		else {
3606  			if (i2hmsg.event->link_state.linkstate_rsn ==
3607  			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
3608  				bfa_sm_send_event(fcport,
3609  						  BFA_FCPORT_SM_FAA_MISCONFIG);
3610  			else
3611  				bfa_sm_send_event(fcport,
3612  						  BFA_FCPORT_SM_LINKDOWN);
3613  		}
3614  		fcport->qos_attr.qos_bw_op =
3615  				i2hmsg.event->link_state.qos_attr.qos_bw_op;
3616  		break;
3617  
3618  	case BFI_FCPORT_I2H_TRUNK_SCN:
3619  		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3620  		break;
3621  
3622  	case BFI_FCPORT_I2H_STATS_GET_RSP:
3623  		/*
3624  		 * check for timer pop before processing the rsp
3625  		 */
3626  		if (list_empty(&fcport->stats_pending_q) ||
3627  		    (fcport->stats_status == BFA_STATUS_ETIMER))
3628  			break;
3629  
3630  		bfa_timer_stop(&fcport->timer);
3631  		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
3632  		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
3633  		break;
3634  
3635  	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3636  		/*
3637  		 * check for timer pop before processing the rsp
3638  		 */
3639  		if (list_empty(&fcport->statsclr_pending_q) ||
3640  		    (fcport->stats_status == BFA_STATUS_ETIMER))
3641  			break;
3642  
3643  		bfa_timer_stop(&fcport->timer);
3644  		fcport->stats_status = BFA_STATUS_OK;
3645  		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
3646  		break;
3647  
3648  	case BFI_FCPORT_I2H_ENABLE_AEN:
3649  		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3650  		break;
3651  
3652  	case BFI_FCPORT_I2H_DISABLE_AEN:
3653  		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3654  		break;
3655  
3656  	default:
3657  		WARN_ON(1);
3658  	break;
3659  	}
3660  }
3661  
3662  /*
3663   * Registered callback for port events.
3664   */
3665  void
bfa_fcport_event_register(struct bfa_s * bfa,void (* cbfn)(void * cbarg,enum bfa_port_linkstate event),void * cbarg)3666  bfa_fcport_event_register(struct bfa_s *bfa,
3667  				void (*cbfn) (void *cbarg,
3668  				enum bfa_port_linkstate event),
3669  				void *cbarg)
3670  {
3671  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3672  
3673  	fcport->event_cbfn = cbfn;
3674  	fcport->event_cbarg = cbarg;
3675  }
3676  
/*
 * Enable the FC port.  Rejected when the port is pre-boot disabled, the
 * IOC is disabled, or a diagnostic is in progress.
 */
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
3694  
/*
 * Disable the FC port.  Rejected when the port is pre-boot disabled or the
 * IOC is disabled.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3707  
/* If PBC (pre-boot configuration) is disabled on port, return error */
bfa_status_t
bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
		bfa_trc(bfa, fcport->pwwn);
		return BFA_STATUS_PBC;
	}
	return BFA_STATUS_OK;
}
3722  
3723  /*
3724   * Configure port speed.
3725   */
3726  bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s * bfa,enum bfa_port_speed speed)3727  bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3728  {
3729  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3730  
3731  	bfa_trc(bfa, speed);
3732  
3733  	if (fcport->cfg.trunked == BFA_TRUE)
3734  		return BFA_STATUS_TRUNK_ENABLED;
3735  	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3736  			(speed == BFA_PORT_SPEED_16GBPS))
3737  		return BFA_STATUS_UNSUPP_SPEED;
3738  	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3739  		bfa_trc(bfa, fcport->speed_sup);
3740  		return BFA_STATUS_UNSUPP_SPEED;
3741  	}
3742  
3743  	/* Port speed entered needs to be checked */
3744  	if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3745  		/* For CT2, 1G is not supported */
3746  		if ((speed == BFA_PORT_SPEED_1GBPS) &&
3747  		    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3748  			return BFA_STATUS_UNSUPP_SPEED;
3749  
3750  		/* Already checked for Auto Speed and Max Speed supp */
3751  		if (!(speed == BFA_PORT_SPEED_1GBPS ||
3752  		      speed == BFA_PORT_SPEED_2GBPS ||
3753  		      speed == BFA_PORT_SPEED_4GBPS ||
3754  		      speed == BFA_PORT_SPEED_8GBPS ||
3755  		      speed == BFA_PORT_SPEED_16GBPS ||
3756  		      speed == BFA_PORT_SPEED_AUTO))
3757  			return BFA_STATUS_UNSUPP_SPEED;
3758  	} else {
3759  		if (speed != BFA_PORT_SPEED_10GBPS)
3760  			return BFA_STATUS_UNSUPP_SPEED;
3761  	}
3762  
3763  	fcport->cfg.speed = speed;
3764  
3765  	return BFA_STATUS_OK;
3766  }
3767  
3768  /*
3769   * Get current speed.
3770   */
3771  enum bfa_port_speed
bfa_fcport_get_speed(struct bfa_s * bfa)3772  bfa_fcport_get_speed(struct bfa_s *bfa)
3773  {
3774  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3775  
3776  	return fcport->speed;
3777  }
3778  
3779  /*
3780   * Configure port topology.
3781   */
3782  bfa_status_t
bfa_fcport_cfg_topology(struct bfa_s * bfa,enum bfa_port_topology topology)3783  bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3784  {
3785  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3786  
3787  	bfa_trc(bfa, topology);
3788  	bfa_trc(bfa, fcport->cfg.topology);
3789  
3790  	switch (topology) {
3791  	case BFA_PORT_TOPOLOGY_P2P:
3792  		break;
3793  
3794  	case BFA_PORT_TOPOLOGY_LOOP:
3795  		if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3796  			(fcport->qos_attr.state != BFA_QOS_DISABLED))
3797  			return BFA_STATUS_ERROR_QOS_ENABLED;
3798  		if (fcport->cfg.ratelimit != BFA_FALSE)
3799  			return BFA_STATUS_ERROR_TRL_ENABLED;
3800  		if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3801  			(fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3802  			return BFA_STATUS_ERROR_TRUNK_ENABLED;
3803  		if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3804  			(fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3805  			return BFA_STATUS_UNSUPP_SPEED;
3806  		if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3807  			return BFA_STATUS_LOOP_UNSUPP_MEZZ;
3808  		if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3809  			return BFA_STATUS_DPORT_ERR;
3810  		if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
3811  			return BFA_STATUS_DPORT_ERR;
3812  		break;
3813  
3814  	case BFA_PORT_TOPOLOGY_AUTO:
3815  		break;
3816  
3817  	default:
3818  		return BFA_STATUS_EINVAL;
3819  	}
3820  
3821  	fcport->cfg.topology = topology;
3822  	return BFA_STATUS_OK;
3823  }
3824  
3825  /*
3826   * Get current topology.
3827   */
3828  enum bfa_port_topology
bfa_fcport_get_topology(struct bfa_s * bfa)3829  bfa_fcport_get_topology(struct bfa_s *bfa)
3830  {
3831  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3832  
3833  	return fcport->topology;
3834  }
3835  
3836  /*
3837   * Get config topology.
3838   */
3839  enum bfa_port_topology
bfa_fcport_get_cfg_topology(struct bfa_s * bfa)3840  bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3841  {
3842  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3843  
3844  	return fcport->cfg.topology;
3845  }
3846  
/*
 * Configure a hard (fixed) ALPA for loop topology and mark it in use.
 */
bfa_status_t
bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_TRUE;
	fcport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}
3861  
/*
 * Clear the hard ALPA configuration (the stored ALPA value is retained
 * but no longer used).
 */
bfa_status_t
bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
	bfa_trc(bfa, fcport->cfg.hardalpa);

	fcport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}
3873  
/*
 * Fetch the configured hard ALPA into *alpa; returns whether hard ALPA
 * is currently enabled.
 */
bfa_boolean_t
bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	*alpa = fcport->cfg.hardalpa;
	return fcport->cfg.cfg_hardalpa;
}
3882  
3883  u8
bfa_fcport_get_myalpa(struct bfa_s * bfa)3884  bfa_fcport_get_myalpa(struct bfa_s *bfa)
3885  {
3886  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887  
3888  	return fcport->myalpa;
3889  }
3890  
/*
 * Configure the maximum frame size.  Must be within [FC_MIN_PDUSZ,
 * FC_MAX_PDUSZ] and a power of two (unless it is exactly FC_MAX_PDUSZ).
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* within range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, if not the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
3910  
3911  u16
bfa_fcport_get_maxfrsize(struct bfa_s * bfa)3912  bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3913  {
3914  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3915  
3916  	return fcport->cfg.maxfrsize;
3917  }
3918  
3919  u8
bfa_fcport_get_rx_bbcredit(struct bfa_s * bfa)3920  bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3921  {
3922  	if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3923  		return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
3924  
3925  	else
3926  		return 0;
3927  }
3928  
3929  void
bfa_fcport_set_tx_bbcredit(struct bfa_s * bfa,u16 tx_bbcredit)3930  bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3931  {
3932  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3933  
3934  	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3935  }
3936  
3937  /*
3938   * Get port attributes.
3939   */
3940  
3941  wwn_t
bfa_fcport_get_wwn(struct bfa_s * bfa,bfa_boolean_t node)3942  bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3943  {
3944  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3945  	if (node)
3946  		return fcport->nwwn;
3947  	else
3948  		return fcport->pwwn;
3949  }
3950  
/*
 * Fill @attr with a snapshot of the port's identity, configuration,
 * speed/topology, beacon settings and derived port state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	/* identity: current and factory-programmed WWNs */
	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	/* start from the full port configuration, then overlay live state */
	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
	/* map the state machine state to the externally visible port state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);

	attr->fec_state = fcport->fec_state;

	/* PBC Disabled State */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		/* IOC-level conditions override the SM-derived state */
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
4000  
4001  #define BFA_FCPORT_STATS_TOV	1000
4002  
/*
 * Fetch port statistics (FCQoS or FCoE).
 *
 * The callback is queued on stats_pending_q; the first waiter issues
 * the firmware request and arms a guard timer, later waiters simply
 * share the same response. Fails when the IOC is not operational, the
 * stats DMA area is not ready, or a clear-stats request is in flight.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	/* a concurrent clear would race with the fetch */
	if (!list_empty(&fcport->statsclr_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->stats_pending_q)) {
		/* first waiter: kick off the firmware request and timeout */
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
		bfa_fcport_send_stats_get(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_get_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);

	return BFA_STATUS_OK;
}
4029  
/*
 * Reset port statistics (FCQoS or FCoE).
 *
 * Mirror image of bfa_fcport_get_stats(): the first waiter on
 * statsclr_pending_q issues the firmware clear request and arms a
 * guard timer. Fails when the IOC is not operational, the stats DMA
 * area is not ready, or a stats fetch is in flight.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	/* a concurrent fetch would race with the clear */
	if (!list_empty(&fcport->stats_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->statsclr_pending_q)) {
		/* first waiter: issue the clear request and arm timeout */
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
		bfa_fcport_send_stats_clear(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_clr_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);

	return BFA_STATUS_OK;
}
4056  
4057  /*
4058   * Fetch port attributes.
4059   */
4060  bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s * bfa)4061  bfa_fcport_is_disabled(struct bfa_s *bfa)
4062  {
4063  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4064  
4065  	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4066  		BFA_PORT_ST_DISABLED;
4067  
4068  }
4069  
4070  bfa_boolean_t
bfa_fcport_is_dport(struct bfa_s * bfa)4071  bfa_fcport_is_dport(struct bfa_s *bfa)
4072  {
4073  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4074  
4075  	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4076  		BFA_PORT_ST_DPORT);
4077  }
4078  
4079  bfa_boolean_t
bfa_fcport_is_ddport(struct bfa_s * bfa)4080  bfa_fcport_is_ddport(struct bfa_s *bfa)
4081  {
4082  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4083  
4084  	return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4085  		BFA_PORT_ST_DDPORT);
4086  }
4087  
/*
 * Validate and store the per-priority QoS bandwidth split.
 *
 * All three values must be non-zero, sum to exactly 100 (percent) and
 * be ordered high >= med >= low. The new split is stored only for FC
 * (non-FCoE) IOCs outside loop topology; validation errors are
 * reported in every case.
 */
bfa_status_t
bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);

	bfa_trc(bfa, ioc_type);

	if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
		return BFA_STATUS_QOS_BW_INVALID;

	if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
		return BFA_STATUS_QOS_BW_INVALID;

	/* enforce high >= med >= low (third test is implied but kept) */
	if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
	    (qos_bw->low > qos_bw->high))
		return BFA_STATUS_QOS_BW_INVALID;

	if ((ioc_type == BFA_IOC_TYPE_FC) &&
	    (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
		fcport->cfg.qos_bw = *qos_bw;

	return BFA_STATUS_OK;
}
4112  
4113  bfa_boolean_t
bfa_fcport_is_ratelim(struct bfa_s * bfa)4114  bfa_fcport_is_ratelim(struct bfa_s *bfa)
4115  {
4116  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4117  
4118  	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4119  
4120  }
4121  
4122  /*
4123   *	Enable/Disable FAA feature in port config
4124   */
4125  void
bfa_fcport_cfg_faa(struct bfa_s * bfa,u8 state)4126  bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4127  {
4128  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4129  
4130  	bfa_trc(bfa, state);
4131  	fcport->cfg.faa_state = state;
4132  }
4133  
4134  /*
4135   * Get default minimum ratelim speed
4136   */
4137  enum bfa_port_speed
bfa_fcport_get_ratelim_speed(struct bfa_s * bfa)4138  bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4139  {
4140  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4141  
4142  	bfa_trc(bfa, fcport->cfg.trl_def_speed);
4143  	return fcport->cfg.trl_def_speed;
4144  
4145  }
4146  
4147  void
bfa_fcport_beacon(void * dev,bfa_boolean_t beacon,bfa_boolean_t link_e2e_beacon)4148  bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4149  		  bfa_boolean_t link_e2e_beacon)
4150  {
4151  	struct bfa_s *bfa = dev;
4152  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4153  
4154  	bfa_trc(bfa, beacon);
4155  	bfa_trc(bfa, link_e2e_beacon);
4156  	bfa_trc(bfa, fcport->beacon);
4157  	bfa_trc(bfa, fcport->link_e2e_beacon);
4158  
4159  	fcport->beacon = beacon;
4160  	fcport->link_e2e_beacon = link_e2e_beacon;
4161  }
4162  
4163  bfa_boolean_t
bfa_fcport_is_linkup(struct bfa_s * bfa)4164  bfa_fcport_is_linkup(struct bfa_s *bfa)
4165  {
4166  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4167  
4168  	return	(!fcport->cfg.trunked &&
4169  		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4170  		(fcport->cfg.trunked &&
4171  		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4172  }
4173  
4174  bfa_boolean_t
bfa_fcport_is_qos_enabled(struct bfa_s * bfa)4175  bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4176  {
4177  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4178  
4179  	return fcport->cfg.qos_enabled;
4180  }
4181  
4182  bfa_boolean_t
bfa_fcport_is_trunk_enabled(struct bfa_s * bfa)4183  bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4184  {
4185  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4186  
4187  	return fcport->cfg.trunked;
4188  }
4189  
/*
 * Enable or disable BB credit recovery (BB_SCN) on the port.
 *
 * Supported only on FC IOCs and on non-mezzanine cards (Chinook being
 * the one mezz exception). Enabling additionally requires non-loop
 * topology, QoS and trunking off, and at least 8G speed support; a
 * @bb_scn of 0 or above BFA_BB_SCN_MAX is silently replaced with the
 * default.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* a fixed speed below the IOC's supported speed is rejected */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		/* already enabled: report whether the request differs */
		if (fcport->cfg.bb_cr_enabled) {
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		/* out-of-range bb_scn falls back to the default */
		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
4243  
4244  bfa_status_t
bfa_fcport_get_bbcr_attr(struct bfa_s * bfa,struct bfa_bbcr_attr_s * bbcr_attr)4245  bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
4246  		struct bfa_bbcr_attr_s *bbcr_attr)
4247  {
4248  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4249  
4250  	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
4251  		return BFA_STATUS_BBCR_FC_ONLY;
4252  
4253  	if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
4254  		return BFA_STATUS_TOPOLOGY_LOOP;
4255  
4256  	*bbcr_attr = fcport->bbcr_attr;
4257  
4258  	return BFA_STATUS_OK;
4259  }
4260  
4261  void
bfa_fcport_dportenable(struct bfa_s * bfa)4262  bfa_fcport_dportenable(struct bfa_s *bfa)
4263  {
4264  	/*
4265  	 * Assume caller check for port is in disable state
4266  	 */
4267  	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4268  	bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4269  }
4270  
4271  void
bfa_fcport_dportdisable(struct bfa_s * bfa)4272  bfa_fcport_dportdisable(struct bfa_s *bfa)
4273  {
4274  	/*
4275  	 * Assume caller check for port is in disable state
4276  	 */
4277  	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4278  	bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4279  }
4280  
4281  static void
bfa_fcport_ddportenable(struct bfa_s * bfa)4282  bfa_fcport_ddportenable(struct bfa_s *bfa)
4283  {
4284  	/*
4285  	 * Assume caller check for port is in disable state
4286  	 */
4287  	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
4288  }
4289  
4290  static void
bfa_fcport_ddportdisable(struct bfa_s * bfa)4291  bfa_fcport_ddportdisable(struct bfa_s *bfa)
4292  {
4293  	/*
4294  	 * Assume caller check for port is in disable state
4295  	 */
4296  	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
4297  }
4298  
/*
 * Rport State machine functions
 */
/*
 * Beginning state, only online event expected.
 *
 * CREATE moves the rport to the created state; any other event is an
 * unexpected-event fault.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4322  
/*
 * Rport has been created but not yet brought online.
 *
 * ONLINE issues the firmware create request (or waits for request
 * queue space), DELETE frees the rport immediately since nothing was
 * sent to firmware, HWFAIL parks it in iocdisable.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4354  
/*
 * Waiting for rport create response from firmware.
 *
 * A DELETE or OFFLINE arriving here cannot be acted on until the
 * create response comes back, so they transition to the corresponding
 * "pending" states instead.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4391  
/*
 * Request queue is full, awaiting queue resume to send create request.
 *
 * Since no create has reached firmware yet, DELETE and OFFLINE only
 * need to cancel the queue-wait; there is nothing to tear down in
 * firmware.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4432  
/*
 * Online state - normal parking state.
 *
 * OFFLINE/DELETE both require a firmware delete first; QOS_SCN handles
 * asynchronous QoS attribute change notifications from firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		/* NOTE(review): flow ids are traced before the byte swap
		 * below, so these traces show wire (big-endian) order. */
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4500  
/*
 * Firmware rport is being deleted - awaiting f/w response.
 *
 * FWRSP completes the offline; a DELETE arriving meanwhile upgrades
 * the transition target to deleting; HWFAIL short-circuits straight
 * to the offline callback.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4533  
/*
 * Firmware delete pending: request queue was full, waiting for space
 * to send the delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available: actually send the delete now */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4563  
/*
 * Offline state.
 *
 * The rport exists but has no firmware instance: DELETE frees it,
 * ONLINE re-issues the firmware create, a redundant OFFLINE just
 * re-runs the offline callback.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4602  
/*
 * Rport is deleted, waiting for firmware response to delete.
 *
 * Both the firmware response and a hardware failure end with the
 * rport freed back to the module pool.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4629  
/*
 * Rport delete requested while the request queue was full; waiting for
 * queue space to send the firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC is gone: cancel the queue wait and free directly */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4654  
/*
 * Waiting for rport create response from firmware. A delete is pending.
 *
 * Once the create response arrives, the deferred delete is issued to
 * firmware (or queued if the request queue is full).
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4685  
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 *
 * On the create response, the deferred offline is translated into a
 * firmware delete; a DELETE arriving meanwhile upgrades the pending
 * action to a full delete.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4721  
/*
 * IOC h/w failed.
 *
 * Parking state while the IOC is down: no firmware communication is
 * possible, so OFFLINE/DELETE are handled locally and ONLINE retries
 * the firmware create (IOC may have recovered). Repeated HWFAIL is
 * ignored.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4759  
4760  
4761  
4762  /*
4763   *  bfa_rport_private BFA rport private functions
4764   */
4765  
4766  static void
__bfa_cb_rport_online(void * cbarg,bfa_boolean_t complete)4767  __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4768  {
4769  	struct bfa_rport_s *rp = cbarg;
4770  
4771  	if (complete)
4772  		bfa_cb_rport_online(rp->rport_drv);
4773  }
4774  
4775  static void
__bfa_cb_rport_offline(void * cbarg,bfa_boolean_t complete)4776  __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4777  {
4778  	struct bfa_rport_s *rp = cbarg;
4779  
4780  	if (complete)
4781  		bfa_cb_rport_offline(rp->rport_drv);
4782  }
4783  
4784  static void
bfa_rport_qresume(void * cbarg)4785  bfa_rport_qresume(void *cbarg)
4786  {
4787  	struct bfa_rport_s	*rp = cbarg;
4788  
4789  	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4790  }
4791  
/*
 * Report the KVA memory required by the rport module: one
 * bfa_rport_s per configured rport, with the configured count clamped
 * up to at least BFA_RPORT_MIN.
 */
void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	/* enforce the minimum rport count (updates the caller's cfg too) */
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4805  
/*
 * Attach-time initialization of the rport module: carve the rport
 * array out of the pre-allocated KVA region, initialize every rport in
 * the uninit state and put all but rport 0 on the free list.
 */
void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the rport count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		   (mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * - is unused
		 * (rport tag 0 is reserved and kept off the free list)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4845  
/*
 * IOC disable handling for the rport module: reclaim unused rports and
 * fault every active rport into its iocdisable state.
 */
void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL handling may unlink the rport */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4861  
/*
 * Allocate an rport from the module free list and move it onto the
 * active queue. Returns NULL when no rport is available (presumably
 * bfa_q_deq() NULLs the output on an empty queue — confirm against
 * its definition).
 */
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
4873  
4874  static void
bfa_rport_free(struct bfa_rport_s * rport)4875  bfa_rport_free(struct bfa_rport_s *rport)
4876  {
4877  	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4878  
4879  	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
4880  	list_del(&rport->qe);
4881  	list_add_tail(&rport->qe, &mod->rp_free_q);
4882  }
4883  
/*
 * Send an rport create request to firmware.
 *
 * Returns BFA_TRUE when the message was queued; BFA_FALSE when the
 * request queue is full, in which case a queue-resume wait is armed so
 * the state machine gets a QRESUME event later.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz goes out in wire (big-endian) byte order */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4916  
/*
 * Send a rport delete request to firmware. Queues a reqq wait element
 * and returns BFA_FALSE when no request queue space is available.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	/* fw_handle was returned by firmware in the create response */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4941  
/*
 * Send a rport speed request to firmware. Unlike create/delete, this
 * is best-effort: if no request queue space is available the request
 * is simply dropped (traced) -- no wait element is queued.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4967  
4968  
4969  
4970  /*
4971   *  bfa_rport_public
4972   */
4973  
/*
 * Rport interrupt processing. Dispatches firmware-to-host rport
 * messages to the owning rport's state machine or to scn callbacks.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* cache the firmware handle for subsequent requests */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* stash the raw event for the state machine to interpret */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
5030  
5031  void
bfa_rport_res_recfg(struct bfa_s * bfa,u16 num_rport_fw)5032  bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
5033  {
5034  	struct bfa_rport_mod_s	*mod = BFA_RPORT_MOD(bfa);
5035  	struct list_head	*qe;
5036  	int	i;
5037  
5038  	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
5039  		bfa_q_deq_tail(&mod->rp_free_q, &qe);
5040  		list_add_tail(qe, &mod->rp_unused_q);
5041  	}
5042  }
5043  
5044  /*
5045   *  bfa_rport_api
5046   */
5047  
5048  struct bfa_rport_s *
bfa_rport_create(struct bfa_s * bfa,void * rport_drv)5049  bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
5050  {
5051  	struct bfa_rport_s *rp;
5052  
5053  	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
5054  
5055  	if (rp == NULL)
5056  		return NULL;
5057  
5058  	rp->bfa = bfa;
5059  	rp->rport_drv = rport_drv;
5060  	memset(&rp->stats, 0, sizeof(rp->stats));
5061  
5062  	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
5063  	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
5064  
5065  	return rp;
5066  }
5067  
/*
 * Bring an rport online with the supplied login parameters and drive
 * the state machine to send the firmware create request.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	/* cache parameters; fwcreate reads them from rport->rport_info */
	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
5085  
5086  void
bfa_rport_speed(struct bfa_rport_s * rport,enum bfa_port_speed speed)5087  bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
5088  {
5089  	WARN_ON(speed == 0);
5090  	WARN_ON(speed == BFA_PORT_SPEED_AUTO);
5091  
5092  	if (rport) {
5093  		rport->rport_info.speed = speed;
5094  		bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
5095  	}
5096  }
5097  
/*
 * Set Rport LUN Mask. Marks LUN masking enabled on both the logical
 * port and the rport, then updates the FCP(im) LUN mask table with
 * the (lport wwn, rport wwn) pairing.
 */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* enable the mask on the logical port and this rport */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
5113  
/*
 * Unset Rport LUN mask. Clears the LUN-mask flags and invalidates the
 * rport/lport tags in the FCP(im) LUN mask table.
 */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* disable the mask on the logical port and this rport */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
5129  
5130  /*
5131   * SGPG related functions
5132   */
5133  
/*
 * Compute and return memory needed by the SGPG module: DMA space for
 * the scatter-gather page array (split across segments) plus kva space
 * for the host-side SGPG descriptors.
 */
void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the configured SGPG count into the supported range */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* distribute pages over DMA segments; the last one may be partial */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
					per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
					num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
5171  
/*
 * Attach-time initialization of the SGPG module: claims the DMA and
 * kva memory reserved by bfa_sgpg_meminfo(), pairs each host-side
 * descriptor with an aligned DMA page and queues them on the free list.
 */
void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* view a physical address as either a u64 or a bfi address pair */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment base up to SGPG alignment */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* number of whole pages that fit after alignment */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			/* bind descriptor to its DMA page (kva and pa) */
			hsgpg->sgpg = sgpg;
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance the kva pointer past the claimed descriptor array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
5234  
5235  bfa_status_t
bfa_sgpg_malloc(struct bfa_s * bfa,struct list_head * sgpg_q,int nsgpgs)5236  bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5237  {
5238  	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5239  	struct bfa_sgpg_s *hsgpg;
5240  	int i;
5241  
5242  	if (mod->free_sgpgs < nsgpgs)
5243  		return BFA_STATUS_ENOMEM;
5244  
5245  	for (i = 0; i < nsgpgs; i++) {
5246  		bfa_q_deq(&mod->sgpg_q, &hsgpg);
5247  		WARN_ON(!hsgpg);
5248  		list_add_tail(&hsgpg->qe, sgpg_q);
5249  	}
5250  
5251  	mod->free_sgpgs -= nsgpgs;
5252  	return BFA_STATUS_OK;
5253  }
5254  
/*
 * Return a list of SG pages to the free pool, then use the replenished
 * pool to satisfy as many queued waiters as possible (FIFO order).
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter all it still needs, or all we have */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* fully satisfied: dequeue and notify the waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5286  
/*
 * Queue a wait element for 'nsgpg' SG pages. Any pages currently free
 * are handed to this waiter immediately; the remainder is delivered by
 * bfa_sgpg_mfree() as pages are returned to the pool.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* callers should wait only when the pool cannot satisfy them now */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5312  
5313  void
bfa_sgpg_wcancel(struct bfa_s * bfa,struct bfa_sgpg_wqe_s * wqe)5314  bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5315  {
5316  	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5317  
5318  	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
5319  	list_del(&wqe->qe);
5320  
5321  	if (wqe->nsgpg_total != wqe->nsgpg)
5322  		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5323  				   wqe->nsgpg_total - wqe->nsgpg);
5324  }
5325  
5326  void
bfa_sgpg_winit(struct bfa_sgpg_wqe_s * wqe,void (* cbfn)(void * cbarg),void * cbarg)5327  bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5328  		   void *cbarg)
5329  {
5330  	INIT_LIST_HEAD(&wqe->sgpg_q);
5331  	wqe->cbfn = cbfn;
5332  	wqe->cbarg = cbarg;
5333  }
5334  
5335  /*
5336   *  UF related functions
5337   */
5338  /*
5339   *****************************************************************************
5340   * Internal functions
5341   *****************************************************************************
5342   */
5343  static void
__bfa_cb_uf_recv(void * cbarg,bfa_boolean_t complete)5344  __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5345  {
5346  	struct bfa_uf_s   *uf = cbarg;
5347  	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5348  
5349  	if (complete)
5350  		ufm->ufrecv(ufm->cbarg, uf);
5351  }
5352  
5353  static void
claim_uf_post_msgs(struct bfa_uf_mod_s * ufm)5354  claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
5355  {
5356  	struct bfi_uf_buf_post_s *uf_bp_msg;
5357  	u16 i;
5358  	u16 buf_len;
5359  
5360  	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
5361  	uf_bp_msg = ufm->uf_buf_posts;
5362  
5363  	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5364  	     i++, uf_bp_msg++) {
5365  		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5366  
5367  		uf_bp_msg->buf_tag = i;
5368  		buf_len = sizeof(struct bfa_uf_buf_s);
5369  		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
5370  		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5371  			    bfa_fn_lpu(ufm->bfa));
5372  		bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
5373  	}
5374  
5375  	/*
5376  	 * advance pointer beyond consumed memory
5377  	 */
5378  	bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
5379  }
5380  
/*
 * Claim kva memory for the UF descriptor array, bind each descriptor
 * to its DMA buffer, and queue all descriptors on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		/* kva and physical address of this UF's DMA buffer */
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
5410  
/*
 * Claim all kva memory needed by the UF module. Order matters: the
 * descriptor array is carved out first, then the pre-built buffer
 * post messages -- each call advances the module's kva pointer.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5417  
/*
 * Compute and return memory needed by the UF module: DMA space for the
 * UF buffers (split across segments) plus kva space for the descriptor
 * array and the pre-built buffer post messages.
 */
void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* distribute UF buffers over DMA segments; last may be partial */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5445  
5446  void
bfa_uf_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg,struct bfa_pcidev_s * pcidev)5447  bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5448  		struct bfa_pcidev_s *pcidev)
5449  {
5450  	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5451  
5452  	ufm->bfa = bfa;
5453  	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5454  	INIT_LIST_HEAD(&ufm->uf_free_q);
5455  	INIT_LIST_HEAD(&ufm->uf_posted_q);
5456  	INIT_LIST_HEAD(&ufm->uf_unused_q);
5457  
5458  	uf_mem_claim(ufm);
5459  }
5460  
5461  static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s * uf_mod)5462  bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5463  {
5464  	struct bfa_uf_s   *uf;
5465  
5466  	bfa_q_deq(&uf_mod->uf_free_q, &uf);
5467  	return uf;
5468  }
5469  
/* Return a UF to the free list. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5475  
/*
 * Post a UF buffer to firmware by copying its pre-built buffer post
 * message into the request queue. On success the UF moves to the
 * posted queue; on a full request queue the UF is left off all queues
 * and BFA_STATUS_FAILED is returned (caller retries later).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* message was fully built at attach time by claim_uf_post_msgs() */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5494  
5495  static void
bfa_uf_post_all(struct bfa_uf_mod_s * uf_mod)5496  bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5497  {
5498  	struct bfa_uf_s   *uf;
5499  
5500  	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5501  		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5502  			break;
5503  	}
5504  }
5505  
/*
 * Process an unsolicited-frame-received notification from firmware:
 * locate the UF buffer, log the frame header, and deliver the frame
 * to the registered receive handler.
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* firmware reports lengths in big-endian byte order */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	/* log the FC header alone, or header plus first payload word */
	if (uf->data_len == sizeof(struct fchs_s)) {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	/* deliver inline when FCS is present; otherwise defer via cb queue */
	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
5544  
5545  void
bfa_uf_iocdisable(struct bfa_s * bfa)5546  bfa_uf_iocdisable(struct bfa_s *bfa)
5547  {
5548  	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5549  	struct bfa_uf_s *uf;
5550  	struct list_head *qe, *qen;
5551  
5552  	/* Enqueue unused uf resources to free_q */
5553  	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5554  
5555  	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5556  		uf = (struct bfa_uf_s *) qe;
5557  		list_del(&uf->qe);
5558  		bfa_uf_put(ufm, uf);
5559  	}
5560  }
5561  
/* Start the UF module: post all free UF buffers to firmware. */
void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5567  
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa	BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5583  
/*
 *	Free an unsolicited frame back to BFA.
 *
 * @param[in]		uf		unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	/* return the UF and immediately try to re-post all free UFs */
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5597  
5598  
5599  
5600  /*
5601   *  uf_pub BFA uf module public functions
5602   */
/*
 * UF interrupt handler: dispatches firmware-to-host UF messages.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unexpected message id from firmware */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
5618  
5619  void
bfa_uf_res_recfg(struct bfa_s * bfa,u16 num_uf_fw)5620  bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5621  {
5622  	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
5623  	struct list_head	*qe;
5624  	int	i;
5625  
5626  	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5627  		bfa_q_deq_tail(&mod->uf_free_q, &qe);
5628  		list_add_tail(qe, &mod->uf_unused_q);
5629  	}
5630  }
5631  
5632  /*
5633   *	Dport forward declaration
5634   */
5635  
/* D-port diagnostic test states reported to the user. */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test completed successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport enabled */
};
5643  
/*
 * BFA DPORT state machine events
 */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event         */
	BFA_DPORT_SM_DISABLE    = 2,    /* dport disable event        */
	BFA_DPORT_SM_FWRSP      = 3,    /* fw enable/disable rsp      */
	BFA_DPORT_SM_QRESUME    = 4,    /* CQ space available         */
	BFA_DPORT_SM_HWFAIL     = 5,    /* IOC h/w failure            */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test        */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure            */
	BFA_DPORT_SM_SCN	= 8,	/* state change notify from fw */
};
5657  
5658  static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5659  				  enum bfa_dport_sm_event event);
5660  static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5661  				  enum bfa_dport_sm_event event);
5662  static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5663  				  enum bfa_dport_sm_event event);
5664  static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5665  				 enum bfa_dport_sm_event event);
5666  static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5667  				 enum bfa_dport_sm_event event);
5668  static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5669  				   enum bfa_dport_sm_event event);
5670  static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
5671  					enum bfa_dport_sm_event event);
5672  static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
5673  				  enum bfa_dport_sm_event event);
5674  static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
5675  				   enum bfa_dport_sm_event event);
5676  static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
5677  				   enum bfa_dport_sm_event event);
5678  static void bfa_dport_qresume(void *cbarg);
5679  static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5680  				struct bfi_diag_dport_rsp_s *msg);
5681  static void bfa_dport_scn(struct bfa_dport_s *dport,
5682  				struct bfi_diag_dport_scn_s *msg);
5683  
5684  /*
5685   *	BFA fcdiag module
5686   */
5687  #define BFA_DIAG_QTEST_TOV	1000    /* msec */
5688  
5689  /*
5690   *	Set port status to busy
5691   */
5692  static void
bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s * fcdiag)5693  bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5694  {
5695  	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5696  
5697  	if (fcdiag->lb.lock)
5698  		fcport->diag_busy = BFA_TRUE;
5699  	else
5700  		fcport->diag_busy = BFA_FALSE;
5701  }
5702  
/*
 * Attach-time initialization of the FC diag module and its embedded
 * d-port test state machine.
 */
void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s  *dport = &fcdiag->dport;

	fcdiag->bfa             = bfa;
	fcdiag->trcmod  = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	/* re-issue the dport request when CQ space becomes available */
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
	dport->test_state = BFA_DPORT_ST_DISABLED;
	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
5721  
/*
 * IOC disable handling: fail any loopback test in progress and notify
 * the d-port state machine of the h/w failure.
 */
void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		/* complete the pending loopback with IOC failure status */
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
5738  
/*
 * Queue-test timer expiry: firmware did not echo the test message in
 * time. Fill in a timeout result and complete the test to the caller.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s       *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	/* number of exchanges completed before the timeout */
	res->count  = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		res->queue  = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5760  
/*
 * Send one queue-test message, filled with the default test pattern,
 * on the CQ currently under test. Fails with BFA_STATUS_DEVBUSY when
 * the request queue has no room.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32	i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
		bfa_fn_lpu(fcdiag->bfa));

	/* fill the payload with the known pattern firmware will invert */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}
5783  
/*
 * Queue-test response handler: verify the echoed pattern, then either
 * continue the test (more iterations on this queue, or advance to the
 * next queue when testing all), or finish and report the result.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active   */
	if (!fcdiag->qtest.timer_active) {
		/* late response after timeout already completed the test */
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result: firmware echoes the bitwise-inverted pattern */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations pending on the current queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* testing all queues: move on to the next CQ */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}
	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
5841  
/*
 * Loopback test response handler: copy the (big-endian) firmware frame
 * counters into the user result and complete the test.
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm  = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm  = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm  = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf  = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum  = be32_to_cpu(rsp->res.badfrmnum);
	res->status     = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	/* release the port: loopback no longer holds it busy */
	bfa_fcdiag_set_busy_status(fcdiag);
}
5860  
/*
 * Send a loopback test request to firmware with the user-supplied
 * mode, speed, loop count and pattern. Fails with BFA_STATUS_DEVBUSY
 * when the DIAG request queue has no room.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
		bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
5889  
5890  /*
5891   *	cpe/rme intr handler
5892   */
5893  void
bfa_fcdiag_intr(struct bfa_s * bfa,struct bfi_msg_s * msg)5894  bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5895  {
5896  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5897  
5898  	switch (msg->mhdr.msg_id) {
5899  	case BFI_DIAG_I2H_LOOPBACK:
5900  		bfa_fcdiag_loopback_comp(fcdiag,
5901  				(struct bfi_diag_lb_rsp_s *) msg);
5902  		break;
5903  	case BFI_DIAG_I2H_QTEST:
5904  		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5905  		break;
5906  	case BFI_DIAG_I2H_DPORT:
5907  		bfa_dport_req_comp(&fcdiag->dport,
5908  				(struct bfi_diag_dport_rsp_s *)msg);
5909  		break;
5910  	case BFI_DIAG_I2H_DPORT_SCN:
5911  		bfa_dport_scn(&fcdiag->dport,
5912  				(struct bfi_diag_dport_scn_s *)msg);
5913  		break;
5914  	default:
5915  		bfa_trc(fcdiag, msg->mhdr.msg_id);
5916  		WARN_ON(1);
5917  	}
5918  }
5919  
5920  /*
5921   *	Loopback test
5922   *
5923   *   @param[in] *bfa            - bfa data struct
5924   *   @param[in] opmode          - port operation mode
5925   *   @param[in] speed           - port speed
5926   *   @param[in] lpcnt           - loop count
5927   *   @param[in] pat                     - pattern to build packet
5928   *   @param[in] *result         - pt to bfa_diag_loopback_result_t data struct
5929   *   @param[in] cbfn            - callback function
5930   *   @param[in] cbarg           - callback functioin arg
5931   *
5932   *   @param[out]
5933   */
5934  bfa_status_t
bfa_fcdiag_loopback(struct bfa_s * bfa,enum bfa_port_opmode opmode,enum bfa_port_speed speed,u32 lpcnt,u32 pat,struct bfa_diag_loopback_result_s * result,bfa_cb_diag_t cbfn,void * cbarg)5935  bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5936  		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5937  		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5938  		void *cbarg)
5939  {
5940  	struct  bfa_diag_loopback_s loopback;
5941  	struct bfa_port_attr_s attr;
5942  	bfa_status_t status;
5943  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5944  
5945  	if (!bfa_iocfc_is_operational(bfa))
5946  		return BFA_STATUS_IOC_NON_OP;
5947  
5948  	/* if port is PBC disabled, return error */
5949  	if (bfa_fcport_is_pbcdisabled(bfa)) {
5950  		bfa_trc(fcdiag, BFA_STATUS_PBC);
5951  		return BFA_STATUS_PBC;
5952  	}
5953  
5954  	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5955  		bfa_trc(fcdiag, opmode);
5956  		return BFA_STATUS_PORT_NOT_DISABLED;
5957  	}
5958  
5959  	/*
5960  	 * Check if input speed is supported by the port mode
5961  	 */
5962  	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5963  		if (!(speed == BFA_PORT_SPEED_1GBPS ||
5964  		      speed == BFA_PORT_SPEED_2GBPS ||
5965  		      speed == BFA_PORT_SPEED_4GBPS ||
5966  		      speed == BFA_PORT_SPEED_8GBPS ||
5967  		      speed == BFA_PORT_SPEED_16GBPS ||
5968  		      speed == BFA_PORT_SPEED_AUTO)) {
5969  			bfa_trc(fcdiag, speed);
5970  			return BFA_STATUS_UNSUPP_SPEED;
5971  		}
5972  		bfa_fcport_get_attr(bfa, &attr);
5973  		bfa_trc(fcdiag, attr.speed_supported);
5974  		if (speed > attr.speed_supported)
5975  			return BFA_STATUS_UNSUPP_SPEED;
5976  	} else {
5977  		if (speed != BFA_PORT_SPEED_10GBPS) {
5978  			bfa_trc(fcdiag, speed);
5979  			return BFA_STATUS_UNSUPP_SPEED;
5980  		}
5981  	}
5982  
5983  	/*
5984  	 * For CT2, 1G is not supported
5985  	 */
5986  	if ((speed == BFA_PORT_SPEED_1GBPS) &&
5987  	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5988  		bfa_trc(fcdiag, speed);
5989  		return BFA_STATUS_UNSUPP_SPEED;
5990  	}
5991  
5992  	/* For Mezz card, port speed entered needs to be checked */
5993  	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5994  		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5995  			if (!(speed == BFA_PORT_SPEED_1GBPS ||
5996  			      speed == BFA_PORT_SPEED_2GBPS ||
5997  			      speed == BFA_PORT_SPEED_4GBPS ||
5998  			      speed == BFA_PORT_SPEED_8GBPS ||
5999  			      speed == BFA_PORT_SPEED_16GBPS ||
6000  			      speed == BFA_PORT_SPEED_AUTO))
6001  				return BFA_STATUS_UNSUPP_SPEED;
6002  		} else {
6003  			if (speed != BFA_PORT_SPEED_10GBPS)
6004  				return BFA_STATUS_UNSUPP_SPEED;
6005  		}
6006  	}
6007  	/* check to see if fcport is dport */
6008  	if (bfa_fcport_is_dport(bfa)) {
6009  		bfa_trc(fcdiag, fcdiag->lb.lock);
6010  		return BFA_STATUS_DPORT_ENABLED;
6011  	}
6012  	/* check to see if there is another destructive diag cmd running */
6013  	if (fcdiag->lb.lock) {
6014  		bfa_trc(fcdiag, fcdiag->lb.lock);
6015  		return BFA_STATUS_DEVBUSY;
6016  	}
6017  
6018  	fcdiag->lb.lock = 1;
6019  	loopback.lb_mode = opmode;
6020  	loopback.speed = speed;
6021  	loopback.loopcnt = lpcnt;
6022  	loopback.pattern = pat;
6023  	fcdiag->lb.result = result;
6024  	fcdiag->lb.cbfn = cbfn;
6025  	fcdiag->lb.cbarg = cbarg;
6026  	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
6027  	bfa_fcdiag_set_busy_status(fcdiag);
6028  
6029  	/* Send msg to fw */
6030  	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
6031  	return status;
6032  }
6033  
6034  /*
6035   *	DIAG queue test command
6036   *
6037   *   @param[in] *bfa            - bfa data struct
6038   *   @param[in] force           - 1: don't do ioc op checking
6039   *   @param[in] queue           - queue no. to test
6040   *   @param[in] *result         - pt to bfa_diag_qtest_result_t data struct
6041   *   @param[in] cbfn            - callback function
6042   *   @param[in] *cbarg          - callback functioin arg
6043   *
6044   *   @param[out]
6045   */
6046  bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s * bfa,u32 force,u32 queue,struct bfa_diag_qtest_result_s * result,bfa_cb_diag_t cbfn,void * cbarg)6047  bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
6048  		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
6049  		void *cbarg)
6050  {
6051  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6052  	bfa_status_t status;
6053  	bfa_trc(fcdiag, force);
6054  	bfa_trc(fcdiag, queue);
6055  
6056  	if (!force && !bfa_iocfc_is_operational(bfa))
6057  		return BFA_STATUS_IOC_NON_OP;
6058  
6059  	/* check to see if there is another destructive diag cmd running */
6060  	if (fcdiag->qtest.lock) {
6061  		bfa_trc(fcdiag, fcdiag->qtest.lock);
6062  		return BFA_STATUS_DEVBUSY;
6063  	}
6064  
6065  	/* Initialization */
6066  	fcdiag->qtest.lock = 1;
6067  	fcdiag->qtest.cbfn = cbfn;
6068  	fcdiag->qtest.cbarg = cbarg;
6069  	fcdiag->qtest.result = result;
6070  	fcdiag->qtest.count = QTEST_CNT_DEFAULT;
6071  
6072  	/* Init test results */
6073  	fcdiag->qtest.result->status = BFA_STATUS_OK;
6074  	fcdiag->qtest.result->count  = 0;
6075  
6076  	/* send */
6077  	if (queue < BFI_IOC_MAX_CQS) {
6078  		fcdiag->qtest.result->queue  = (u8)queue;
6079  		fcdiag->qtest.queue = (u8)queue;
6080  		fcdiag->qtest.all   = 0;
6081  	} else {
6082  		fcdiag->qtest.result->queue  = 0;
6083  		fcdiag->qtest.queue = 0;
6084  		fcdiag->qtest.all   = 1;
6085  	}
6086  	status = bfa_fcdiag_queuetest_send(fcdiag);
6087  
6088  	/* Start a timer */
6089  	if (status == BFA_STATUS_OK) {
6090  		bfa_timer_start(bfa, &fcdiag->qtest.timer,
6091  				bfa_fcdiag_queuetest_timeout, fcdiag,
6092  				BFA_DIAG_QTEST_TOV);
6093  		fcdiag->qtest.timer_active = 1;
6094  	}
6095  	return status;
6096  }
6097  
6098  /*
6099   * DIAG PLB is running
6100   *
6101   *   @param[in] *bfa    - bfa data struct
6102   *
6103   *   @param[out]
6104   */
6105  bfa_status_t
bfa_fcdiag_lb_is_running(struct bfa_s * bfa)6106  bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
6107  {
6108  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6109  	return fcdiag->lb.lock ?  BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
6110  }
6111  
6112  /*
6113   *	D-port
6114   */
6115  #define bfa_dport_result_start(__dport, __mode) do {				\
6116  		(__dport)->result.start_time = ktime_get_real_seconds();	\
6117  		(__dport)->result.status = DPORT_TEST_ST_INPRG;			\
6118  		(__dport)->result.mode = (__mode);				\
6119  		(__dport)->result.rp_pwwn = (__dport)->rp_pwwn;			\
6120  		(__dport)->result.rp_nwwn = (__dport)->rp_nwwn;			\
6121  		(__dport)->result.lpcnt = (__dport)->lpcnt;			\
6122  } while (0)
6123  
/* forward declaration: used by the d-port state machine handlers below */
static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
					enum bfi_dport_req req);
6126  static void
bfa_cb_fcdiag_dport(struct bfa_dport_s * dport,bfa_status_t bfa_status)6127  bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
6128  {
6129  	if (dport->cbfn != NULL) {
6130  		dport->cbfn(dport->cbarg, bfa_status);
6131  		dport->cbfn = NULL;
6132  		dport->cbarg = NULL;
6133  	}
6134  }
6135  
/*
 * State: d-port disabled (initial state).
 *
 * A host ENABLE kicks off a firmware enable request; an SCN announcing
 * a switch-initiated (dynamic) d-port enable moves straight to enabled.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		/* put the FC port into d-port mode, then tell firmware */
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			/* no CQ space: park until the queue resumes */
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		/* switch-initiated dynamic d-port enable */
		if (dport->i2hmsg.scn.state ==  BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			/* no other SCN is expected in this state */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6174  
6175  static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s * dport,enum bfa_dport_sm_event event)6176  bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6177  			    enum bfa_dport_sm_event event)
6178  {
6179  	bfa_trc(dport->bfa, event);
6180  
6181  	switch (event) {
6182  	case BFA_DPORT_SM_QRESUME:
6183  		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6184  		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6185  		break;
6186  
6187  	case BFA_DPORT_SM_HWFAIL:
6188  		bfa_reqq_wcancel(&dport->reqq_wait);
6189  		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6190  		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6191  		break;
6192  
6193  	default:
6194  		bfa_sm_fault(dport->bfa, event);
6195  	}
6196  }
6197  
/*
 * State: d-port enable request sent to firmware; waiting for the
 * response.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		/* start with a clean result area for the new test */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			/* auto-mode test starts as soon as enable completes */
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		/* firmware rejected the enable; undo the fcport side */
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6231  
/*
 * State: d-port enabled.  The test may be not started, in progress or
 * complete; handles host start/disable requests and firmware SCNs.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		/* host-requested (manual) test restart */
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* host-requested teardown of d-port mode */
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		/* firmware progress notifications while enabled */
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* switch is tearing down a dynamic d-port */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					 bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			/* FC port went down underneath us */
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6303  
6304  static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s * dport,enum bfa_dport_sm_event event)6305  bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6306  			     enum bfa_dport_sm_event event)
6307  {
6308  	bfa_trc(dport->bfa, event);
6309  
6310  	switch (event) {
6311  	case BFA_DPORT_SM_QRESUME:
6312  		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6313  		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6314  		break;
6315  
6316  	case BFA_DPORT_SM_HWFAIL:
6317  		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6318  		bfa_reqq_wcancel(&dport->reqq_wait);
6319  		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6320  		break;
6321  
6322  	case BFA_DPORT_SM_SCN:
6323  		/* ignore */
6324  		break;
6325  
6326  	default:
6327  		bfa_sm_fault(dport->bfa, event);
6328  	}
6329  }
6330  
/*
 * State: d-port disable request sent to firmware; waiting for the
 * response.
 */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* IOC died: port is effectively disabled, report success */
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* no state change */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6355  
6356  static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s * dport,enum bfa_dport_sm_event event)6357  bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
6358  			    enum bfa_dport_sm_event event)
6359  {
6360  	bfa_trc(dport->bfa, event);
6361  
6362  	switch (event) {
6363  	case BFA_DPORT_SM_QRESUME:
6364  		bfa_sm_set_state(dport, bfa_dport_sm_starting);
6365  		bfa_dport_send_req(dport, BFI_DPORT_START);
6366  		break;
6367  
6368  	case BFA_DPORT_SM_HWFAIL:
6369  		bfa_reqq_wcancel(&dport->reqq_wait);
6370  		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6371  		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6372  		break;
6373  
6374  	default:
6375  		bfa_sm_fault(dport->bfa, event);
6376  	}
6377  }
6378  
/*
 * State: d-port test-start request sent to firmware; waiting for the
 * response.
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		/* reset the result area for the restarted (manual) test */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* success and failure both return to the enabled state */
		fallthrough;

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6409  
/*
 * State: dynamic (switch-initiated) d-port disable request sent to
 * firmware; waiting for the confirming SCN.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			/* dynamic d-port torn down: bring the port back up */
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);

		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
6441  
6442  static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s * dport,enum bfa_dport_sm_event event)6443  bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
6444  			    enum bfa_dport_sm_event event)
6445  {
6446  	bfa_trc(dport->bfa, event);
6447  
6448  	switch (event) {
6449  	case BFA_DPORT_SM_QRESUME:
6450  		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
6451  		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
6452  		break;
6453  
6454  	case BFA_DPORT_SM_HWFAIL:
6455  		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6456  		bfa_reqq_wcancel(&dport->reqq_wait);
6457  		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6458  		break;
6459  
6460  	case BFA_DPORT_SM_SCN:
6461  		/* ignore */
6462  		break;
6463  
6464  	default:
6465  		bfa_sm_fault(dport->bfa, event);
6466  	}
6467  }
6468  
6469  static bfa_boolean_t
bfa_dport_send_req(struct bfa_dport_s * dport,enum bfi_dport_req req)6470  bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6471  {
6472  	struct bfi_diag_dport_req_s *m;
6473  
6474  	/*
6475  	 * check for room in queue to send request now
6476  	 */
6477  	m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6478  	if (!m) {
6479  		bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6480  		return BFA_FALSE;
6481  	}
6482  
6483  	bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6484  		    bfa_fn_lpu(dport->bfa));
6485  	m->req  = req;
6486  	if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
6487  		m->lpcnt = cpu_to_be32(dport->lpcnt);
6488  		m->payload = cpu_to_be32(dport->payload);
6489  	}
6490  
6491  	/*
6492  	 * queue I/O message to firmware
6493  	 */
6494  	bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6495  
6496  	return BFA_TRUE;
6497  }
6498  
6499  static void
bfa_dport_qresume(void * cbarg)6500  bfa_dport_qresume(void *cbarg)
6501  {
6502  	struct bfa_dport_s *dport = cbarg;
6503  
6504  	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6505  }
6506  
/*
 * Firmware response to a d-port enable/disable/start request.  Records
 * the status and remote-port WWNs, feeds the outcome into the state
 * machine, then completes the user callback.
 */
static void
bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
{
	/*
	 * NOTE(review): cpu_to_be32() is used to byte-swap a value received
	 * *from* firmware; be32_to_cpu() would express the intent -- the
	 * two swaps are functionally identical, so behavior is unchanged.
	 */
	msg->status = cpu_to_be32(msg->status);
	dport->i2hmsg.rsp.status = msg->status;
	dport->rp_pwwn = msg->pwwn;
	dport->rp_nwwn = msg->nwwn;

	if ((msg->status == BFA_STATUS_OK) ||
	    (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
		/* NO_SFP still counts as a completed request (FWRSP) */
		bfa_trc(dport->bfa, msg->status);
		bfa_trc(dport->bfa, dport->rp_pwwn);
		bfa_trc(dport->bfa, dport->rp_nwwn);
		bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);

	} else {
		bfa_trc(dport->bfa, msg->status);
		bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
	}
	bfa_cb_fcdiag_dport(dport, msg->status);
}
6528  
6529  static bfa_boolean_t
bfa_dport_is_sending_req(struct bfa_dport_s * dport)6530  bfa_dport_is_sending_req(struct bfa_dport_s *dport)
6531  {
6532  	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling)	||
6533  	    bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6534  	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling)	||
6535  	    bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
6536  	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting)	||
6537  	    bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
6538  		return BFA_TRUE;
6539  	} else {
6540  		return BFA_FALSE;
6541  	}
6542  }
6543  
/*
 * Firmware d-port state change notification (SCN).  Updates the cached
 * test state and result data, then forwards the event to the state
 * machine.
 */
static void
bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
{
	int i;
	uint8_t subtesttype;

	bfa_trc(dport->bfa, msg->state);
	dport->i2hmsg.scn.state = msg->state;

	switch (dport->i2hmsg.scn.state) {
	case BFI_DPORT_SCN_TESTCOMP:
		/* record completion time and the final test results */
		dport->result.end_time = ktime_get_real_seconds();
		bfa_trc(dport->bfa, dport->result.end_time);

		dport->result.status = msg->info.testcomp.status;
		bfa_trc(dport->bfa, dport->result.status);

		/*
		 * NOTE(review): cpu_to_be32() is applied to values coming
		 * *from* firmware (be16_to_cpu is used just below);
		 * be32_to_cpu() would express the intent, though the two
		 * swaps are functionally identical.
		 */
		dport->result.roundtrip_latency =
			cpu_to_be32(msg->info.testcomp.latency);
		dport->result.est_cable_distance =
			cpu_to_be32(msg->info.testcomp.distance);
		dport->result.buffer_required =
			be16_to_cpu(msg->info.testcomp.numbuffer);

		dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
		dport->result.speed = msg->info.testcomp.speed;

		bfa_trc(dport->bfa, dport->result.roundtrip_latency);
		bfa_trc(dport->bfa, dport->result.est_cable_distance);
		bfa_trc(dport->bfa, dport->result.buffer_required);
		bfa_trc(dport->bfa, dport->result.frmsz);
		bfa_trc(dport->bfa, dport->result.speed);

		/* per-subtest completion status */
		for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
			dport->result.subtest[i].status =
				msg->info.testcomp.subtest_status[i];
			bfa_trc(dport->bfa, dport->result.subtest[i].status);
		}
		break;

	case BFI_DPORT_SCN_TESTSKIP:
	case BFI_DPORT_SCN_DDPORT_ENABLE:
		/* discard any stale results */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		break;

	case BFI_DPORT_SCN_TESTSTART:
		/* new test run: reset results and capture the peer info */
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		dport->rp_pwwn = msg->info.teststart.pwwn;
		dport->rp_nwwn = msg->info.teststart.nwwn;
		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
		bfa_dport_result_start(dport, msg->info.teststart.mode);
		break;

	case BFI_DPORT_SCN_SUBTESTSTART:
		/* mark the individual subtest as in progress */
		subtesttype = msg->info.teststart.type;
		dport->result.subtest[subtesttype].start_time =
			ktime_get_real_seconds();
		dport->result.subtest[subtesttype].status =
			DPORT_TEST_ST_INPRG;

		bfa_trc(dport->bfa, subtesttype);
		bfa_trc(dport->bfa,
			dport->result.subtest[subtesttype].start_time);
		break;

	case BFI_DPORT_SCN_SFP_REMOVED:
	case BFI_DPORT_SCN_DDPORT_DISABLED:
	case BFI_DPORT_SCN_DDPORT_DISABLE:
	case BFI_DPORT_SCN_FCPORT_DISABLE:
		dport->result.status = DPORT_TEST_ST_IDLE;
		break;

	default:
		bfa_sm_fault(dport->bfa, msg->state);
	}

	/* let the state machine react to the notification */
	bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
}
6624  
6625  /*
6626   * Dport enable
6627   *
6628   * @param[in] *bfa            - bfa data struct
6629   */
6630  bfa_status_t
bfa_dport_enable(struct bfa_s * bfa,u32 lpcnt,u32 pat,bfa_cb_diag_t cbfn,void * cbarg)6631  bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6632  				bfa_cb_diag_t cbfn, void *cbarg)
6633  {
6634  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6635  	struct bfa_dport_s  *dport = &fcdiag->dport;
6636  
6637  	/*
6638  	 * Dport is not support in MEZZ card
6639  	 */
6640  	if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6641  		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6642  		return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6643  	}
6644  
6645  	/*
6646  	 * Dport is supported in CT2 or above
6647  	 */
6648  	if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
6649  		bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
6650  		return BFA_STATUS_FEATURE_NOT_SUPPORTED;
6651  	}
6652  
6653  	/*
6654  	 * Check to see if IOC is down
6655  	*/
6656  	if (!bfa_iocfc_is_operational(bfa))
6657  		return BFA_STATUS_IOC_NON_OP;
6658  
6659  	/* if port is PBC disabled, return error */
6660  	if (bfa_fcport_is_pbcdisabled(bfa)) {
6661  		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6662  		return BFA_STATUS_PBC;
6663  	}
6664  
6665  	/*
6666  	 * Check if port mode is FC port
6667  	 */
6668  	if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6669  		bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6670  		return BFA_STATUS_CMD_NOTSUPP_CNA;
6671  	}
6672  
6673  	/*
6674  	 * Check if port is in LOOP mode
6675  	 */
6676  	if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6677  	    (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6678  		bfa_trc(dport->bfa, 0);
6679  		return BFA_STATUS_TOPOLOGY_LOOP;
6680  	}
6681  
6682  	/*
6683  	 * Check if port is TRUNK mode
6684  	 */
6685  	if (bfa_fcport_is_trunk_enabled(bfa)) {
6686  		bfa_trc(dport->bfa, 0);
6687  		return BFA_STATUS_ERROR_TRUNK_ENABLED;
6688  	}
6689  
6690  	/*
6691  	 * Check if diag loopback is running
6692  	 */
6693  	if (bfa_fcdiag_lb_is_running(bfa)) {
6694  		bfa_trc(dport->bfa, 0);
6695  		return BFA_STATUS_DIAG_BUSY;
6696  	}
6697  
6698  	/*
6699  	 * Check to see if port is disable or in dport state
6700  	 */
6701  	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6702  	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6703  		bfa_trc(dport->bfa, 0);
6704  		return BFA_STATUS_PORT_NOT_DISABLED;
6705  	}
6706  
6707  	/*
6708  	 * Check if dport is in dynamic mode
6709  	 */
6710  	if (dport->dynamic)
6711  		return BFA_STATUS_DDPORT_ERR;
6712  
6713  	/*
6714  	 * Check if dport is busy
6715  	 */
6716  	if (bfa_dport_is_sending_req(dport))
6717  		return BFA_STATUS_DEVBUSY;
6718  
6719  	/*
6720  	 * Check if dport is already enabled
6721  	 */
6722  	if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6723  		bfa_trc(dport->bfa, 0);
6724  		return BFA_STATUS_DPORT_ENABLED;
6725  	}
6726  
6727  	bfa_trc(dport->bfa, lpcnt);
6728  	bfa_trc(dport->bfa, pat);
6729  	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6730  	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6731  	dport->cbfn = cbfn;
6732  	dport->cbarg = cbarg;
6733  
6734  	bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6735  	return BFA_STATUS_OK;
6736  }
6737  
6738  /*
6739   *	Dport disable
6740   *
6741   *	@param[in] *bfa            - bfa data struct
6742   */
6743  bfa_status_t
bfa_dport_disable(struct bfa_s * bfa,bfa_cb_diag_t cbfn,void * cbarg)6744  bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6745  {
6746  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6747  	struct bfa_dport_s *dport = &fcdiag->dport;
6748  
6749  	if (bfa_ioc_is_disabled(&bfa->ioc))
6750  		return BFA_STATUS_IOC_DISABLED;
6751  
6752  	/* if port is PBC disabled, return error */
6753  	if (bfa_fcport_is_pbcdisabled(bfa)) {
6754  		bfa_trc(dport->bfa, BFA_STATUS_PBC);
6755  		return BFA_STATUS_PBC;
6756  	}
6757  
6758  	/*
6759  	 * Check if dport is in dynamic mode
6760  	 */
6761  	if (dport->dynamic) {
6762  		return BFA_STATUS_DDPORT_ERR;
6763  	}
6764  
6765  	/*
6766  	 * Check to see if port is disable or in dport state
6767  	 */
6768  	if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6769  	    (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6770  		bfa_trc(dport->bfa, 0);
6771  		return BFA_STATUS_PORT_NOT_DISABLED;
6772  	}
6773  
6774  	/*
6775  	 * Check if dport is busy
6776  	 */
6777  	if (bfa_dport_is_sending_req(dport))
6778  		return BFA_STATUS_DEVBUSY;
6779  
6780  	/*
6781  	 * Check if dport is already disabled
6782  	 */
6783  	if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6784  		bfa_trc(dport->bfa, 0);
6785  		return BFA_STATUS_DPORT_DISABLED;
6786  	}
6787  
6788  	dport->cbfn = cbfn;
6789  	dport->cbarg = cbarg;
6790  
6791  	bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6792  	return BFA_STATUS_OK;
6793  }
6794  
6795  /*
6796   * Dport start -- restart dport test
6797   *
6798   *   @param[in] *bfa		- bfa data struct
6799   */
6800  bfa_status_t
bfa_dport_start(struct bfa_s * bfa,u32 lpcnt,u32 pat,bfa_cb_diag_t cbfn,void * cbarg)6801  bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
6802  			bfa_cb_diag_t cbfn, void *cbarg)
6803  {
6804  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6805  	struct bfa_dport_s *dport = &fcdiag->dport;
6806  
6807  	/*
6808  	 * Check to see if IOC is down
6809  	 */
6810  	if (!bfa_iocfc_is_operational(bfa))
6811  		return BFA_STATUS_IOC_NON_OP;
6812  
6813  	/*
6814  	 * Check if dport is in dynamic mode
6815  	 */
6816  	if (dport->dynamic)
6817  		return BFA_STATUS_DDPORT_ERR;
6818  
6819  	/*
6820  	 * Check if dport is busy
6821  	 */
6822  	if (bfa_dport_is_sending_req(dport))
6823  		return BFA_STATUS_DEVBUSY;
6824  
6825  	/*
6826  	 * Check if dport is in enabled state.
6827  	 * Test can only be restart when previous test has completed
6828  	 */
6829  	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6830  		bfa_trc(dport->bfa, 0);
6831  		return BFA_STATUS_DPORT_DISABLED;
6832  
6833  	} else {
6834  		if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6835  			return BFA_STATUS_DPORT_INV_SFP;
6836  
6837  		if (dport->test_state == BFA_DPORT_ST_INP)
6838  			return BFA_STATUS_DEVBUSY;
6839  
6840  		WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
6841  	}
6842  
6843  	bfa_trc(dport->bfa, lpcnt);
6844  	bfa_trc(dport->bfa, pat);
6845  
6846  	dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
6847  	dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
6848  
6849  	dport->cbfn = cbfn;
6850  	dport->cbarg = cbarg;
6851  
6852  	bfa_sm_send_event(dport, BFA_DPORT_SM_START);
6853  	return BFA_STATUS_OK;
6854  }
6855  
6856  /*
6857   * Dport show -- return dport test result
6858   *
6859   *   @param[in] *bfa		- bfa data struct
6860   */
6861  bfa_status_t
bfa_dport_show(struct bfa_s * bfa,struct bfa_diag_dport_result_s * result)6862  bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
6863  {
6864  	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6865  	struct bfa_dport_s *dport = &fcdiag->dport;
6866  
6867  	/*
6868  	 * Check to see if IOC is down
6869  	 */
6870  	if (!bfa_iocfc_is_operational(bfa))
6871  		return BFA_STATUS_IOC_NON_OP;
6872  
6873  	/*
6874  	 * Check if dport is busy
6875  	 */
6876  	if (bfa_dport_is_sending_req(dport))
6877  		return BFA_STATUS_DEVBUSY;
6878  
6879  	/*
6880  	 * Check if dport is in enabled state.
6881  	 */
6882  	if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6883  		bfa_trc(dport->bfa, 0);
6884  		return BFA_STATUS_DPORT_DISABLED;
6885  
6886  	}
6887  
6888  	/*
6889  	 * Check if there is SFP
6890  	 */
6891  	if (dport->test_state == BFA_DPORT_ST_NO_SFP)
6892  		return BFA_STATUS_DPORT_INV_SFP;
6893  
6894  	memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
6895  
6896  	return BFA_STATUS_OK;
6897  }
6898