// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

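/*
 * True if the given f/w state is one of the non-operational states,
 * i.e. the IOC function may be (re)initialized.
 */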
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	((__sm) == BFI_IOC_INITING) ||		\
	((__sm) == BFI_IOC_HWINIT) ||		\
	((__sm) == BFI_IOC_DISABLED) ||		\
	((__sm) == BFI_IOC_FAIL) ||		\
	((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

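/*
 * A mailbox command is pending if either the driver-side command queue
 * is non-empty or the h/w mailbox doorbell is still owned by firmware.
 */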
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

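/*
 * When set, the IOCPF state machine retries hardware initialization
 * after a failure instead of staying in the failed state.
 */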
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
				struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
				struct bfa_ioc_s *ioc,
				struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request		*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

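/*
 * IOC is operational: complete the enable callback, notify registered
 * modules and start heartbeat monitoring.
 */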
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


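/*
 * Disable entry: forward the disable request to the IOCPF state
 * machine and log the event.
 */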
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

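/*
 * Semaphore is held; start (or restart) hardware initialization.
 */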
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

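/*
 * IOCPF is ready: report enable completion to the IOC state machine.
 */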
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/* fall through */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

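/*
 * Try to acquire the given h/w semaphore, spinning for up to
 * BFA_SEM_SPINCNT attempts (2us apart). Returns BFA_TRUE if acquired.
 */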
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		WARN_ON(r32 == ~0);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int		i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Fetch the firmware version header from IOC shared memory (SMEM).
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int		i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}

/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
		struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/*
	 * If smem is incompatible or old, driver should not work with it.
	 */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return BFA_FALSE;
	}

	/*
	 * If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it (smem f/w is not old or
	 * incompatible at this point).
	 * If flash is old or incompatible, work with smem only if
	 * smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
		return BFA_FALSE;
	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
		return BFA_TRUE;
	} else {
		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
			BFA_TRUE : BFA_FALSE;
	}
}

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr;

	bfa_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.bootenv) != boot_env) {
		bfa_trc(ioc, fwhdr.bootenv);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
				struct bfi_ioc_image_hdr_s *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return BFA_FALSE;

	return BFA_TRUE;
}

/*
 * Returns TRUE if the major, minor and maintenance versions are the same.
 * If the patch versions also match, the MD5 checksums must match as well.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return BFA_FALSE;

	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
	}

	return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return BFA_FALSE;

	return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
		fwhdr->fwver.build == 0)
		return BFA_TRUE;

	return BFA_FALSE;
}

/*
 * Compare fwhdr_to_cmp against base_fwhdr: returns whether it is better,
 * older, the same or incompatible.
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;

	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * GA takes priority over internal builds of the same patch stream.
	 * At this point the major, minor, maint and patch numbers are the same.
	 */

	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/*
	 * All version numbers are equal; the MD5 checksum was already
	 * compared as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

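/*
 * Read one BFI_FLASH_CHUNK_SZ chunk of the f/w image from the flash
 * partition, starting at word offset @off.
 */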
bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
				u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
			struct bfi_ioc_image_hdr_s *base_fwhdr)
{
	struct bfi_ioc_image_hdr_s *flash_fwhdr;
	bfa_status_t status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}


/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{

	u32	pgnum, pgoff;
	u32	loff = 0;
	enum bfi_ioc_state ioc_fwstate;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (!bfa_ioc_state_disabled(ioc_fwstate))
		return BFA_STATUS_ADAPTER_ENABLED;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);

	return BFA_STATUS_OK;
}

/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_FWBOOT_TYPE_NORMAL;
	boot_env = BFI_FWBOOT_ENV_OS;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

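/*
 * Copy a BFI message into the h/w function mailbox and ring the
 * doorbell. A typical caller builds a request and sends it, e.g.
 * (sketch mirroring bfa_ioc_send_getattr() below):
 *
 *	struct bfi_ioc_getattr_req_s attr_req;
 *
 *	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
 */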
1768  void
bfa_ioc_mbox_send(struct bfa_ioc_s * ioc,void * ioc_msg,int len)1769  bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1770  {
1771  	u32 *msgp = (u32 *) ioc_msg;
1772  	u32 i;
1773  
1774  	bfa_trc(ioc, msgp[0]);
1775  	bfa_trc(ioc, len);
1776  
1777  	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1778  
1779  	/*
1780  	 * first write msg to mailbox registers
1781  	 */
1782  	for (i = 0; i < len / sizeof(u32); i++)
1783  		writel(cpu_to_le32(msgp[i]),
1784  			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1785  
1786  	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1787  		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1788  
1789  	/*
1790  	 * write 1 to mailbox CMD to trigger LPU event
1791  	 */
1792  	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1793  	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1794  }
1795  
1796  static void
bfa_ioc_send_enable(struct bfa_ioc_s * ioc)1797  bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1798  {
1799  	struct bfi_ioc_ctrl_req_s enable_req;
1800  
1801  	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1802  		    bfa_ioc_portid(ioc));
1803  	enable_req.clscode = cpu_to_be16(ioc->clscode);
1804  	/* unsigned 32-bit time_t overflow in y2106 */
1805  	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1806  	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1807  }
1808  
1809  static void
bfa_ioc_send_disable(struct bfa_ioc_s * ioc)1810  bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1811  {
1812  	struct bfi_ioc_ctrl_req_s disable_req;
1813  
1814  	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1815  		    bfa_ioc_portid(ioc));
1816  	disable_req.clscode = cpu_to_be16(ioc->clscode);
1817  	/* unsigned 32-bit time_t overflow in y2106 */
1818  	disable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
1819  	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1820  }
1821  
1822  static void
1823  bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1824  {
1825  	struct bfi_ioc_getattr_req_s	attr_req;
1826  
1827  	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1828  		    bfa_ioc_portid(ioc));
1829  	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1830  	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1831  }
1832  
1833  static void
1834  bfa_ioc_hb_check(void *cbarg)
1835  {
1836  	struct bfa_ioc_s  *ioc = cbarg;
1837  	u32	hb_count;
1838  
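	/*
	 * Running firmware increments the heartbeat register; if the count
	 * has not moved since the last poll, the firmware is presumed dead
	 * and recovery is started.
	 */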
1839  	hb_count = readl(ioc->ioc_regs.heartbeat);
1840  	if (ioc->hb_count == hb_count) {
1841  		bfa_ioc_recover(ioc);
1842  		return;
1843  	}
1844  	ioc->hb_count = hb_count;
1846  
1847  	bfa_ioc_mbox_poll(ioc);
1848  	bfa_hb_timer_start(ioc);
1849  }
1850  
1851  static void
1852  bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1853  {
1854  	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1855  	bfa_hb_timer_start(ioc);
1856  }
1857  
1858  /*
1859   *	Initiate a full firmware download.
1860   */
1861  static bfa_status_t
1862  bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1863  		    u32 boot_env)
1864  {
1865  	u32 *fwimg;
1866  	u32 pgnum, pgoff;
1867  	u32 loff = 0;
1868  	u32 chunkno = 0;
1869  	u32 i;
1870  	u32 asicmode;
1871  	u32 fwimg_size;
1872  	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1873  	bfa_status_t status;
1874  
1875  	if (boot_env == BFI_FWBOOT_ENV_OS &&
1876  		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1877  		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1878  
1879  		status = bfa_ioc_flash_img_get_chnk(ioc,
1880  			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1881  		if (status != BFA_STATUS_OK)
1882  			return status;
1883  
1884  		fwimg = fwimg_buf;
1885  	} else {
1886  		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1887  		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1888  					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1889  	}
1890  
1891  	bfa_trc(ioc, fwimg_size);
1892  
1894  	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1895  	pgoff = PSS_SMEM_PGOFF(loff);
1896  
1897  	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1898  
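	/*
	 * Copy the image into smem one word at a time, fetching the next
	 * flash chunk whenever the word index crosses a chunk boundary.
	 */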
1899  	for (i = 0; i < fwimg_size; i++) {
1900  
1901  		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1902  			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1903  
1904  			if (boot_env == BFI_FWBOOT_ENV_OS &&
1905  				boot_type == BFI_FWBOOT_TYPE_FLASH) {
1906  				status = bfa_ioc_flash_img_get_chnk(ioc,
1907  					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1908  					fwimg_buf);
1909  				if (status != BFA_STATUS_OK)
1910  					return status;
1911  
1912  				fwimg = fwimg_buf;
1913  			} else {
1914  				fwimg = bfa_cb_image_get_chunk(
1915  					bfa_ioc_asic_gen(ioc),
1916  					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1917  			}
1918  		}
1919  
1920  		/*
1921  		 * write smem
1922  		 */
1923  		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1924  			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1925  
1926  		loff += sizeof(u32);
1927  
1928  		/*
1929  		 * handle page offset wrap around
1930  		 */
1931  		loff = PSS_SMEM_PGOFF(loff);
1932  		if (loff == 0) {
1933  			pgnum++;
1934  			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1935  		}
1936  	}
1937  
1938  	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1939  			ioc->ioc_regs.host_page_num_fn);
1940  
1941  	/*
1942  	 * Set boot type, env and device mode at the end.
1943  	 */
1944  	if (boot_env == BFI_FWBOOT_ENV_OS &&
1945  		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1946  		boot_type = BFI_FWBOOT_TYPE_NORMAL;
1947  	}
1948  	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1949  				ioc->port0_mode, ioc->port1_mode);
1950  	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1951  			swab32(asicmode));
1952  	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1953  			swab32(boot_type));
1954  	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1955  			swab32(boot_env));
1956  	return BFA_STATUS_OK;
1957  }
1958  
1960  /*
1961   * Update BFA configuration from firmware configuration.
1962   */
1963  static void
1964  bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1965  {
1966  	struct bfi_ioc_attr_s	*attr = ioc->attr;
1967  
1968  	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1969  	attr->card_type     = be32_to_cpu(attr->card_type);
1970  	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1971  	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1972  	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1973  
1974  	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1975  }
1976  
1977  /*
1978   * Attach time initialization of mbox logic.
1979   */
1980  static void
1981  bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1982  {
1983  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1984  	int	mc;
1985  
1986  	INIT_LIST_HEAD(&mod->cmd_q);
1987  	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1988  		mod->mbhdlr[mc].cbfn = NULL;
1989  		mod->mbhdlr[mc].cbarg = ioc->bfa;
1990  	}
1991  }
1992  
1993  /*
1994   * Mbox poll timer -- restarts any pending mailbox requests.
1995   */
1996  static void
1997  bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1998  {
1999  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2000  	struct bfa_mbox_cmd_s		*cmd;
2001  	u32			stat;
2002  
2003  	/*
2004  	 * If no command pending, do nothing
2005  	 */
2006  	if (list_empty(&mod->cmd_q))
2007  		return;
2008  
2009  	/*
2010  	 * If previous command is not yet fetched by firmware, do nothing
2011  	 */
2012  	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2013  	if (stat)
2014  		return;
2015  
2016  	/*
2017  	 * Enqueue command to firmware.
2018  	 */
2019  	bfa_q_deq(&mod->cmd_q, &cmd);
2020  	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2021  }
2022  
2023  /*
2024   * Cleanup any pending requests.
2025   */
2026  static void
2027  bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2028  {
2029  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2030  	struct bfa_mbox_cmd_s		*cmd;
2031  
2032  	while (!list_empty(&mod->cmd_q))
2033  		bfa_q_deq(&mod->cmd_q, &cmd);
2034  }
2035  
2036  /*
2037   * Read data from SMEM to host through PCI memmap
2038   *
2039   * @param[in]	ioc	memory for IOC
2040   * @param[in]	tbuf	app memory to store data from smem
2041   * @param[in]	soff	smem offset
2042   * @param[in]	sz	size of smem in bytes
2043   */
2044  static bfa_status_t
2045  bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2046  {
2047  	u32 pgnum, loff;
2048  	__be32 r32;
2049  	int i, len;
2050  	u32 *buf = tbuf;
2051  
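	/* translate the smem offset into a PCI window page number and offset */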
2052  	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2053  	loff = PSS_SMEM_PGOFF(soff);
2054  	bfa_trc(ioc, pgnum);
2055  	bfa_trc(ioc, loff);
2056  	bfa_trc(ioc, sz);
2057  
2058  	/*
2059  	 *  Hold semaphore to serialize pll init and fwtrc.
2060  	 */
2061  	if (!bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2062  		bfa_trc(ioc, 0);
2063  		return BFA_STATUS_FAILED;
2064  	}
2065  
2066  	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2067  
2068  	len = sz/sizeof(u32);
2069  	bfa_trc(ioc, len);
2070  	for (i = 0; i < len; i++) {
2071  		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2072  		buf[i] = swab32(r32);
2073  		loff += sizeof(u32);
2074  
2075  		/*
2076  		 * handle page offset wrap around
2077  		 */
2078  		loff = PSS_SMEM_PGOFF(loff);
2079  		if (loff == 0) {
2080  			pgnum++;
2081  			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2082  		}
2083  	}
2084  	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2085  			ioc->ioc_regs.host_page_num_fn);
2086  	/*
2087  	 *  release semaphore.
2088  	 */
2089  	readl(ioc->ioc_regs.ioc_init_sem_reg);
2090  	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2091  
2092  	bfa_trc(ioc, pgnum);
2093  	return BFA_STATUS_OK;
2094  }
2095  
2096  /*
2097   * Clear SMEM data from host through PCI memmap
2098   *
2099   * @param[in]	ioc	memory for IOC
2100   * @param[in]	soff	smem offset
2101   * @param[in]	sz	size of smem in bytes
2102   */
2103  static bfa_status_t
2104  bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2105  {
2106  	int i, len;
2107  	u32 pgnum, loff;
2108  
2109  	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2110  	loff = PSS_SMEM_PGOFF(soff);
2111  	bfa_trc(ioc, pgnum);
2112  	bfa_trc(ioc, loff);
2113  	bfa_trc(ioc, sz);
2114  
2115  	/*
2116  	 *  Hold semaphore to serialize pll init and fwtrc.
2117  	 */
2118  	if (!bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2119  		bfa_trc(ioc, 0);
2120  		return BFA_STATUS_FAILED;
2121  	}
2122  
2123  	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2124  
2125  	len = sz/sizeof(u32); /* len in words */
2126  	bfa_trc(ioc, len);
2127  	for (i = 0; i < len; i++) {
2128  		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2129  		loff += sizeof(u32);
2130  
2131  		/*
2132  		 * handle page offset wrap around
2133  		 */
2134  		loff = PSS_SMEM_PGOFF(loff);
2135  		if (loff == 0) {
2136  			pgnum++;
2137  			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2138  		}
2139  	}
2140  	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2141  			ioc->ioc_regs.host_page_num_fn);
2142  
2143  	/*
2144  	 *  release semaphore.
2145  	 */
2146  	readl(ioc->ioc_regs.ioc_init_sem_reg);
2147  	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2148  	bfa_trc(ioc, pgnum);
2149  	return BFA_STATUS_OK;
2150  }
2151  
2152  static void
2153  bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2154  {
2155  	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2156  
2157  	/*
2158  	 * Notify driver and common modules registered for notification.
2159  	 */
2160  	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2161  	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2162  
2163  	bfa_ioc_debug_save_ftrc(ioc);
2164  
2165  	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2166  		"Heart Beat of IOC has failed\n");
2167  	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2169  }
2170  
2171  static void
2172  bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2173  {
2174  	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2175  	/*
2176  	 * Provide enable completion callback.
2177  	 */
2178  	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2179  	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2180  		"Running firmware version is incompatible "
2181  		"with the driver version\n");
2182  	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2183  }
2184  
2185  bfa_status_t
2186  bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2187  {
2189  	/*
2190  	 *  Hold semaphore so that nobody can access the chip during init.
2191  	 */
2192  	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2193  
2194  	bfa_ioc_pll_init_asic(ioc);
2195  
2196  	ioc->pllinit = BFA_TRUE;
2197  
2198  	/*
2199  	 * Initialize LMEM
2200  	 */
2201  	bfa_ioc_lmem_init(ioc);
2202  
2203  	/*
2204  	 *  release semaphore.
2205  	 */
2206  	readl(ioc->ioc_regs.ioc_init_sem_reg);
2207  	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2208  
2209  	return BFA_STATUS_OK;
2210  }
2211  
2212  /*
2213   * Interface used by diag module to do firmware boot with memory test
2214   * as the entry vector.
2215   */
2216  bfa_status_t
2217  bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2218  {
2219  	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2220  	bfa_status_t status;
2221  	bfa_ioc_stats(ioc, ioc_boots);
2222  
2223  	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2224  		return BFA_STATUS_FAILED;
2225  
2226  	if (boot_env == BFI_FWBOOT_ENV_OS &&
2227  		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2228  
2229  		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2230  			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2231  
2232  		/*
2233  		 * Use the flash firmware only if it is better than the
2234  		 * driver's firmware. Otherwise push the driver's firmware.
2235  		 */
2236  		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2237  						BFI_IOC_IMG_VER_BETTER)
2238  			boot_type = BFI_FWBOOT_TYPE_FLASH;
2239  	}
2240  
2241  	/*
2242  	 * Initialize IOC state of all functions on a chip reset.
2243  	 */
2244  	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2245  		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2246  		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2247  	} else {
2248  		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2249  		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2250  	}
2251  
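	/* flush any stale mailbox messages before downloading the firmware */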
2252  	bfa_ioc_msgflush(ioc);
2253  	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2254  	if (status == BFA_STATUS_OK) {
2255  		bfa_ioc_lpu_start(ioc);
2256  	} else {
2257  		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2258  		bfa_iocpf_timeout(ioc);
2259  	}
2260  	return status;
2261  }
2262  
2263  /*
2264   * Enable/disable IOC failure auto recovery.
2265   */
2266  void
2267  bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2268  {
2269  	bfa_auto_recover = auto_recover;
2270  }
2271  
2274  bfa_boolean_t
2275  bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2276  {
2277  	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2278  }
2279  
2280  bfa_boolean_t
2281  bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2282  {
2283  	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2284  
2285  	return ((r32 != BFI_IOC_UNINIT) &&
2286  		(r32 != BFI_IOC_INITING) &&
2287  		(r32 != BFI_IOC_MEMTEST));
2288  }
2289  
2290  bfa_boolean_t
2291  bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2292  {
2293  	__be32	*msgp = mbmsg;
2294  	u32	r32;
2295  	int		i;
2296  
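	/* bit 0 of the mailbox command register is set while a message is pending */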
2297  	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2298  	if ((r32 & 1) == 0)
2299  		return BFA_FALSE;
2300  
2301  	/*
2302  	 * read the MBOX msg
2303  	 */
2304  	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2305  	     i++) {
2306  		r32 = readl(ioc->ioc_regs.lpu_mbox +
2307  				   i * sizeof(u32));
2308  		msgp[i] = cpu_to_be32(r32);
2309  	}
2310  
2311  	/*
2312  	 * turn off mailbox interrupt by clearing mailbox status
2313  	 */
2314  	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2315  	readl(ioc->ioc_regs.lpu_mbox_cmd);
2316  
2317  	return BFA_TRUE;
2318  }
2319  
2320  void
2321  bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2322  {
2323  	union bfi_ioc_i2h_msg_u	*msg;
2324  	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2325  
2326  	msg = (union bfi_ioc_i2h_msg_u *) m;
2327  
2328  	bfa_ioc_stats(ioc, ioc_isrs);
2329  
2330  	switch (msg->mh.msg_id) {
2331  	case BFI_IOC_I2H_HBEAT:
2332  		break;
2333  
2334  	case BFI_IOC_I2H_ENABLE_REPLY:
2335  		ioc->port_mode = ioc->port_mode_cfg =
2336  				(enum bfa_mode_s)msg->fw_event.port_mode;
2337  		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2338  		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2339  		break;
2340  
2341  	case BFI_IOC_I2H_DISABLE_REPLY:
2342  		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2343  		break;
2344  
2345  	case BFI_IOC_I2H_GETATTR_REPLY:
2346  		bfa_ioc_getattr_reply(ioc);
2347  		break;
2348  
2349  	default:
2350  		bfa_trc(ioc, msg->mh.msg_id);
2351  		WARN_ON(1);
2352  	}
2353  }
2354  
2355  /*
2356   * IOC attach time initialization and setup.
2357   *
2358   * @param[in]	ioc	memory for IOC
2359   * @param[in]	bfa	driver instance structure
2360   */
2361  void
2362  bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2363  	       struct bfa_timer_mod_s *timer_mod)
2364  {
2365  	ioc->bfa	= bfa;
2366  	ioc->cbfn	= cbfn;
2367  	ioc->timer_mod	= timer_mod;
2368  	ioc->fcmode	= BFA_FALSE;
2369  	ioc->pllinit	= BFA_FALSE;
2370  	ioc->dbg_fwsave_once = BFA_TRUE;
2371  	ioc->iocpf.ioc	= ioc;
2372  
2373  	bfa_ioc_mbox_attach(ioc);
2374  	INIT_LIST_HEAD(&ioc->notify_q);
2375  
2376  	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2377  	bfa_fsm_send_event(ioc, IOC_E_RESET);
2378  }
2379  
2380  /*
2381   * Driver detach time IOC cleanup.
2382   */
2383  void
2384  bfa_ioc_detach(struct bfa_ioc_s *ioc)
2385  {
2386  	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2387  	INIT_LIST_HEAD(&ioc->notify_q);
2388  }
2389  
2390  /*
2391   * Setup IOC PCI properties.
2392   *
2393   * @param[in]	pcidev	PCI device information for this IOC
2394   */
2395  void
2396  bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2397  		enum bfi_pcifn_class clscode)
2398  {
2399  	ioc->clscode	= clscode;
2400  	ioc->pcidev	= *pcidev;
2401  
2402  	/*
2403  	 * Initialize IOC and device personality
2404  	 */
2405  	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2406  	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2407  
2408  	switch (pcidev->device_id) {
2409  	case BFA_PCI_DEVICE_ID_FC_8G1P:
2410  	case BFA_PCI_DEVICE_ID_FC_8G2P:
2411  		ioc->asic_gen = BFI_ASIC_GEN_CB;
2412  		ioc->fcmode = BFA_TRUE;
2413  		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2414  		ioc->ad_cap_bm = BFA_CM_HBA;
2415  		break;
2416  
2417  	case BFA_PCI_DEVICE_ID_CT:
2418  		ioc->asic_gen = BFI_ASIC_GEN_CT;
2419  		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2420  		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2421  		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2422  		ioc->ad_cap_bm = BFA_CM_CNA;
2423  		break;
2424  
2425  	case BFA_PCI_DEVICE_ID_CT_FC:
2426  		ioc->asic_gen = BFI_ASIC_GEN_CT;
2427  		ioc->fcmode = BFA_TRUE;
2428  		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2429  		ioc->ad_cap_bm = BFA_CM_HBA;
2430  		break;
2431  
2432  	case BFA_PCI_DEVICE_ID_CT2:
2433  	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2434  		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2435  		if (clscode == BFI_PCIFN_CLASS_FC &&
2436  		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2437  			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2438  			ioc->fcmode = BFA_TRUE;
2439  			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2440  			ioc->ad_cap_bm = BFA_CM_HBA;
2441  		} else {
2442  			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2443  			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2444  			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2445  				ioc->port_mode =
2446  				ioc->port_mode_cfg = BFA_MODE_CNA;
2447  				ioc->ad_cap_bm = BFA_CM_CNA;
2448  			} else {
2449  				ioc->port_mode =
2450  				ioc->port_mode_cfg = BFA_MODE_NIC;
2451  				ioc->ad_cap_bm = BFA_CM_NIC;
2452  			}
2453  		}
2454  		break;
2455  
2456  	default:
2457  		WARN_ON(1);
2458  	}
2459  
2460  	/*
2461  	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2462  	 */
2463  	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2464  		bfa_ioc_set_cb_hwif(ioc);
2465  	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2466  		bfa_ioc_set_ct_hwif(ioc);
2467  	else {
2468  		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2469  		bfa_ioc_set_ct2_hwif(ioc);
2470  		bfa_ioc_ct2_poweron(ioc);
2471  	}
2472  
2473  	bfa_ioc_map_port(ioc);
2474  	bfa_ioc_reg_init(ioc);
2475  }
2476  
2477  /*
2478   * Initialize IOC dma memory
2479   *
2480   * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2481   * @param[in]	dm_pa	physical address of IOC dma memory
2482   */
2483  void
2484  bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
2485  {
2486  	/*
2487  	 * dma memory for firmware attribute
2488  	 */
2489  	ioc->attr_dma.kva = dm_kva;
2490  	ioc->attr_dma.pa = dm_pa;
2491  	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2492  }
2493  
2494  void
2495  bfa_ioc_enable(struct bfa_ioc_s *ioc)
2496  {
2497  	bfa_ioc_stats(ioc, ioc_enables);
2498  	ioc->dbg_fwsave_once = BFA_TRUE;
2499  
2500  	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2501  }
2502  
2503  void
2504  bfa_ioc_disable(struct bfa_ioc_s *ioc)
2505  {
2506  	bfa_ioc_stats(ioc, ioc_disables);
2507  	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2508  }
2509  
2510  void
2511  bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2512  {
2513  	ioc->dbg_fwsave_once = BFA_TRUE;
2514  	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2515  }
2516  
2517  /*
2518   * Initialize memory for saving firmware trace. Driver must initialize
2519   * trace memory before calling bfa_ioc_enable().
2520   */
2521  void
2522  bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2523  {
2524  	ioc->dbg_fwsave	    = dbg_fwsave;
2525  	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2526  }
2527  
2528  /*
2529   * Register mailbox message handler functions
2530   *
2531   * @param[in]	ioc		IOC instance
2532   * @param[in]	mcfuncs		message class handler functions
2533   */
2534  void
2535  bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2536  {
2537  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2538  	int				mc;
2539  
2540  	for (mc = 0; mc < BFI_MC_MAX; mc++)
2541  		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2542  }
2543  
2544  /*
2545   * Register mailbox message handler function, to be called by common modules
2546   */
2547  void
2548  bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2549  		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2550  {
2551  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2552  
2553  	mod->mbhdlr[mc].cbfn	= cbfn;
2554  	mod->mbhdlr[mc].cbarg	= cbarg;
2555  }
2556  
2557  /*
2558   * Queue a mailbox command request to firmware. The request is queued
2559   * if the mailbox is busy; the caller is responsible for serialization.
2560   *
2561   * @param[in]	ioc	IOC instance
2562   * @param[in]	cmd	Mailbox command
2563   */
2564  void
2565  bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2566  {
2567  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2568  	u32			stat;
2569  
2570  	/*
2571  	 * If a previous command is pending, queue new command
2572  	 */
2573  	if (!list_empty(&mod->cmd_q)) {
2574  		list_add_tail(&cmd->qe, &mod->cmd_q);
2575  		return;
2576  	}
2577  
2578  	/*
2579  	 * If mailbox is busy, queue command for poll timer
2580  	 */
2581  	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2582  	if (stat) {
2583  		list_add_tail(&cmd->qe, &mod->cmd_q);
2584  		return;
2585  	}
2586  
2587  	/*
2588  	 * mailbox is free -- queue command to firmware
2589  	 */
2590  	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2591  }
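
/*
 * A minimal usage sketch (illustrative only, mirroring bfa_ioc_send_fwsync()
 * below): build the request in cmd->msg, then queue it. The command is sent
 * immediately if the mailbox is free; otherwise the poll timer or the
 * mailbox ISR picks it up later.
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	req->clscode = cpu_to_be16(ioc->clscode);
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */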
2592  
2593  /*
2594   * Handle mailbox interrupts
2595   */
2596  void
2597  bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2598  {
2599  	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2600  	struct bfi_mbmsg_s		m;
2601  	int				mc;
2602  
2603  	if (bfa_ioc_msgget(ioc, &m)) {
2604  		/*
2605  		 * Treat IOC message class as special.
2606  		 */
2607  		mc = m.mh.msg_class;
2608  		if (mc == BFI_MC_IOC) {
2609  			bfa_ioc_isr(ioc, &m);
2610  			return;
2611  		}
2612  
2613  		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2614  			return;
2615  
2616  		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2617  	}
2618  
2619  	bfa_ioc_lpu_read_stat(ioc);
2620  
2621  	/*
2622  	 * Try to send pending mailbox commands
2623  	 */
2624  	bfa_ioc_mbox_poll(ioc);
2625  }
2626  
2627  void
2628  bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2629  {
2630  	bfa_ioc_stats(ioc, ioc_hbfails);
2631  	ioc->stats.hb_count = ioc->hb_count;
2632  	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2633  }
2634  
2635  /*
2636   * return true if IOC is disabled
2637   */
2638  bfa_boolean_t
2639  bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2640  {
2641  	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2642  		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2643  }
2644  
2645  /*
2646   * return true if IOC firmware is different.
2647   */
2648  bfa_boolean_t
2649  bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2650  {
2651  	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2652  		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2653  		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2654  }
2655  
2656  /*
2657   * Check if adapter is disabled -- both IOCs should be in a disabled
2658   * state.
2659   */
2660  bfa_boolean_t
2661  bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2662  {
2663  	u32	ioc_state;
2664  
2665  	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2666  		return BFA_FALSE;
2667  
2668  	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2669  	if (!bfa_ioc_state_disabled(ioc_state))
2670  		return BFA_FALSE;
2671  
2672  	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2673  		ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2674  		if (!bfa_ioc_state_disabled(ioc_state))
2675  			return BFA_FALSE;
2676  	}
2677  
2678  	return BFA_TRUE;
2679  }
2680  
2681  /*
2682   * Reset IOC fwstate registers.
2683   */
2684  void
2685  bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2686  {
2687  	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2688  	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2689  }
2690  
2691  #define BFA_MFG_NAME "QLogic"
2692  void
2693  bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2694  			 struct bfa_adapter_attr_s *ad_attr)
2695  {
2696  	struct bfi_ioc_attr_s	*ioc_attr;
2697  
2698  	ioc_attr = ioc->attr;
2699  
2700  	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2701  	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2702  	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2703  	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2704  	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2705  		      sizeof(struct bfa_mfg_vpd_s));
2706  
2707  	ad_attr->nports = bfa_ioc_get_nports(ioc);
2708  	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2709  
2710  	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2711  	/* For now, model descr uses same model string */
2712  	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2713  
2714  	ad_attr->card_type = ioc_attr->card_type;
2715  	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2716  
2717  	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2718  		ad_attr->prototype = 1;
2719  	else
2720  		ad_attr->prototype = 0;
2721  
2722  	ad_attr->pwwn = ioc->attr->pwwn;
2723  	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2724  
2725  	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2726  	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2727  	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2728  	ad_attr->asic_rev = ioc_attr->asic_rev;
2729  
2730  	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2731  
2732  	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2733  	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2734  				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2735  	ad_attr->mfg_day = ioc_attr->mfg_day;
2736  	ad_attr->mfg_month = ioc_attr->mfg_month;
2737  	ad_attr->mfg_year = ioc_attr->mfg_year;
2738  	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2739  }
2740  
2741  enum bfa_ioc_type_e
2742  bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2743  {
2744  	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2745  		return BFA_IOC_TYPE_LL;
2746  
2747  	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2748  
2749  	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2750  		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2751  }
2752  
2753  void
2754  bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2755  {
2756  	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2757  	memcpy((void *)serial_num,
2758  			(void *)ioc->attr->brcd_serialnum,
2759  			BFA_ADAPTER_SERIAL_NUM_LEN);
2760  }
2761  
2762  void
2763  bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2764  {
2765  	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2766  	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2767  }
2768  
2769  void
2770  bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2771  {
2772  	WARN_ON(!chip_rev);
2773  
2774  	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2775  
2776  	chip_rev[0] = 'R';
2777  	chip_rev[1] = 'e';
2778  	chip_rev[2] = 'v';
2779  	chip_rev[3] = '-';
2780  	chip_rev[4] = ioc->attr->asic_rev;
2781  	chip_rev[5] = '\0';
2782  }
2783  
2784  void
2785  bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2786  {
2787  	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2788  	memcpy(optrom_ver, ioc->attr->optrom_version,
2789  		      BFA_VERSION_LEN);
2790  }
2791  
2792  void
2793  bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2794  {
2795  	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2796  	strscpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2797  }
2798  
2799  void
2800  bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2801  {
2802  	struct bfi_ioc_attr_s	*ioc_attr;
2803  	u8 nports = bfa_ioc_get_nports(ioc);
2804  
2805  	WARN_ON(!model);
2806  	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2807  
2808  	ioc_attr = ioc->attr;
2809  
2810  	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2811  		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2812  		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2813  			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2814  	else
2815  		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2816  			BFA_MFG_NAME, ioc_attr->card_type);
2817  }
2818  
2819  enum bfa_ioc_state
2820  bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2821  {
2822  	enum bfa_iocpf_state iocpf_st;
2823  	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2824  
2825  	if (ioc_st == BFA_IOC_ENABLING ||
2826  		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2827  
2828  		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2829  
2830  		switch (iocpf_st) {
2831  		case BFA_IOCPF_SEMWAIT:
2832  			ioc_st = BFA_IOC_SEMWAIT;
2833  			break;
2834  
2835  		case BFA_IOCPF_HWINIT:
2836  			ioc_st = BFA_IOC_HWINIT;
2837  			break;
2838  
2839  		case BFA_IOCPF_FWMISMATCH:
2840  			ioc_st = BFA_IOC_FWMISMATCH;
2841  			break;
2842  
2843  		case BFA_IOCPF_FAIL:
2844  			ioc_st = BFA_IOC_FAIL;
2845  			break;
2846  
2847  		case BFA_IOCPF_INITFAIL:
2848  			ioc_st = BFA_IOC_INITFAIL;
2849  			break;
2850  
2851  		default:
2852  			break;
2853  		}
2854  	}
2855  
2856  	return ioc_st;
2857  }
2858  
2859  void
2860  bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2861  {
2862  	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2863  
2864  	ioc_attr->state = bfa_ioc_get_state(ioc);
2865  	ioc_attr->port_id = bfa_ioc_portid(ioc);
2866  	ioc_attr->port_mode = ioc->port_mode;
2867  	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2868  	ioc_attr->cap_bm = ioc->ad_cap_bm;
2869  
2870  	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2871  
2872  	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2873  
2874  	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2875  	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2876  	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2877  	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2878  }
2879  
2880  mac_t
2881  bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2882  {
2883  	/*
2884  	 * Check the IOC type and return the appropriate MAC
2885  	 */
2886  	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2887  		return ioc->attr->fcoe_mac;
2888  	else
2889  		return ioc->attr->mac;
2890  }
2891  
2892  mac_t
2893  bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2894  {
2895  	mac_t	m;
2896  
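	/*
	 * Derive a per-function MAC from the manufacturing base MAC: old
	 * WWN/MAC models add the PCI function number to the last byte,
	 * newer models increment the low 24 bits.
	 */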
2897  	m = ioc->attr->mfg_mac;
2898  	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2899  		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2900  	else
2901  		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2902  			bfa_ioc_pcifn(ioc));
2903  
2904  	return m;
2905  }
2906  
2907  /*
2908   * Send AEN notification
2909   */
2910  void
2911  bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2912  {
2913  	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2914  	struct bfa_aen_entry_s	*aen_entry;
2915  	enum bfa_ioc_type_e ioc_type;
2916  
2917  	bfad_get_aen_entry(bfad, aen_entry);
2918  	if (!aen_entry)
2919  		return;
2920  
2921  	ioc_type = bfa_ioc_get_type(ioc);
2922  	switch (ioc_type) {
2923  	case BFA_IOC_TYPE_FC:
2924  		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2925  		break;
2926  	case BFA_IOC_TYPE_FCoE:
2927  		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2928  		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2929  		break;
2930  	case BFA_IOC_TYPE_LL:
2931  		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2932  		break;
2933  	default:
2934  		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2935  		break;
2936  	}
2937  
2938  	/* Send the AEN notification */
2939  	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2940  	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2941  				  BFA_AEN_CAT_IOC, event);
2942  }
2943  
2944  /*
2945   * Retrieve saved firmware trace from a prior IOC failure.
2946   */
2947  bfa_status_t
2948  bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2949  {
2950  	int	tlen;
2951  
2952  	if (ioc->dbg_fwsave_len == 0)
2953  		return BFA_STATUS_ENOFSAVE;
2954  
2955  	tlen = *trclen;
2956  	if (tlen > ioc->dbg_fwsave_len)
2957  		tlen = ioc->dbg_fwsave_len;
2958  
2959  	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2960  	*trclen = tlen;
2961  	return BFA_STATUS_OK;
2962  }
2963  
2965  /*
2966   * Read the current firmware trace from IOC smem.
2967   */
2968  bfa_status_t
2969  bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2970  {
2971  	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2972  	int tlen;
2973  	bfa_status_t status;
2974  
2975  	bfa_trc(ioc, *trclen);
2976  
2977  	tlen = *trclen;
2978  	if (tlen > BFA_DBG_FWTRC_LEN)
2979  		tlen = BFA_DBG_FWTRC_LEN;
2980  
2981  	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2982  	*trclen = tlen;
2983  	return status;
2984  }
2985  
2986  static void
2987  bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2988  {
2989  	struct bfa_mbox_cmd_s cmd;
2990  	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2991  
2992  	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2993  		    bfa_ioc_portid(ioc));
2994  	req->clscode = cpu_to_be16(ioc->clscode);
2995  	bfa_ioc_mbox_queue(ioc, &cmd);
2996  }
2997  
2998  static void
2999  bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
3000  {
3001  	u32 fwsync_iter = 1000;
3002  
3003  	bfa_ioc_send_fwsync(ioc);
3004  
3005  	/*
3006  	 * After sending a fw sync mbox command, wait for it to
3007  	 * take effect. We do not wait for a response because:
3008  	 *    1. the fw_sync mbox cmd has no response, and
3009  	 *    2. even if it did, interrupts might not be enabled
3010  	 *	 when this function is called.
3011  	 * So, just keep checking whether any mbox cmd is pending and,
3012  	 * after waiting a reasonable amount of time, go ahead anyway.
3013  	 * It is possible that the fw has crashed and the mbox command
3014  	 * is never acknowledged.
3015  	 */
3016  	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3017  		fwsync_iter--;
3018  }
3019  
3020  /*
3021   * Dump firmware smem
3022   */
3023  bfa_status_t
3024  bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3025  				u32 *offset, int *buflen)
3026  {
3027  	u32 loff;
3028  	int dlen;
3029  	bfa_status_t status;
3030  	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3031  
3032  	if (*offset >= smem_len) {
3033  		*offset = *buflen = 0;
3034  		return BFA_STATUS_EINVAL;
3035  	}
3036  
3037  	loff = *offset;
3038  	dlen = *buflen;
3039  
3040  	/*
3041  	 * On the first smem read, sync smem before proceeding.
3042  	 * There is no need to sync before reading every chunk.
3043  	 */
3044  	if (loff == 0)
3045  		bfa_ioc_fwsync(ioc);
3046  
3047  	if ((loff + dlen) >= smem_len)
3048  		dlen = smem_len - loff;
3049  
3050  	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3051  
3052  	if (status != BFA_STATUS_OK) {
3053  		*offset = *buflen = 0;
3054  		return status;
3055  	}
3056  
3057  	*offset += dlen;
3058  
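	/* wrap the read cursor so the next call restarts from the beginning */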
3059  	if (*offset >= smem_len)
3060  		*offset = 0;
3061  
3062  	*buflen = dlen;
3063  
3064  	return status;
3065  }
3066  
3067  /*
3068   * Firmware statistics
3069   */
3070  bfa_status_t
3071  bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3072  {
3073  	u32 loff = BFI_IOC_FWSTATS_OFF +
3074  		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3075  	int tlen;
3076  	bfa_status_t status;
3077  
3078  	if (ioc->stats_busy) {
3079  		bfa_trc(ioc, ioc->stats_busy);
3080  		return BFA_STATUS_DEVBUSY;
3081  	}
3082  	ioc->stats_busy = BFA_TRUE;
3083  
3084  	tlen = sizeof(struct bfa_fw_stats_s);
3085  	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3086  
3087  	ioc->stats_busy = BFA_FALSE;
3088  	return status;
3089  }
3090  
3091  bfa_status_t
3092  bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3093  {
3094  	u32 loff = BFI_IOC_FWSTATS_OFF +
3095  		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3096  	int tlen;
3097  	bfa_status_t status;
3098  
3099  	if (ioc->stats_busy) {
3100  		bfa_trc(ioc, ioc->stats_busy);
3101  		return BFA_STATUS_DEVBUSY;
3102  	}
3103  	ioc->stats_busy = BFA_TRUE;
3104  
3105  	tlen = sizeof(struct bfa_fw_stats_s);
3106  	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3107  
3108  	ioc->stats_busy = BFA_FALSE;
3109  	return status;
3110  }
3111  
3112  /*
3113   * Save firmware trace if configured.
3114   */
3115  void
3116  bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3117  {
3118  	int		tlen;
3119  
3120  	if (ioc->dbg_fwsave_once) {
3121  		ioc->dbg_fwsave_once = BFA_FALSE;
3122  		if (ioc->dbg_fwsave_len) {
3123  			tlen = ioc->dbg_fwsave_len;
3124  			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3125  		}
3126  	}
3127  }
3128  
3129  /*
3130   * Firmware failure detected. Start recovery actions.
3131   */
3132  static void
3133  bfa_ioc_recover(struct bfa_ioc_s *ioc)
3134  {
3135  	bfa_ioc_stats(ioc, ioc_hbfails);
3136  	ioc->stats.hb_count = ioc->hb_count;
3137  	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3138  }
3139  
3140  /*
3141   *  BFA IOC PF private functions
3142   */
3143  static void
3144  bfa_iocpf_timeout(void *ioc_arg)
3145  {
3146  	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3147  
3148  	bfa_trc(ioc, 0);
3149  	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3150  }
3151  
3152  static void
3153  bfa_iocpf_sem_timeout(void *ioc_arg)
3154  {
3155  	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3156  
3157  	bfa_ioc_hw_sem_get(ioc);
3158  }
3159  
3160  static void
3161  bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3162  {
3163  	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3164  
3165  	bfa_trc(ioc, fwstate);
3166  
3167  	if (fwstate == BFI_IOC_DISABLED) {
3168  		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3169  		return;
3170  	}
3171  
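	/*
	 * Give the firmware up to 3 * BFA_IOC_TOV to become ready, polling
	 * every BFA_IOC_POLL_TOV; declare a timeout otherwise.
	 */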
3172  	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) {
3173  		bfa_iocpf_timeout(ioc);
3174  	} else {
3175  		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3176  		bfa_iocpf_poll_timer_start(ioc);
3177  	}
3178  }
3179  
3180  static void
3181  bfa_iocpf_poll_timeout(void *ioc_arg)
3182  {
3183  	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3184  
3185  	bfa_ioc_poll_fwinit(ioc);
3186  }
3187  
3188  /*
3189   *  bfa timer function
3190   */
3191  void
3192  bfa_timer_beat(struct bfa_timer_mod_s *mod)
3193  {
3194  	struct list_head *qh = &mod->timer_q;
3195  	struct list_head *qe, *qe_next;
3196  	struct bfa_timer_s *elem;
3197  	struct list_head timedout_q;
3198  
3199  	INIT_LIST_HEAD(&timedout_q);
3200  
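	/*
	 * Age every armed timer by one tick (BFA_TIMER_FREQ). Expired timers
	 * are first moved to a private list so that their callbacks (which
	 * may re-arm timers) run only after the walk of timer_q is complete.
	 */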
3201  	qe = bfa_q_next(qh);
3202  
3203  	while (qe != qh) {
3204  		qe_next = bfa_q_next(qe);
3205  
3206  		elem = (struct bfa_timer_s *) qe;
3207  		if (elem->timeout <= BFA_TIMER_FREQ) {
3208  			elem->timeout = 0;
3209  			list_del(&elem->qe);
3210  			list_add_tail(&elem->qe, &timedout_q);
3211  		} else {
3212  			elem->timeout -= BFA_TIMER_FREQ;
3213  		}
3214  
3215  		qe = qe_next;	/* go to next elem */
3216  	}
3217  
3218  	/*
3219  	 * Pop all the timeout entries
3220  	 */
3221  	while (!list_empty(&timedout_q)) {
3222  		bfa_q_deq(&timedout_q, &elem);
3223  		elem->timercb(elem->arg);
3224  	}
3225  }
3226  
3227  /*
3228   * Should be called with lock protection
3229   */
3230  void
3231  bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3232  		    void (*timercb) (void *), void *arg, unsigned int timeout)
3233  {
3235  	WARN_ON(timercb == NULL);
3236  	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3237  
3238  	timer->timeout = timeout;
3239  	timer->timercb = timercb;
3240  	timer->arg = arg;
3241  
3242  	list_add_tail(&timer->qe, &mod->timer_q);
3243  }
3244  
3245  /*
3246   * Should be called with lock protection
3247   */
3248  void
3249  bfa_timer_stop(struct bfa_timer_s *timer)
3250  {
3251  	WARN_ON(list_empty(&timer->qe));
3252  
3253  	list_del(&timer->qe);
3254  }
3255  
3256  /*
3257   *	ASIC block related
3258   */
3259  static void
3260  bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3261  {
3262  	struct bfa_ablk_cfg_inst_s *cfg_inst;
3263  	int i, j;
3264  	u16	be16;
3265  
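	/* the firmware fills these fields in big-endian; swap each u16 in place */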
3266  	for (i = 0; i < BFA_ABLK_MAX; i++) {
3267  		cfg_inst = &cfg->inst[i];
3268  		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3269  			be16 = cfg_inst->pf_cfg[j].pers;
3270  			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3271  			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3272  			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3273  			be16 = cfg_inst->pf_cfg[j].num_vectors;
3274  			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3275  			be16 = cfg_inst->pf_cfg[j].bw_min;
3276  			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3277  			be16 = cfg_inst->pf_cfg[j].bw_max;
3278  			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3279  		}
3280  	}
3281  }
3282  
3283  static void
3284  bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3285  {
3286  	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3287  	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3288  	bfa_ablk_cbfn_t cbfn;
3289  
3290  	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3291  	bfa_trc(ablk->ioc, msg->mh.msg_id);
3292  
3293  	switch (msg->mh.msg_id) {
3294  	case BFI_ABLK_I2H_QUERY:
3295  		if (rsp->status == BFA_STATUS_OK) {
3296  			memcpy(ablk->cfg, ablk->dma_addr.kva,
3297  				sizeof(struct bfa_ablk_cfg_s));
3298  			bfa_ablk_config_swap(ablk->cfg);
3299  			ablk->cfg = NULL;
3300  		}
3301  		break;
3302  
3303  	case BFI_ABLK_I2H_ADPT_CONFIG:
3304  	case BFI_ABLK_I2H_PORT_CONFIG:
3305  		/* update config port mode */
3306  		ablk->ioc->port_mode_cfg = rsp->port_mode;
3307  		fallthrough;
3308  	case BFI_ABLK_I2H_PF_DELETE:
3309  	case BFI_ABLK_I2H_PF_UPDATE:
3310  	case BFI_ABLK_I2H_OPTROM_ENABLE:
3311  	case BFI_ABLK_I2H_OPTROM_DISABLE:
3312  		/* No-op */
3313  		break;
3314  
3315  	case BFI_ABLK_I2H_PF_CREATE:
3316  		*(ablk->pcifn) = rsp->pcifn;
3317  		ablk->pcifn = NULL;
3318  		break;
3319  
3320  	default:
3321  		WARN_ON(1);
3322  	}
3323  
3324  	ablk->busy = BFA_FALSE;
3325  	if (ablk->cbfn) {
3326  		cbfn = ablk->cbfn;
3327  		ablk->cbfn = NULL;
3328  		cbfn(ablk->cbarg, rsp->status);
3329  	}
3330  }
3331  
3332  static void
3333  bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3334  {
3335  	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3336  
3337  	bfa_trc(ablk->ioc, event);
3338  
3339  	switch (event) {
3340  	case BFA_IOC_E_ENABLED:
3341  		WARN_ON(ablk->busy != BFA_FALSE);
3342  		break;
3343  
3344  	case BFA_IOC_E_DISABLED:
3345  	case BFA_IOC_E_FAILED:
3346  		/* Fail any pending requests */
3347  		ablk->pcifn = NULL;
3348  		if (ablk->busy) {
3349  			if (ablk->cbfn)
3350  				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3351  			ablk->cbfn = NULL;
3352  			ablk->busy = BFA_FALSE;
3353  		}
3354  		break;
3355  
3356  	default:
3357  		WARN_ON(1);
3358  		break;
3359  	}
3360  }
3361  
3362  u32
3363  bfa_ablk_meminfo(void)
3364  {
3365  	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3366  }
3367  
3368  void
3369  bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3370  {
3371  	ablk->dma_addr.kva = dma_kva;
3372  	ablk->dma_addr.pa  = dma_pa;
3373  }
3374  
3375  void
3376  bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3377  {
3378  	ablk->ioc = ioc;
3379  
3380  	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3381  	bfa_q_qe_init(&ablk->ioc_notify);
3382  	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3383  	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3384  }
3385  
3386  bfa_status_t
3387  bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3388  		bfa_ablk_cbfn_t cbfn, void *cbarg)
3389  {
3390  	struct bfi_ablk_h2i_query_s *m;
3391  
3392  	WARN_ON(!ablk_cfg);
3393  
3394  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3395  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3396  		return BFA_STATUS_IOC_FAILURE;
3397  	}
3398  
3399  	if (ablk->busy) {
3400  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3401  		return  BFA_STATUS_DEVBUSY;
3402  	}
3403  
3404  	ablk->cfg = ablk_cfg;
3405  	ablk->cbfn  = cbfn;
3406  	ablk->cbarg = cbarg;
3407  	ablk->busy  = BFA_TRUE;
3408  
3409  	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3410  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3411  		    bfa_ioc_portid(ablk->ioc));
3412  	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3413  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3414  
3415  	return BFA_STATUS_OK;
3416  }
3417  
3418  bfa_status_t
3419  bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3420  		u8 port, enum bfi_pcifn_class personality,
3421  		u16 bw_min, u16 bw_max,
3422  		bfa_ablk_cbfn_t cbfn, void *cbarg)
3423  {
3424  	struct bfi_ablk_h2i_pf_req_s *m;
3425  
3426  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3427  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3428  		return BFA_STATUS_IOC_FAILURE;
3429  	}
3430  
3431  	if (ablk->busy) {
3432  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3433  		return  BFA_STATUS_DEVBUSY;
3434  	}
3435  
3436  	ablk->pcifn = pcifn;
3437  	ablk->cbfn = cbfn;
3438  	ablk->cbarg = cbarg;
3439  	ablk->busy  = BFA_TRUE;
3440  
3441  	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3442  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3443  		    bfa_ioc_portid(ablk->ioc));
3444  	m->pers = cpu_to_be16((u16)personality);
3445  	m->bw_min = cpu_to_be16(bw_min);
3446  	m->bw_max = cpu_to_be16(bw_max);
3447  	m->port = port;
3448  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3449  
3450  	return BFA_STATUS_OK;
3451  }
3452  
3453  bfa_status_t
3454  bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3455  		bfa_ablk_cbfn_t cbfn, void *cbarg)
3456  {
3457  	struct bfi_ablk_h2i_pf_req_s *m;
3458  
3459  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3460  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3461  		return BFA_STATUS_IOC_FAILURE;
3462  	}
3463  
3464  	if (ablk->busy) {
3465  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3466  		return  BFA_STATUS_DEVBUSY;
3467  	}
3468  
3469  	ablk->cbfn  = cbfn;
3470  	ablk->cbarg = cbarg;
3471  	ablk->busy  = BFA_TRUE;
3472  
3473  	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3474  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3475  		    bfa_ioc_portid(ablk->ioc));
3476  	m->pcifn = (u8)pcifn;
3477  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3478  
3479  	return BFA_STATUS_OK;
3480  }
3481  
3482  bfa_status_t
3483  bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3484  		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3485  {
3486  	struct bfi_ablk_h2i_cfg_req_s *m;
3487  
3488  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3489  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3490  		return BFA_STATUS_IOC_FAILURE;
3491  	}
3492  
3493  	if (ablk->busy) {
3494  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3495  		return  BFA_STATUS_DEVBUSY;
3496  	}
3497  
3498  	ablk->cbfn  = cbfn;
3499  	ablk->cbarg = cbarg;
3500  	ablk->busy  = BFA_TRUE;
3501  
3502  	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3503  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3504  		    bfa_ioc_portid(ablk->ioc));
3505  	m->mode = (u8)mode;
3506  	m->max_pf = (u8)max_pf;
3507  	m->max_vf = (u8)max_vf;
3508  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3509  
3510  	return BFA_STATUS_OK;
3511  }
3512  
3513  bfa_status_t
3514  bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3515  		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3516  {
3517  	struct bfi_ablk_h2i_cfg_req_s *m;
3518  
3519  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3520  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3521  		return BFA_STATUS_IOC_FAILURE;
3522  	}
3523  
3524  	if (ablk->busy) {
3525  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3526  		return  BFA_STATUS_DEVBUSY;
3527  	}
3528  
3529  	ablk->cbfn  = cbfn;
3530  	ablk->cbarg = cbarg;
3531  	ablk->busy  = BFA_TRUE;
3532  
3533  	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3534  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3535  		bfa_ioc_portid(ablk->ioc));
3536  	m->port = (u8)port;
3537  	m->mode = (u8)mode;
3538  	m->max_pf = (u8)max_pf;
3539  	m->max_vf = (u8)max_vf;
3540  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3541  
3542  	return BFA_STATUS_OK;
3543  }
3544  
3545  bfa_status_t
3546  bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3547  		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3548  {
3549  	struct bfi_ablk_h2i_pf_req_s *m;
3550  
3551  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3552  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3553  		return BFA_STATUS_IOC_FAILURE;
3554  	}
3555  
3556  	if (ablk->busy) {
3557  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3558  		return  BFA_STATUS_DEVBUSY;
3559  	}
3560  
3561  	ablk->cbfn  = cbfn;
3562  	ablk->cbarg = cbarg;
3563  	ablk->busy  = BFA_TRUE;
3564  
3565  	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3566  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3567  		bfa_ioc_portid(ablk->ioc));
3568  	m->pcifn = (u8)pcifn;
3569  	m->bw_min = cpu_to_be16(bw_min);
3570  	m->bw_max = cpu_to_be16(bw_max);
3571  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3572  
3573  	return BFA_STATUS_OK;
3574  }
3575  
3576  bfa_status_t
3577  bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3578  {
3579  	struct bfi_ablk_h2i_optrom_s *m;
3580  
3581  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3582  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3583  		return BFA_STATUS_IOC_FAILURE;
3584  	}
3585  
3586  	if (ablk->busy) {
3587  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3588  		return  BFA_STATUS_DEVBUSY;
3589  	}
3590  
3591  	ablk->cbfn  = cbfn;
3592  	ablk->cbarg = cbarg;
3593  	ablk->busy  = BFA_TRUE;
3594  
3595  	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3596  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3597  		bfa_ioc_portid(ablk->ioc));
3598  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3599  
3600  	return BFA_STATUS_OK;
3601  }
3602  
3603  bfa_status_t
3604  bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3605  {
3606  	struct bfi_ablk_h2i_optrom_s *m;
3607  
3608  	if (!bfa_ioc_is_operational(ablk->ioc)) {
3609  		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3610  		return BFA_STATUS_IOC_FAILURE;
3611  	}
3612  
3613  	if (ablk->busy) {
3614  		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3615  		return  BFA_STATUS_DEVBUSY;
3616  	}
3617  
3618  	ablk->cbfn  = cbfn;
3619  	ablk->cbarg = cbarg;
3620  	ablk->busy  = BFA_TRUE;
3621  
3622  	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3623  	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3624  		bfa_ioc_portid(ablk->ioc));
3625  	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3626  
3627  	return BFA_STATUS_OK;
3628  }
3629  
3630  /*
3631   *	SFP module specific
3632   */
3633  
3634  /* forward declarations */
3635  static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3636  static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3637  static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3638  				enum bfa_port_speed portspeed);
3639  
3640  static void
3641  bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3642  {
3643  	bfa_trc(sfp, sfp->lock);
3644  	if (sfp->cbfn)
3645  		sfp->cbfn(sfp->cbarg, sfp->status);
3646  	sfp->lock = 0;
3647  	sfp->cbfn = NULL;
3648  }
3649  
3650  static void
3651  bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3652  {
3653  	bfa_trc(sfp, sfp->portspeed);
3654  	if (sfp->media) {
3655  		bfa_sfp_media_get(sfp);
3656  		if (sfp->state_query_cbfn)
3657  			sfp->state_query_cbfn(sfp->state_query_cbarg,
3658  					sfp->status);
3659  		sfp->media = NULL;
3660  	}
3661  
3662  	if (sfp->portspeed) {
3663  		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3664  		if (sfp->state_query_cbfn)
3665  			sfp->state_query_cbfn(sfp->state_query_cbarg,
3666  					sfp->status);
3667  		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3668  	}
3669  
3670  	sfp->state_query_lock = 0;
3671  	sfp->state_query_cbfn = NULL;
3672  }
3673  
3674  /*
3675   *	IOC event handler.
3676   */
3677  static void
3678  bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3679  {
3680  	struct bfa_sfp_s *sfp = sfp_arg;
3681  
3682  	bfa_trc(sfp, event);
3683  	bfa_trc(sfp, sfp->lock);
3684  	bfa_trc(sfp, sfp->state_query_lock);
3685  
3686  	switch (event) {
3687  	case BFA_IOC_E_DISABLED:
3688  	case BFA_IOC_E_FAILED:
3689  		if (sfp->lock) {
3690  			sfp->status = BFA_STATUS_IOC_FAILURE;
3691  			bfa_cb_sfp_show(sfp);
3692  		}
3693  
3694  		if (sfp->state_query_lock) {
3695  			sfp->status = BFA_STATUS_IOC_FAILURE;
3696  			bfa_cb_sfp_state_query(sfp);
3697  		}
3698  		break;
3699  
3700  	default:
3701  		break;
3702  	}
3703  }
3704  
3705  /*
3706   * Post SFP State Change Notification to the AEN
3707   */
3708  static void
3709  bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3710  {
3711  	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3712  	struct bfa_aen_entry_s  *aen_entry;
3713  	enum bfa_port_aen_event aen_evt = 0;
3714  
3715  	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3716  		      ((u64)rsp->event));
3717  
3718  	bfad_get_aen_entry(bfad, aen_entry);
3719  	if (!aen_entry)
3720  		return;
3721  
3722  	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3723  	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3724  	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3725  
3726  	switch (rsp->event) {
3727  	case BFA_SFP_SCN_INSERTED:
3728  		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3729  		break;
3730  	case BFA_SFP_SCN_REMOVED:
3731  		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3732  		break;
3733  	case BFA_SFP_SCN_FAILED:
3734  		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3735  		break;
3736  	case BFA_SFP_SCN_UNSUPPORT:
3737  		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3738  		break;
3739  	case BFA_SFP_SCN_POM:
3740  		aen_evt = BFA_PORT_AEN_SFP_POM;
3741  		aen_entry->aen_data.port.level = rsp->pomlvl;
3742  		break;
3743  	default:
3744  		bfa_trc(sfp, rsp->event);
3745  		WARN_ON(1);
3746  	}
3747  
3748  	/* Send the AEN notification */
3749  	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3750  				  BFA_AEN_CAT_PORT, aen_evt);
3751  }
3752  
3753  /*
3754   *	SFP get data send
3755   */
3756  static void
3757  bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3758  {
3759  	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3760  
3761  	bfa_trc(sfp, req->memtype);
3762  
3763  	/* build host command */
3764  	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3765  			bfa_ioc_portid(sfp->ioc));
3766  
3767  	/* send mbox cmd */
3768  	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3769  }
3770  
3771  /*
3772   *	SFP is valid, read sfp data
3773   */
3774  static void
3775  bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3776  {
3777  	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3778  
3779  	WARN_ON(sfp->lock != 0);
3780  	bfa_trc(sfp, sfp->state);
3781  
3782  	sfp->lock = 1;
3783  	sfp->memtype = memtype;
3784  	req->memtype = memtype;
3785  
3786  	/* Setup SG list */
3787  	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3788  
3789  	bfa_sfp_getdata_send(sfp);
3790  }
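
/*
 * End-to-end show flow: bfa_sfp_show() (or a state query) enters
 * bfa_sfp_getdata(), which takes sfp->lock, points the request's SG list
 * at the pre-claimed DMA buffer and queues BFI_SFP_H2I_SHOW.  Firmware
 * DMAs the eeprom image into dbuf_kva/dbuf_pa and answers with
 * BFI_SFP_I2H_SHOW, which is routed to bfa_sfp_show_comp() below to copy
 * the data out and invoke the caller's callback.
 */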
3791  
3792  /*
3793   *	SFP scn handler
3794   */
3795  static void
3796  bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3797  {
3798  	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3799  
3800  	switch (rsp->event) {
3801  	case BFA_SFP_SCN_INSERTED:
3802  		sfp->state = BFA_SFP_STATE_INSERTED;
3803  		sfp->data_valid = 0;
3804  		bfa_sfp_scn_aen_post(sfp, rsp);
3805  		break;
3806  	case BFA_SFP_SCN_REMOVED:
3807  		sfp->state = BFA_SFP_STATE_REMOVED;
3808  		sfp->data_valid = 0;
3809  		bfa_sfp_scn_aen_post(sfp, rsp);
3810  		break;
3811  	case BFA_SFP_SCN_FAILED:
3812  		sfp->state = BFA_SFP_STATE_FAILED;
3813  		sfp->data_valid = 0;
3814  		bfa_sfp_scn_aen_post(sfp, rsp);
3815  		break;
3816  	case BFA_SFP_SCN_UNSUPPORT:
3817  		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3818  		bfa_sfp_scn_aen_post(sfp, rsp);
3819  		if (!sfp->lock)
3820  			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3821  		break;
3822  	case BFA_SFP_SCN_POM:
3823  		bfa_sfp_scn_aen_post(sfp, rsp);
3824  		break;
3825  	case BFA_SFP_SCN_VALID:
3826  		sfp->state = BFA_SFP_STATE_VALID;
3827  		if (!sfp->lock)
3828  			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3829  		break;
3830  	default:
3831  		bfa_trc(sfp, rsp->event);
3832  		WARN_ON(1);
3833  	}
3834  }
3835  
3836  /*
3837   * SFP show complete
3838   */
3839  static void
3840  bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3841  {
3842  	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3843  
3844  	if (!sfp->lock) {
3845  		/*
3846  		 * receiving response after ioc failure
3847  		 */
3848  		bfa_trc(sfp, sfp->lock);
3849  		return;
3850  	}
3851  
3852  	bfa_trc(sfp, rsp->status);
3853  	if (rsp->status == BFA_STATUS_OK) {
3854  		sfp->data_valid = 1;
3855  		if (sfp->state == BFA_SFP_STATE_VALID)
3856  			sfp->status = BFA_STATUS_OK;
3857  		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3858  			sfp->status = BFA_STATUS_SFP_UNSUPP;
3859  		else
3860  			bfa_trc(sfp, sfp->state);
3861  	} else {
3862  		sfp->data_valid = 0;
3863  		sfp->status = rsp->status;
3864  		/* sfpshow shouldn't change sfp state */
3865  	}
3866  
3867  	bfa_trc(sfp, sfp->memtype);
3868  	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3869  		bfa_trc(sfp, sfp->data_valid);
3870  		if (sfp->data_valid) {
3871  			u32	size = sizeof(struct sfp_mem_s);
3872  			u8 *des = (u8 *)(sfp->sfpmem);
3873  			memcpy(des, sfp->dbuf_kva, size);
3874  		}
3875  		/*
3876  		 * Queue completion callback.
3877  		 */
3878  		bfa_cb_sfp_show(sfp);
3879  	} else
3880  		sfp->lock = 0;
3881  
3882  	bfa_trc(sfp, sfp->state_query_lock);
3883  	if (sfp->state_query_lock) {
3884  		sfp->state = rsp->state;
3885  		/* Complete callback */
3886  		bfa_cb_sfp_state_query(sfp);
3887  	}
3888  }
3889  
3890  /*
3891   *	SFP query fw sfp state
3892   */
3893  static void
3894  bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3895  {
3896  	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3897  
3898  	/* Should not be doing query if not in _INIT state */
3899  	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3900  	WARN_ON(sfp->state_query_lock != 0);
3901  	bfa_trc(sfp, sfp->state);
3902  
3903  	sfp->state_query_lock = 1;
3904  	req->memtype = 0;
3905  
3906  	if (!sfp->lock)
3907  		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3908  }
3909  
3910  static void
3911  bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3912  {
3913  	enum bfa_defs_sfp_media_e *media = sfp->media;
3914  
3915  	*media = BFA_SFP_MEDIA_UNKNOWN;
3916  
3917  	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3918  		*media = BFA_SFP_MEDIA_UNSUPPORT;
3919  	else if (sfp->state == BFA_SFP_STATE_VALID) {
3920  		union sfp_xcvr_e10g_code_u e10g;
3921  		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3922  		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3923  				(sfpmem->srlid_base.xcvr[5] >> 1);
3924  
3925  		e10g.b = sfpmem->srlid_base.xcvr[0];
3926  		bfa_trc(sfp, e10g.b);
3927  		bfa_trc(sfp, xmtr_tech);
3928  		/* check fc transmitter tech */
3929  		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3930  		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3931  		    (xmtr_tech & SFP_XMTR_TECH_CA))
3932  			*media = BFA_SFP_MEDIA_CU;
3933  		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3934  			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3935  			*media = BFA_SFP_MEDIA_EL;
3936  		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3937  			 (xmtr_tech & SFP_XMTR_TECH_LC))
3938  			*media = BFA_SFP_MEDIA_LW;
3939  		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3940  			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3941  			 (xmtr_tech & SFP_XMTR_TECH_SA))
3942  			*media = BFA_SFP_MEDIA_SW;
3943  		/* Check 10G Ethernet Compliance code */
3944  		else if (e10g.r.e10g_sr)
3945  			*media = BFA_SFP_MEDIA_SW;
3946  		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3947  			*media = BFA_SFP_MEDIA_LW;
3948  		else if (e10g.r.e10g_unall)
3949  			*media = BFA_SFP_MEDIA_UNKNOWN;
3950  		else
3951  			bfa_trc(sfp, 0);
3952  	} else
3953  		bfa_trc(sfp, sfp->state);
3954  }
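
/*
 * Worked decode (illustrative values only): with xcvr[4] == 0x01 and
 * xcvr[5] == 0x20,
 *
 *	xmtr_tech = (0x01 & 0x3) << 7 | (0x20 >> 1) = 0x80 | 0x10 = 0x90
 *
 * which is then matched against the SFP_XMTR_TECH_* masks; the xcvr[]
 * bytes are the transceiver compliance codes of the serial ID eeprom
 * (SFF-8472 layout).
 */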
3955  
3956  static bfa_status_t
3957  bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3958  {
3959  	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3960  	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3961  	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3962  	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3963  
3964  	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3965  		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3966  			return BFA_STATUS_OK;
3967  		else {
3968  			bfa_trc(sfp, e10g.b);
3969  			return BFA_STATUS_UNSUPP_SPEED;
3970  		}
3971  	}
3972  	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3973  	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3974  	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3975  	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3976  	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3977  		return BFA_STATUS_OK;
3978  	else {
3979  		bfa_trc(sfp, portspeed);
3980  		bfa_trc(sfp, fc3.b);
3981  		bfa_trc(sfp, e10g.b);
3982  		return BFA_STATUS_UNSUPP_SPEED;
3983  	}
3984  }
3985  
3986  /*
3987   *	SFP hmbox handler
3988   */
3989  void
3990  bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3991  {
3992  	struct bfa_sfp_s *sfp = sfparg;
3993  
3994  	switch (msg->mh.msg_id) {
3995  	case BFI_SFP_I2H_SHOW:
3996  		bfa_sfp_show_comp(sfp, msg);
3997  		break;
3998  
3999  	case BFI_SFP_I2H_SCN:
4000  		bfa_sfp_scn(sfp, msg);
4001  		break;
4002  
4003  	default:
4004  		bfa_trc(sfp, msg->mh.msg_id);
4005  		WARN_ON(1);
4006  	}
4007  }
4008  
4009  /*
4010   *	Return DMA memory needed by sfp module.
4011   */
4012  u32
4013  bfa_sfp_meminfo(void)
4014  {
4015  	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4016  }
4017  
4018  /*
4019   *	Attach virtual and physical memory for SFP.
4020   */
4021  void
4022  bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4023  		struct bfa_trc_mod_s *trcmod)
4024  {
4025  	sfp->dev = dev;
4026  	sfp->ioc = ioc;
4027  	sfp->trcmod = trcmod;
4028  
4029  	sfp->cbfn = NULL;
4030  	sfp->cbarg = NULL;
4031  	sfp->sfpmem = NULL;
4032  	sfp->lock = 0;
4033  	sfp->data_valid = 0;
4034  	sfp->state = BFA_SFP_STATE_INIT;
4035  	sfp->state_query_lock = 0;
4036  	sfp->state_query_cbfn = NULL;
4037  	sfp->state_query_cbarg = NULL;
4038  	sfp->media = NULL;
4039  	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4040  	sfp->is_elb = BFA_FALSE;
4041  
4042  	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4043  	bfa_q_qe_init(&sfp->ioc_notify);
4044  	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4045  	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4046  }
4047  
4048  /*
4049   *	Claim Memory for SFP
4050   */
4051  void
4052  bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4053  {
4054  	sfp->dbuf_kva   = dm_kva;
4055  	sfp->dbuf_pa    = dm_pa;
4056  	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4057  
4058  	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4059  	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4060  }
4061  
4062  /*
4063   * Show SFP eeprom content
4064   *
4065   * @param[in] sfp   - bfa sfp module
4066   *
4067   * @param[out] sfpmem - sfp eeprom data
4068   *
4069   */
4070  bfa_status_t
4071  bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4072  		bfa_cb_sfp_t cbfn, void *cbarg)
4073  {
4074  
4075  	if (!bfa_ioc_is_operational(sfp->ioc)) {
4076  		bfa_trc(sfp, 0);
4077  		return BFA_STATUS_IOC_NON_OP;
4078  	}
4079  
4080  	if (sfp->lock) {
4081  		bfa_trc(sfp, 0);
4082  		return BFA_STATUS_DEVBUSY;
4083  	}
4084  
4085  	sfp->cbfn = cbfn;
4086  	sfp->cbarg = cbarg;
4087  	sfp->sfpmem = sfpmem;
4088  
4089  	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4090  	return BFA_STATUS_OK;
4091  }
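
/*
 * Minimal usage sketch (my_sfp_done, my_drv and my_sfpmem are
 * hypothetical; the (cbarg, status) callback shape is assumed from
 * bfa_cb_sfp_t):
 *
 *	if (bfa_sfp_show(sfp, &my_sfpmem, my_sfp_done, my_drv) ==
 *	    BFA_STATUS_OK)
 *		...	// my_sfp_done fires on the BFI_SFP_I2H_SHOW reply
 *	else
 *		...	// IOC_NON_OP or DEVBUSY; retry later
 */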
4092  
4093  /*
4094   * Return SFP Media type
4095   *
4096   * @param[in] sfp   - bfa sfp module
4097   *
4098   * @param[out] media - sfp media type
4099   *
4100   */
4101  bfa_status_t
4102  bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4103  		bfa_cb_sfp_t cbfn, void *cbarg)
4104  {
4105  	if (!bfa_ioc_is_operational(sfp->ioc)) {
4106  		bfa_trc(sfp, 0);
4107  		return BFA_STATUS_IOC_NON_OP;
4108  	}
4109  
4110  	sfp->media = media;
4111  	if (sfp->state == BFA_SFP_STATE_INIT) {
4112  		if (sfp->state_query_lock) {
4113  			bfa_trc(sfp, 0);
4114  			return BFA_STATUS_DEVBUSY;
4115  		} else {
4116  			sfp->state_query_cbfn = cbfn;
4117  			sfp->state_query_cbarg = cbarg;
4118  			bfa_sfp_state_query(sfp);
4119  			return BFA_STATUS_SFP_NOT_READY;
4120  		}
4121  	}
4122  
4123  	bfa_sfp_media_get(sfp);
4124  	return BFA_STATUS_OK;
4125  }
4126  
4127  /*
4128   * Check if user set port speed is allowed by the SFP
4129   *
4130   * @param[in] sfp   - bfa sfp module
4131   * @param[in] portspeed - port speed from user
4132   *
4133   */
4134  bfa_status_t
4135  bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4136  		bfa_cb_sfp_t cbfn, void *cbarg)
4137  {
4138  	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4139  
4140  	if (!bfa_ioc_is_operational(sfp->ioc))
4141  		return BFA_STATUS_IOC_NON_OP;
4142  
4143  	/* For Mezz card, all speeds are allowed */
4144  	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4145  		return BFA_STATUS_OK;
4146  
4147  	/* Check SFP state */
4148  	sfp->portspeed = portspeed;
4149  	if (sfp->state == BFA_SFP_STATE_INIT) {
4150  		if (sfp->state_query_lock) {
4151  			bfa_trc(sfp, 0);
4152  			return BFA_STATUS_DEVBUSY;
4153  		} else {
4154  			sfp->state_query_cbfn = cbfn;
4155  			sfp->state_query_cbarg = cbarg;
4156  			bfa_sfp_state_query(sfp);
4157  			return BFA_STATUS_SFP_NOT_READY;
4158  		}
4159  	}
4160  
4161  	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4162  	    sfp->state == BFA_SFP_STATE_FAILED) {
4163  		bfa_trc(sfp, sfp->state);
4164  		return BFA_STATUS_NO_SFP_DEV;
4165  	}
4166  
4167  	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4168  		bfa_trc(sfp, sfp->state);
4169  		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4170  	}
4171  
4172  	/* For external loopback (elb), all speeds are allowed */
4173  	if (sfp->is_elb)
4174  		return BFA_STATUS_OK;
4175  
4176  	return bfa_sfp_speed_valid(sfp, portspeed);
4177  }
4178  
4179  /*
4180   *	Flash module specific
4181   */
4182  
4183  /*
4184   * FLASH DMA buffer should be big enough to hold both the MFG block and
4185   * the asic block (64k) at the same time, and should also be 2k aligned
4186   * so that a write segment does not cross a sector boundary.
4187   */
4188  #define BFA_FLASH_SEG_SZ	2048
4189  #define BFA_FLASH_DMA_BUF_SZ	\
4190  	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
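
/*
 * Size check: with a 64k (0x010000) asic block this works out to
 * 0x010000 + sizeof(struct bfa_mfg_block_s), rounded up to the next 2k
 * multiple, so a combined MFG-plus-asic image fits in one DMA buffer and
 * every chunk stays 2k aligned.
 */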
4191  
4192  static void
4193  bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4194  			int inst, int type)
4195  {
4196  	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4197  	struct bfa_aen_entry_s  *aen_entry;
4198  
4199  	bfad_get_aen_entry(bfad, aen_entry);
4200  	if (!aen_entry)
4201  		return;
4202  
4203  	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4204  	aen_entry->aen_data.audit.partition_inst = inst;
4205  	aen_entry->aen_data.audit.partition_type = type;
4206  
4207  	/* Send the AEN notification */
4208  	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4209  				  BFA_AEN_CAT_AUDIT, event);
4210  }
4211  
4212  static void
4213  bfa_flash_cb(struct bfa_flash_s *flash)
4214  {
4215  	flash->op_busy = 0;
4216  	if (flash->cbfn)
4217  		flash->cbfn(flash->cbarg, flash->status);
4218  }
4219  
4220  static void
4221  bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4222  {
4223  	struct bfa_flash_s	*flash = cbarg;
4224  
4225  	bfa_trc(flash, event);
4226  	switch (event) {
4227  	case BFA_IOC_E_DISABLED:
4228  	case BFA_IOC_E_FAILED:
4229  		if (flash->op_busy) {
4230  			flash->status = BFA_STATUS_IOC_FAILURE;
4231  			flash->cbfn(flash->cbarg, flash->status);
4232  			flash->op_busy = 0;
4233  		}
4234  		break;
4235  
4236  	default:
4237  		break;
4238  	}
4239  }
4240  
4241  /*
4242   * Send flash attribute query request.
4243   *
4244   * @param[in] cbarg - callback argument
4245   */
4246  static void
4247  bfa_flash_query_send(void *cbarg)
4248  {
4249  	struct bfa_flash_s *flash = cbarg;
4250  	struct bfi_flash_query_req_s *msg =
4251  			(struct bfi_flash_query_req_s *) flash->mb.msg;
4252  
4253  	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4254  		bfa_ioc_portid(flash->ioc));
4255  	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4256  		flash->dbuf_pa);
4257  	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4258  }
4259  
4260  /*
4261   * Send flash write request.
4262   *
4263   * @param[in] cbarg - callback argument
4264   */
4265  static void
4266  bfa_flash_write_send(struct bfa_flash_s *flash)
4267  {
4268  	struct bfi_flash_write_req_s *msg =
4269  			(struct bfi_flash_write_req_s *) flash->mb.msg;
4270  	u32	len;
4271  
4272  	msg->type = cpu_to_be32(flash->type);
4273  	msg->instance = flash->instance;
4274  	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4275  	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4276  		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4277  	msg->length = cpu_to_be32(len);
4278  
4279  	/* indicate if it's the last msg of the whole write operation */
4280  	msg->last = (len == flash->residue) ? 1 : 0;
4281  
4282  	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4283  			bfa_ioc_portid(flash->ioc));
4284  	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4285  	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4286  	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4287  
4288  	flash->residue -= len;
4289  	flash->offset += len;
4290  }
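
/*
 * Writes larger than the DMA buffer are streamed in BFA_FLASH_DMA_BUF_SZ
 * chunks; e.g. an image of roughly 150k goes out as three requests, and
 * only the final one carries msg->last = 1.  Each BFI_FLASH_I2H_WRITE_RSP
 * that leaves residue re-enters this function from bfa_flash_intr().
 */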
4291  
4292  /*
4293   * Send flash read request.
4294   *
4295   * @param[in] cbarg - callback argument
4296   */
4297  static void
4298  bfa_flash_read_send(void *cbarg)
4299  {
4300  	struct bfa_flash_s *flash = cbarg;
4301  	struct bfi_flash_read_req_s *msg =
4302  			(struct bfi_flash_read_req_s *) flash->mb.msg;
4303  	u32	len;
4304  
4305  	msg->type = cpu_to_be32(flash->type);
4306  	msg->instance = flash->instance;
4307  	msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4308  	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4309  			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4310  	msg->length = cpu_to_be32(len);
4311  	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4312  		bfa_ioc_portid(flash->ioc));
4313  	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4314  	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4315  }
4316  
4317  /*
4318   * Send flash erase request.
4319   *
4320   * @param[in] cbarg - callback argument
4321   */
4322  static void
4323  bfa_flash_erase_send(void *cbarg)
4324  {
4325  	struct bfa_flash_s *flash = cbarg;
4326  	struct bfi_flash_erase_req_s *msg =
4327  			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4328  
4329  	msg->type = cpu_to_be32(flash->type);
4330  	msg->instance = flash->instance;
4331  	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4332  			bfa_ioc_portid(flash->ioc));
4333  	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4334  }
4335  
4336  /*
4337   * Process flash response messages upon receiving interrupts.
4338   *
4339   * @param[in] flasharg - flash structure
4340   * @param[in] msg - message structure
4341   */
4342  static void
4343  bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4344  {
4345  	struct bfa_flash_s *flash = flasharg;
4346  	u32	status;
4347  
4348  	union {
4349  		struct bfi_flash_query_rsp_s *query;
4350  		struct bfi_flash_erase_rsp_s *erase;
4351  		struct bfi_flash_write_rsp_s *write;
4352  		struct bfi_flash_read_rsp_s *read;
4353  		struct bfi_flash_event_s *event;
4354  		struct bfi_mbmsg_s   *msg;
4355  	} m;
4356  
4357  	m.msg = msg;
4358  	bfa_trc(flash, msg->mh.msg_id);
4359  
4360  	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4361  		/* receiving response after ioc failure */
4362  		bfa_trc(flash, 0x9999);
4363  		return;
4364  	}
4365  
4366  	switch (msg->mh.msg_id) {
4367  	case BFI_FLASH_I2H_QUERY_RSP:
4368  		status = be32_to_cpu(m.query->status);
4369  		bfa_trc(flash, status);
4370  		if (status == BFA_STATUS_OK) {
4371  			u32	i;
4372  			struct bfa_flash_attr_s *attr, *f;
4373  
4374  			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4375  			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4376  			attr->status = be32_to_cpu(f->status);
4377  			attr->npart = be32_to_cpu(f->npart);
4378  			bfa_trc(flash, attr->status);
4379  			bfa_trc(flash, attr->npart);
4380  			for (i = 0; i < attr->npart; i++) {
4381  				attr->part[i].part_type =
4382  					be32_to_cpu(f->part[i].part_type);
4383  				attr->part[i].part_instance =
4384  					be32_to_cpu(f->part[i].part_instance);
4385  				attr->part[i].part_off =
4386  					be32_to_cpu(f->part[i].part_off);
4387  				attr->part[i].part_size =
4388  					be32_to_cpu(f->part[i].part_size);
4389  				attr->part[i].part_len =
4390  					be32_to_cpu(f->part[i].part_len);
4391  				attr->part[i].part_status =
4392  					be32_to_cpu(f->part[i].part_status);
4393  			}
4394  		}
4395  		flash->status = status;
4396  		bfa_flash_cb(flash);
4397  		break;
4398  	case BFI_FLASH_I2H_ERASE_RSP:
4399  		status = be32_to_cpu(m.erase->status);
4400  		bfa_trc(flash, status);
4401  		flash->status = status;
4402  		bfa_flash_cb(flash);
4403  		break;
4404  	case BFI_FLASH_I2H_WRITE_RSP:
4405  		status = be32_to_cpu(m.write->status);
4406  		bfa_trc(flash, status);
4407  		if (status != BFA_STATUS_OK || flash->residue == 0) {
4408  			flash->status = status;
4409  			bfa_flash_cb(flash);
4410  		} else {
4411  			bfa_trc(flash, flash->offset);
4412  			bfa_flash_write_send(flash);
4413  		}
4414  		break;
4415  	case BFI_FLASH_I2H_READ_RSP:
4416  		status = be32_to_cpu(m.read->status);
4417  		bfa_trc(flash, status);
4418  		if (status != BFA_STATUS_OK) {
4419  			flash->status = status;
4420  			bfa_flash_cb(flash);
4421  		} else {
4422  			u32 len = be32_to_cpu(m.read->length);
4423  			bfa_trc(flash, flash->offset);
4424  			bfa_trc(flash, len);
4425  			memcpy(flash->ubuf + flash->offset,
4426  				flash->dbuf_kva, len);
4427  			flash->residue -= len;
4428  			flash->offset += len;
4429  			if (flash->residue == 0) {
4430  				flash->status = status;
4431  				bfa_flash_cb(flash);
4432  			} else
4433  				bfa_flash_read_send(flash);
4434  		}
4435  		break;
4436  	case BFI_FLASH_I2H_BOOT_VER_RSP:
4437  		break;
4438  	case BFI_FLASH_I2H_EVENT:
4439  		status = be32_to_cpu(m.event->status);
4440  		bfa_trc(flash, status);
4441  		if (status == BFA_STATUS_BAD_FWCFG)
4442  			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4443  		else if (status == BFA_STATUS_INVALID_VENDOR) {
4444  			u32 param;
4445  			param = be32_to_cpu(m.event->param);
4446  			bfa_trc(flash, param);
4447  			bfa_ioc_aen_post(flash->ioc,
4448  				BFA_IOC_AEN_INVALID_VENDOR);
4449  		}
4450  		break;
4451  
4452  	default:
4453  		WARN_ON(1);
4454  	}
4455  }
4456  
4457  /*
4458   * Flash memory info API.
4459   *
4460   * @param[in] mincfg - minimal cfg variable
4461   */
4462  u32
4463  bfa_flash_meminfo(bfa_boolean_t mincfg)
4464  {
4465  	/* min driver doesn't need flash */
4466  	if (mincfg)
4467  		return 0;
4468  	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4469  }
4470  
4471  /*
4472   * Flash attach API.
4473   *
4474   * @param[in] flash - flash structure
4475   * @param[in] ioc  - ioc structure
4476   * @param[in] dev  - device structure
4477   * @param[in] trcmod - trace module
4478   * @param[in] logmod - log module
4479   */
4480  void
4481  bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4482  		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4483  {
4484  	flash->ioc = ioc;
4485  	flash->trcmod = trcmod;
4486  	flash->cbfn = NULL;
4487  	flash->cbarg = NULL;
4488  	flash->op_busy = 0;
4489  
4490  	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4491  	bfa_q_qe_init(&flash->ioc_notify);
4492  	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4493  	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4494  
4495  	/* min driver doesn't need flash */
4496  	if (mincfg) {
4497  		flash->dbuf_kva = NULL;
4498  		flash->dbuf_pa = 0;
4499  	}
4500  }
4501  
4502  /*
4503   * Claim memory for flash
4504   *
4505   * @param[in] flash - flash structure
4506   * @param[in] dm_kva - pointer to virtual memory address
4507   * @param[in] dm_pa - physical memory address
4508   * @param[in] mincfg - minimal cfg variable
4509   */
4510  void
4511  bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4512  		bfa_boolean_t mincfg)
4513  {
4514  	if (mincfg)
4515  		return;
4516  
4517  	flash->dbuf_kva = dm_kva;
4518  	flash->dbuf_pa = dm_pa;
4519  	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4520  	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4521  	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4522  }
4523  
4524  /*
4525   * Get flash attribute.
4526   *
4527   * @param[in] flash - flash structure
4528   * @param[in] attr - flash attribute structure
4529   * @param[in] cbfn - callback function
4530   * @param[in] cbarg - callback argument
4531   *
4532   * Return status.
4533   */
4534  bfa_status_t
4535  bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4536  		bfa_cb_flash_t cbfn, void *cbarg)
4537  {
4538  	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4539  
4540  	if (!bfa_ioc_is_operational(flash->ioc))
4541  		return BFA_STATUS_IOC_NON_OP;
4542  
4543  	if (flash->op_busy) {
4544  		bfa_trc(flash, flash->op_busy);
4545  		return BFA_STATUS_DEVBUSY;
4546  	}
4547  
4548  	flash->op_busy = 1;
4549  	flash->cbfn = cbfn;
4550  	flash->cbarg = cbarg;
4551  	flash->ubuf = (u8 *) attr;
4552  	bfa_flash_query_send(flash);
4553  
4554  	return BFA_STATUS_OK;
4555  }
4556  
4557  /*
4558   * Erase flash partition.
4559   *
4560   * @param[in] flash - flash structure
4561   * @param[in] type - flash partition type
4562   * @param[in] instance - flash partition instance
4563   * @param[in] cbfn - callback function
4564   * @param[in] cbarg - callback argument
4565   *
4566   * Return status.
4567   */
4568  bfa_status_t
4569  bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4570  		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4571  {
4572  	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4573  	bfa_trc(flash, type);
4574  	bfa_trc(flash, instance);
4575  
4576  	if (!bfa_ioc_is_operational(flash->ioc))
4577  		return BFA_STATUS_IOC_NON_OP;
4578  
4579  	if (flash->op_busy) {
4580  		bfa_trc(flash, flash->op_busy);
4581  		return BFA_STATUS_DEVBUSY;
4582  	}
4583  
4584  	flash->op_busy = 1;
4585  	flash->cbfn = cbfn;
4586  	flash->cbarg = cbarg;
4587  	flash->type = type;
4588  	flash->instance = instance;
4589  
4590  	bfa_flash_erase_send(flash);
4591  	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4592  				instance, type);
4593  	return BFA_STATUS_OK;
4594  }
4595  
4596  /*
4597   * Update flash partition.
4598   *
4599   * @param[in] flash - flash structure
4600   * @param[in] type - flash partition type
4601   * @param[in] instance - flash partition instance
4602   * @param[in] buf - update data buffer
4603   * @param[in] len - data buffer length
4604   * @param[in] offset - offset relative to the partition starting address
4605   * @param[in] cbfn - callback function
4606   * @param[in] cbarg - callback argument
4607   *
4608   * Return status.
4609   */
4610  bfa_status_t
4611  bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4612  		u8 instance, void *buf, u32 len, u32 offset,
4613  		bfa_cb_flash_t cbfn, void *cbarg)
4614  {
4615  	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4616  	bfa_trc(flash, type);
4617  	bfa_trc(flash, instance);
4618  	bfa_trc(flash, len);
4619  	bfa_trc(flash, offset);
4620  
4621  	if (!bfa_ioc_is_operational(flash->ioc))
4622  		return BFA_STATUS_IOC_NON_OP;
4623  
4624  	/*
4625  	 * 'len' must be in word (4-byte) boundary
4626  	 * 'offset' must be in sector (16kb) boundary
4627  	 */
4628  	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4629  		return BFA_STATUS_FLASH_BAD_LEN;
4630  
4631  	if (type == BFA_FLASH_PART_MFG)
4632  		return BFA_STATUS_EINVAL;
4633  
4634  	if (flash->op_busy) {
4635  		bfa_trc(flash, flash->op_busy);
4636  		return BFA_STATUS_DEVBUSY;
4637  	}
4638  
4639  	flash->op_busy = 1;
4640  	flash->cbfn = cbfn;
4641  	flash->cbarg = cbarg;
4642  	flash->type = type;
4643  	flash->instance = instance;
4644  	flash->residue = len;
4645  	flash->offset = 0;
4646  	flash->addr_off = offset;
4647  	flash->ubuf = buf;
4648  
4649  	bfa_flash_write_send(flash);
4650  	return BFA_STATUS_OK;
4651  }
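
/*
 * Usage sketch (my_buf, my_drv and my_flash_done are hypothetical, and
 * BFA_FLASH_PART_FWIMG stands in as an example partition type): len must
 * be 4-byte aligned, offset must sit on a 16k sector boundary, and the
 * MFG partition is rejected outright:
 *
 *	status = bfa_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *				       my_buf, len, 0, my_flash_done, my_drv);
 */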
4652  
4653  /*
4654   * Read flash partition.
4655   *
4656   * @param[in] flash - flash structure
4657   * @param[in] type - flash partition type
4658   * @param[in] instance - flash partition instance
4659   * @param[in] buf - read data buffer
4660   * @param[in] len - data buffer length
4661   * @param[in] offset - offset relative to the partition starting address
4662   * @param[in] cbfn - callback function
4663   * @param[in] cbarg - callback argument
4664   *
4665   * Return status.
4666   */
4667  bfa_status_t
4668  bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4669  		u8 instance, void *buf, u32 len, u32 offset,
4670  		bfa_cb_flash_t cbfn, void *cbarg)
4671  {
4672  	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4673  	bfa_trc(flash, type);
4674  	bfa_trc(flash, instance);
4675  	bfa_trc(flash, len);
4676  	bfa_trc(flash, offset);
4677  
4678  	if (!bfa_ioc_is_operational(flash->ioc))
4679  		return BFA_STATUS_IOC_NON_OP;
4680  
4681  	/*
4682  	 * 'len' must be in word (4-byte) boundary
4683  	 * 'offset' must be in sector (16kb) boundary
4684  	 */
4685  	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4686  		return BFA_STATUS_FLASH_BAD_LEN;
4687  
4688  	if (flash->op_busy) {
4689  		bfa_trc(flash, flash->op_busy);
4690  		return BFA_STATUS_DEVBUSY;
4691  	}
4692  
4693  	flash->op_busy = 1;
4694  	flash->cbfn = cbfn;
4695  	flash->cbarg = cbarg;
4696  	flash->type = type;
4697  	flash->instance = instance;
4698  	flash->residue = len;
4699  	flash->offset = 0;
4700  	flash->addr_off = offset;
4701  	flash->ubuf = buf;
4702  	bfa_flash_read_send(flash);
4703  
4704  	return BFA_STATUS_OK;
4705  }
4706  
4707  /*
4708   *	DIAG module specific
4709   */
4710  
4711  #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4712  #define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4713  
4714  /* IOC event handler */
4715  static void
4716  bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4717  {
4718  	struct bfa_diag_s *diag = diag_arg;
4719  
4720  	bfa_trc(diag, event);
4721  	bfa_trc(diag, diag->block);
4722  	bfa_trc(diag, diag->fwping.lock);
4723  	bfa_trc(diag, diag->tsensor.lock);
4724  
4725  	switch (event) {
4726  	case BFA_IOC_E_DISABLED:
4727  	case BFA_IOC_E_FAILED:
4728  		if (diag->fwping.lock) {
4729  			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4730  			diag->fwping.cbfn(diag->fwping.cbarg,
4731  					diag->fwping.status);
4732  			diag->fwping.lock = 0;
4733  		}
4734  
4735  		if (diag->tsensor.lock) {
4736  			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4737  			diag->tsensor.cbfn(diag->tsensor.cbarg,
4738  					   diag->tsensor.status);
4739  			diag->tsensor.lock = 0;
4740  		}
4741  
4742  		if (diag->block) {
4743  			if (diag->timer_active) {
4744  				bfa_timer_stop(&diag->timer);
4745  				diag->timer_active = 0;
4746  			}
4747  
4748  			diag->status = BFA_STATUS_IOC_FAILURE;
4749  			diag->cbfn(diag->cbarg, diag->status);
4750  			diag->block = 0;
4751  		}
4752  		break;
4753  
4754  	default:
4755  		break;
4756  	}
4757  }
4758  
4759  static void
4760  bfa_diag_memtest_done(void *cbarg)
4761  {
4762  	struct bfa_diag_s *diag = cbarg;
4763  	struct bfa_ioc_s  *ioc = diag->ioc;
4764  	struct bfa_diag_memtest_result *res = diag->result;
4765  	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4766  	u32	pgnum, pgoff, i;
4767  
4768  	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4769  	pgoff = PSS_SMEM_PGOFF(loff);
4770  
4771  	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4772  
4773  	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4774  			 sizeof(u32)); i++) {
4775  		/* read test result from smem */
4776  		*((u32 *) res + i) =
4777  			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4778  		loff += sizeof(u32);
4779  	}
4780  
4781  	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4782  	bfa_ioc_reset_fwstate(ioc);
4783  
4784  	res->status = swab32(res->status);
4785  	bfa_trc(diag, res->status);
4786  
4787  	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4788  		diag->status = BFA_STATUS_OK;
4789  	else {
4790  		diag->status = BFA_STATUS_MEMTEST_FAILED;
4791  		res->addr = swab32(res->addr);
4792  		res->exp = swab32(res->exp);
4793  		res->act = swab32(res->act);
4794  		res->err_status = swab32(res->err_status);
4795  		res->err_status1 = swab32(res->err_status1);
4796  		res->err_addr = swab32(res->err_addr);
4797  		bfa_trc(diag, res->addr);
4798  		bfa_trc(diag, res->exp);
4799  		bfa_trc(diag, res->act);
4800  		bfa_trc(diag, res->err_status);
4801  		bfa_trc(diag, res->err_status1);
4802  		bfa_trc(diag, res->err_addr);
4803  	}
4804  	diag->timer_active = 0;
4805  	diag->cbfn(diag->cbarg, diag->status);
4806  	diag->block = 0;
4807  }
4808  
4809  /*
4810   * Firmware ping
4811   */
4812  
4813  /*
4814   * Perform DMA test directly
4815   */
4816  static void
4817  diag_fwping_send(struct bfa_diag_s *diag)
4818  {
4819  	struct bfi_diag_fwping_req_s *fwping_req;
4820  	u32	i;
4821  
4822  	bfa_trc(diag, diag->fwping.dbuf_pa);
4823  
4824  	/* fill DMA area with pattern */
4825  	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4826  		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4827  
4828  	/* Fill mbox msg */
4829  	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4830  
4831  	/* Setup SG list */
4832  	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4833  			diag->fwping.dbuf_pa);
4834  	/* Set up dma count */
4835  	fwping_req->count = cpu_to_be32(diag->fwping.count);
4836  	/* Set up data pattern */
4837  	fwping_req->data = diag->fwping.data;
4838  
4839  	/* build host command */
4840  	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4841  		bfa_ioc_portid(diag->ioc));
4842  
4843  	/* send mbox cmd */
4844  	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4845  }
4846  
4847  static void
4848  diag_fwping_comp(struct bfa_diag_s *diag,
4849  		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4850  {
4851  	u32	rsp_data = diag_rsp->data;
4852  	u8	rsp_dma_status = diag_rsp->dma_status;
4853  
4854  	bfa_trc(diag, rsp_data);
4855  	bfa_trc(diag, rsp_dma_status);
4856  
4857  	if (rsp_dma_status == BFA_STATUS_OK) {
4858  		u32	i, pat;
4859  		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4860  			diag->fwping.data;
4861  		/* Check mbox data */
4862  		if (diag->fwping.data != rsp_data) {
4863  			bfa_trc(diag, rsp_data);
4864  			diag->fwping.result->dmastatus =
4865  					BFA_STATUS_DATACORRUPTED;
4866  			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4867  			diag->fwping.cbfn(diag->fwping.cbarg,
4868  					diag->fwping.status);
4869  			diag->fwping.lock = 0;
4870  			return;
4871  		}
4872  		/* Check dma pattern */
4873  		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4874  			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4875  				bfa_trc(diag, i);
4876  				bfa_trc(diag, pat);
4877  				bfa_trc(diag,
4878  					*((u32 *)diag->fwping.dbuf_kva + i));
4879  				diag->fwping.result->dmastatus =
4880  						BFA_STATUS_DATACORRUPTED;
4881  				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4882  				diag->fwping.cbfn(diag->fwping.cbarg,
4883  						diag->fwping.status);
4884  				diag->fwping.lock = 0;
4885  				return;
4886  			}
4887  		}
4888  		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4889  		diag->fwping.status = BFA_STATUS_OK;
4890  		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4891  		diag->fwping.lock = 0;
4892  	} else {
4893  		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4894  		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4895  		diag->fwping.lock = 0;
4896  	}
4897  }
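
/*
 * The pass criterion mirrors diag_fwping_send(): the firmware is expected
 * to flip the pattern on each iteration, so an odd fwping.count means the
 * buffer must hold the inverted data.  Example: count == 5 with
 * data == 0xA5A5A5A5 must read back as ~0xA5A5A5A5 == 0x5A5A5A5A in every
 * word of the DMA buffer.
 */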
4898  
4899  /*
4900   * Temperature Sensor
4901   */
4902  
4903  static void
4904  diag_tempsensor_send(struct bfa_diag_s *diag)
4905  {
4906  	struct bfi_diag_ts_req_s *msg;
4907  
4908  	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4909  	bfa_trc(diag, msg->temp);
4910  	/* build host command */
4911  	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4912  		bfa_ioc_portid(diag->ioc));
4913  	/* send mbox cmd */
4914  	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4915  }
4916  
4917  static void
4918  diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4919  {
4920  	if (!diag->tsensor.lock) {
4921  		/* receiving response after ioc failure */
4922  		bfa_trc(diag, diag->tsensor.lock);
4923  		return;
4924  	}
4925  
4926  	/*
4927  	 * The ASIC junction tempsensor is a register read operation;
4928  	 * it will always return OK
4929  	 */
4930  	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4931  	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4932  	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4933  
4934  	if (rsp->ts_brd) {
4935  		/* tsensor.temp->status is brd_temp status */
4936  		diag->tsensor.temp->status = rsp->status;
4937  		if (rsp->status == BFA_STATUS_OK) {
4938  			diag->tsensor.temp->brd_temp =
4939  				be16_to_cpu(rsp->brd_temp);
4940  		} else
4941  			diag->tsensor.temp->brd_temp = 0;
4942  	}
4943  
4944  	bfa_trc(diag, rsp->status);
4945  	bfa_trc(diag, rsp->ts_junc);
4946  	bfa_trc(diag, rsp->temp);
4947  	bfa_trc(diag, rsp->ts_brd);
4948  	bfa_trc(diag, rsp->brd_temp);
4949  
4950  	/* tsensor status is always good since we always have junction temp */
4951  	diag->tsensor.status = BFA_STATUS_OK;
4952  	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4953  	diag->tsensor.lock = 0;
4954  }
4955  
4956  /*
4957   *	LED Test command
4958   */
4959  static void
4960  diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4961  {
4962  	struct bfi_diag_ledtest_req_s  *msg;
4963  
4964  	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4965  	/* build host command */
4966  	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4967  			bfa_ioc_portid(diag->ioc));
4968  
4969  	/*
4970  	 * convert the freq from N blinks per 10 secs to the
4971  	 * crossbow ontime value; we do it here because a division is needed
4972  	 */
4973  	if (ledtest->freq)
4974  		ledtest->freq = 500 / ledtest->freq;
4975  
4976  	if (ledtest->freq == 0)
4977  		ledtest->freq = 1;
4978  
4979  	bfa_trc(diag, ledtest->freq);
4981  	msg->cmd = (u8) ledtest->cmd;
4982  	msg->color = (u8) ledtest->color;
4983  	msg->portid = bfa_ioc_portid(diag->ioc);
4984  	msg->led = ledtest->led;
4985  	msg->freq = cpu_to_be16(ledtest->freq);
4986  
4987  	/* send mbox cmd */
4988  	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4989  }
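
/*
 * Conversion example: a request for 10 blinks per 10 seconds becomes
 * 500 / 10 = 50 ontime units, while anything above 500 blinks would
 * divide to 0 and is clamped to the minimum ontime of 1.
 */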
4990  
4991  static void
4992  diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4993  {
4994  	bfa_trc(diag, diag->ledtest.lock);
4995  	diag->ledtest.lock = BFA_FALSE;
4996  	/* no bfa_cb_queue is needed because driver is not waiting */
4997  }
4998  
4999  /*
5000   * Port beaconing
5001   */
5002  static void
5003  diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
5004  {
5005  	struct bfi_diag_portbeacon_req_s *msg;
5006  
5007  	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5008  	/* build host command */
5009  	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5010  		bfa_ioc_portid(diag->ioc));
5011  	msg->beacon = beacon;
5012  	msg->period = cpu_to_be32(sec);
5013  	/* send mbox cmd */
5014  	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5015  }
5016  
5017  static void
5018  diag_portbeacon_comp(struct bfa_diag_s *diag)
5019  {
5020  	bfa_trc(diag, diag->beacon.state);
5021  	diag->beacon.state = BFA_FALSE;
5022  	if (diag->cbfn_beacon)
5023  		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5024  }
5025  
5026  /*
5027   *	Diag hmbox handler
5028   */
5029  void
5030  bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5031  {
5032  	struct bfa_diag_s *diag = diagarg;
5033  
5034  	switch (msg->mh.msg_id) {
5035  	case BFI_DIAG_I2H_PORTBEACON:
5036  		diag_portbeacon_comp(diag);
5037  		break;
5038  	case BFI_DIAG_I2H_FWPING:
5039  		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5040  		break;
5041  	case BFI_DIAG_I2H_TEMPSENSOR:
5042  		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5043  		break;
5044  	case BFI_DIAG_I2H_LEDTEST:
5045  		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5046  		break;
5047  	default:
5048  		bfa_trc(diag, msg->mh.msg_id);
5049  		WARN_ON(1);
5050  	}
5051  }
5052  
5053  /*
5054   * Gen RAM Test
5055   *
5056   *   @param[in] *diag           - diag data struct
5057   *   @param[in] *memtest        - mem test params input from upper layer,
5058   *   @param[in] pattern         - mem test pattern
5059   *   @param[in] *result         - mem test result
5060   *   @param[in] cbfn            - mem test callback function
5061   *   @param[in] cbarg           - callback function arg
5062   *
5063   *   @param[out]
5064   */
5065  bfa_status_t
5066  bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5067  		u32 pattern, struct bfa_diag_memtest_result *result,
5068  		bfa_cb_diag_t cbfn, void *cbarg)
5069  {
5070  	u32	memtest_tov;
5071  
5072  	bfa_trc(diag, pattern);
5073  
5074  	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5075  		return BFA_STATUS_ADAPTER_ENABLED;
5076  
5077  	/* check to see if there is another destructive diag cmd running */
5078  	if (diag->block) {
5079  		bfa_trc(diag, diag->block);
5080  		return BFA_STATUS_DEVBUSY;
5081  	} else
5082  		diag->block = 1;
5083  
5084  	diag->result = result;
5085  	diag->cbfn = cbfn;
5086  	diag->cbarg = cbarg;
5087  
5088  	/* download memtest code and take LPU0 out of reset */
5089  	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5090  
5091  	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5092  		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5093  	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5094  			bfa_diag_memtest_done, diag, memtest_tov);
5095  	diag->timer_active = 1;
5096  	return BFA_STATUS_OK;
5097  }
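
/*
 * Call sequence sketch (my_diag_done and my_drv are hypothetical): the
 * adapter must already be disabled, since bfa_ioc_boot() reboots it with
 * the memtest image:
 *
 *	bfa_ioc_disable(ioc);
 *	...	// wait for the disable to complete
 *	bfa_diag_memtest(diag, &memtest, pattern, &result,
 *			 my_diag_done, my_drv);
 */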
5098  
5099  /*
5100   * DIAG firmware ping command
5101   *
5102   *   @param[in] *diag           - diag data struct
5103   *   @param[in] cnt             - dma loop count for testing PCIE
5104   *   @param[in] data            - data pattern to pass in fw
5105   *   @param[in] *result         - pointer to bfa_diag_fwping_result_t data struct
5106   *   @param[in] cbfn            - callback function
5107   *   @param[in] *cbarg          - callback function arg
5108   *
5109   *   @param[out]
5110   */
5111  bfa_status_t
5112  bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5113  		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5114  		void *cbarg)
5115  {
5116  	bfa_trc(diag, cnt);
5117  	bfa_trc(diag, data);
5118  
5119  	if (!bfa_ioc_is_operational(diag->ioc))
5120  		return BFA_STATUS_IOC_NON_OP;
5121  
5122  	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5123  	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5124  		return BFA_STATUS_CMD_NOTSUPP;
5125  
5126  	/* check to see if there is another destructive diag cmd running */
5127  	if (diag->block || diag->fwping.lock) {
5128  		bfa_trc(diag, diag->block);
5129  		bfa_trc(diag, diag->fwping.lock);
5130  		return BFA_STATUS_DEVBUSY;
5131  	}
5132  
5133  	/* Initialization */
5134  	diag->fwping.lock = 1;
5135  	diag->fwping.cbfn = cbfn;
5136  	diag->fwping.cbarg = cbarg;
5137  	diag->fwping.result = result;
5138  	diag->fwping.data = data;
5139  	diag->fwping.count = cnt;
5140  
5141  	/* Init test results */
5142  	diag->fwping.result->data = 0;
5143  	diag->fwping.result->status = BFA_STATUS_OK;
5144  
5145  	/* kick off the first ping */
5146  	diag_fwping_send(diag);
5147  	return BFA_STATUS_OK;
5148  }
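
/*
 * Usage sketch (my_fwping_done and my_drv are hypothetical): ping the
 * firmware 64 times with an alternating 0xA5A5A5A5 pattern; note that a
 * CT2 Ethernet function rejects this with BFA_STATUS_CMD_NOTSUPP:
 *
 *	bfa_diag_fwping(diag, 64, 0xA5A5A5A5, &result,
 *			my_fwping_done, my_drv);
 */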
5149  
5150  /*
5151   * Read Temperature Sensor
5152   *
5153   *   @param[in] *diag           - diag data struct
5154   *   @param[in] *result         - pointer to bfa_diag_temp_t data struct
5155   *   @param[in] cbfn            - callback function
5156   *   @param[in] *cbarg          - callback function arg
5157   *
5158   *   @param[out]
5159   */
5160  bfa_status_t
5161  bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5162  		struct bfa_diag_results_tempsensor_s *result,
5163  		bfa_cb_diag_t cbfn, void *cbarg)
5164  {
5165  	/* check to see if there is a destructive diag cmd running */
5166  	if (diag->block || diag->tsensor.lock) {
5167  		bfa_trc(diag, diag->block);
5168  		bfa_trc(diag, diag->tsensor.lock);
5169  		return BFA_STATUS_DEVBUSY;
5170  	}
5171  
5172  	if (!bfa_ioc_is_operational(diag->ioc))
5173  		return BFA_STATUS_IOC_NON_OP;
5174  
5175  	/* Init diag mod params */
5176  	diag->tsensor.lock = 1;
5177  	diag->tsensor.temp = result;
5178  	diag->tsensor.cbfn = cbfn;
5179  	diag->tsensor.cbarg = cbarg;
5180  	diag->tsensor.status = BFA_STATUS_OK;
5181  
5182  	/* Send msg to fw */
5183  	diag_tempsensor_send(diag);
5184  
5185  	return BFA_STATUS_OK;
5186  }
5187  
5188  /*
5189   * LED Test command
5190   *
5191   *   @param[in] *diag           - diag data struct
5192   *   @param[in] *ledtest        - pointer to ledtest data structure
5193   *
5194   *   @param[out]
5195   */
5196  bfa_status_t
5197  bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5198  {
5199  	bfa_trc(diag, ledtest->cmd);
5200  
5201  	if (!bfa_ioc_is_operational(diag->ioc))
5202  		return BFA_STATUS_IOC_NON_OP;
5203  
5204  	if (diag->beacon.state)
5205  		return BFA_STATUS_BEACON_ON;
5206  
5207  	if (diag->ledtest.lock)
5208  		return BFA_STATUS_LEDTEST_OP;
5209  
5210  	/* Send msg to fw */
5211  	diag->ledtest.lock = BFA_TRUE;
5212  	diag_ledtest_send(diag, ledtest);
5213  
5214  	return BFA_STATUS_OK;
5215  }
5216  
5217  /*
5218   * Port beaconing command
5219   *
5220   *   @param[in] *diag           - diag data struct
5221   *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5222   *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5223   *   @param[in] sec             - beaconing duration in seconds
5224   *
5225   *   @param[out]
5226   */
5227  bfa_status_t
5228  bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5229  		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5230  {
5231  	bfa_trc(diag, beacon);
5232  	bfa_trc(diag, link_e2e_beacon);
5233  	bfa_trc(diag, sec);
5234  
5235  	if (!bfa_ioc_is_operational(diag->ioc))
5236  		return BFA_STATUS_IOC_NON_OP;
5237  
5238  	if (diag->ledtest.lock)
5239  		return BFA_STATUS_LEDTEST_OP;
5240  
5241  	if (diag->beacon.state && beacon)       /* beacon already on */
5242  		return BFA_STATUS_BEACON_ON;
5243  
5244  	diag->beacon.state	= beacon;
5245  	diag->beacon.link_e2e	= link_e2e_beacon;
5246  	if (diag->cbfn_beacon)
5247  		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5248  
5249  	/* Send msg to fw */
5250  	diag_portbeacon_send(diag, beacon, sec);
5251  
5252  	return BFA_STATUS_OK;
5253  }
5254  
5255  /*
5256   * Return DMA memory needed by diag module.
5257   */
5258  u32
5259  bfa_diag_meminfo(void)
5260  {
5261  	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5262  }
5263  
5264  /*
5265   *	Attach virtual and physical memory for Diag.
5266   */
5267  void
5268  bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5269  	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5270  {
5271  	diag->dev = dev;
5272  	diag->ioc = ioc;
5273  	diag->trcmod = trcmod;
5274  
5275  	diag->block = 0;
5276  	diag->cbfn = NULL;
5277  	diag->cbarg = NULL;
5278  	diag->result = NULL;
5279  	diag->cbfn_beacon = cbfn_beacon;
5280  
5281  	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5282  	bfa_q_qe_init(&diag->ioc_notify);
5283  	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5284  	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5285  }
5286  
5287  void
5288  bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5289  {
5290  	diag->fwping.dbuf_kva = dm_kva;
5291  	diag->fwping.dbuf_pa = dm_pa;
5292  	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5293  }
5294  
5295  /*
5296   *	PHY module specific
5297   */
5298  #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5299  #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5300  
5301  static void
5302  bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5303  {
5304  	int i, m = sz >> 2;
5305  
5306  	for (i = 0; i < m; i++)
5307  		obuf[i] = be32_to_cpu(ibuf[i]);
5308  }
5309  
5310  static bfa_boolean_t
5311  bfa_phy_present(struct bfa_phy_s *phy)
5312  {
5313  	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5314  }
5315  
5316  static void
5317  bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5318  {
5319  	struct bfa_phy_s *phy = cbarg;
5320  
5321  	bfa_trc(phy, event);
5322  
5323  	switch (event) {
5324  	case BFA_IOC_E_DISABLED:
5325  	case BFA_IOC_E_FAILED:
5326  		if (phy->op_busy) {
5327  			phy->status = BFA_STATUS_IOC_FAILURE;
5328  			phy->cbfn(phy->cbarg, phy->status);
5329  			phy->op_busy = 0;
5330  		}
5331  		break;
5332  
5333  	default:
5334  		break;
5335  	}
5336  }
5337  
5338  /*
5339   * Send phy attribute query request.
5340   *
5341   * @param[in] cbarg - callback argument
5342   */
5343  static void
5344  bfa_phy_query_send(void *cbarg)
5345  {
5346  	struct bfa_phy_s *phy = cbarg;
5347  	struct bfi_phy_query_req_s *msg =
5348  			(struct bfi_phy_query_req_s *) phy->mb.msg;
5349  
5350  	msg->instance = phy->instance;
5351  	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5352  		bfa_ioc_portid(phy->ioc));
5353  	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5354  	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5355  }
5356  
5357  /*
5358   * Send phy write request.
5359   *
5360   * @param[in] cbarg - callback argument
5361   */
5362  static void
5363  bfa_phy_write_send(void *cbarg)
5364  {
5365  	struct bfa_phy_s *phy = cbarg;
5366  	struct bfi_phy_write_req_s *msg =
5367  			(struct bfi_phy_write_req_s *) phy->mb.msg;
5368  	u32	len;
5369  	u16	*buf, *dbuf;
5370  	int	i, sz;
5371  
5372  	msg->instance = phy->instance;
5373  	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5374  	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5375  			phy->residue : BFA_PHY_DMA_BUF_SZ;
5376  	msg->length = cpu_to_be32(len);
5377  
5378  	/* indicate if it's the last msg of the whole write operation */
5379  	msg->last = (len == phy->residue) ? 1 : 0;
5380  
5381  	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5382  		bfa_ioc_portid(phy->ioc));
5383  	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5384  
5385  	buf = (u16 *) (phy->ubuf + phy->offset);
5386  	dbuf = (u16 *)phy->dbuf_kva;
5387  	sz = len >> 1;
5388  	for (i = 0; i < sz; i++)
5389  		dbuf[i] = cpu_to_be16(buf[i]);
5390  
5391  	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5392  
5393  	phy->residue -= len;
5394  	phy->offset += len;
5395  }
5396  
5397  /*
5398   * Send phy read request.
5399   *
5400   * @param[in] cbarg - callback argument
5401   */
5402  static void
5403  bfa_phy_read_send(void *cbarg)
5404  {
5405  	struct bfa_phy_s *phy = cbarg;
5406  	struct bfi_phy_read_req_s *msg =
5407  			(struct bfi_phy_read_req_s *) phy->mb.msg;
5408  	u32	len;
5409  
5410  	msg->instance = phy->instance;
5411  	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5412  	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5413  			phy->residue : BFA_PHY_DMA_BUF_SZ;
5414  	msg->length = cpu_to_be32(len);
5415  	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5416  		bfa_ioc_portid(phy->ioc));
5417  	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5418  	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5419  }
5420  
5421  /*
5422   * Send phy stats request.
5423   *
5424   * @param[in] cbarg - callback argument
5425   */
5426  static void
5427  bfa_phy_stats_send(void *cbarg)
5428  {
5429  	struct bfa_phy_s *phy = cbarg;
5430  	struct bfi_phy_stats_req_s *msg =
5431  			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5432  
5433  	msg->instance = phy->instance;
5434  	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5435  		bfa_ioc_portid(phy->ioc));
5436  	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5437  	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5438  }
5439  
5440  /*
5441   *	Phy memory info API.
5442   *
5443   * @param[in] mincfg - minimal cfg variable
5444   */
5445  u32
5446  bfa_phy_meminfo(bfa_boolean_t mincfg)
5447  {
5448  	/* min driver doesn't need phy */
5449  	if (mincfg)
5450  		return 0;
5451  
5452  	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5453  }
5454  
5455  /*
5456   * Phy attach API.
5457   *
5458   * @param[in] phy - phy structure
5459   * @param[in] ioc  - ioc structure
5460   * @param[in] dev  - device structure
5461   * @param[in] trcmod - trace module
5462   * @param[in] mincfg - minimal cfg variable
5463   */
5464  void
5465  bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5466  		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5467  {
5468  	phy->ioc = ioc;
5469  	phy->trcmod = trcmod;
5470  	phy->cbfn = NULL;
5471  	phy->cbarg = NULL;
5472  	phy->op_busy = 0;
5473  
5474  	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5475  	bfa_q_qe_init(&phy->ioc_notify);
5476  	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5477  	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5478  
5479  	/* min driver doesn't need phy */
5480  	if (mincfg) {
5481  		phy->dbuf_kva = NULL;
5482  		phy->dbuf_pa = 0;
5483  	}
5484  }
5485  
5486  /*
5487   * Claim memory for phy
5488   *
5489   * @param[in] phy - phy structure
5490   * @param[in] dm_kva - pointer to virtual memory address
5491   * @param[in] dm_pa - physical memory address
5492   * @param[in] mincfg - minimal cfg variable
5493   */
5494  void
5495  bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5496  		bfa_boolean_t mincfg)
5497  {
5498  	if (mincfg)
5499  		return;
5500  
5501  	phy->dbuf_kva = dm_kva;
5502  	phy->dbuf_pa = dm_pa;
5503  	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5504  	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5505  	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5506  }
5507  
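/*
 * Check if the phy lock is held: a non-zero read of the phy
 * semaphore status register means another phy operation is active.
 */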
5508  bfa_boolean_t
5509  bfa_phy_busy(struct bfa_ioc_s *ioc)
5510  {
5511  	void __iomem	*rb;
5512  
5513  	rb = bfa_ioc_bar0(ioc);
5514  	return readl(rb + BFA_PHY_LOCK_STATUS);
5515  }
5516  
5517  /*
5518   * Get phy attribute.
5519   *
5520   * @param[in] phy - phy structure
5521   * @param[in] attr - phy attribute structure
5522   * @param[in] cbfn - callback function
5523   * @param[in] cbarg - callback argument
5524   *
5525   * Return status.
5526   */
5527  bfa_status_t
5528  bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5529  		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5530  {
5531  	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5532  	bfa_trc(phy, instance);
5533  
5534  	if (!bfa_phy_present(phy))
5535  		return BFA_STATUS_PHY_NOT_PRESENT;
5536  
5537  	if (!bfa_ioc_is_operational(phy->ioc))
5538  		return BFA_STATUS_IOC_NON_OP;
5539  
5540  	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5541  		bfa_trc(phy, phy->op_busy);
5542  		return BFA_STATUS_DEVBUSY;
5543  	}
5544  
5545  	phy->op_busy = 1;
5546  	phy->cbfn = cbfn;
5547  	phy->cbarg = cbarg;
5548  	phy->instance = instance;
5549  	phy->ubuf = (u8 *) attr;
5550  	bfa_phy_query_send(phy);
5551  
5552  	return BFA_STATUS_OK;
5553  }
5554  
5555  /*
5556   * Get phy stats.
5557   *
5558   * @param[in] phy - phy structure
5559   * @param[in] instance - phy image instance
5560   * @param[in] stats - pointer to phy stats
5561   * @param[in] cbfn - callback function
5562   * @param[in] cbarg - callback argument
5563   *
5564   * Return status.
5565   */
5566  bfa_status_t
5567  bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5568  		struct bfa_phy_stats_s *stats,
5569  		bfa_cb_phy_t cbfn, void *cbarg)
5570  {
5571  	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5572  	bfa_trc(phy, instance);
5573  
5574  	if (!bfa_phy_present(phy))
5575  		return BFA_STATUS_PHY_NOT_PRESENT;
5576  
5577  	if (!bfa_ioc_is_operational(phy->ioc))
5578  		return BFA_STATUS_IOC_NON_OP;
5579  
5580  	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5581  		bfa_trc(phy, phy->op_busy);
5582  		return BFA_STATUS_DEVBUSY;
5583  	}
5584  
5585  	phy->op_busy = 1;
5586  	phy->cbfn = cbfn;
5587  	phy->cbarg = cbarg;
5588  	phy->instance = instance;
5589  	phy->ubuf = (u8 *) stats;
5590  	bfa_phy_stats_send(phy);
5591  
5592  	return BFA_STATUS_OK;
5593  }
5594  
5595  /*
5596   * Update phy image.
5597   *
5598   * @param[in] phy - phy structure
5599   * @param[in] instance - phy image instance
5600   * @param[in] buf - update data buffer
5601   * @param[in] len - data buffer length
5602   * @param[in] offset - offset relative to starting address
5603   * @param[in] cbfn - callback function
5604   * @param[in] cbarg - callback argument
5605   *
5606   * Return status.
5607   */
5608  bfa_status_t
5609  bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5610  		void *buf, u32 len, u32 offset,
5611  		bfa_cb_phy_t cbfn, void *cbarg)
5612  {
5613  	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5614  	bfa_trc(phy, instance);
5615  	bfa_trc(phy, len);
5616  	bfa_trc(phy, offset);
5617  
5618  	if (!bfa_phy_present(phy))
5619  		return BFA_STATUS_PHY_NOT_PRESENT;
5620  
5621  	if (!bfa_ioc_is_operational(phy->ioc))
5622  		return BFA_STATUS_IOC_NON_OP;
5623  
5624  	/* 'len' must be a non-zero multiple of 4 (word aligned) */
5625  	if (!len || (len & 0x03))
5626  		return BFA_STATUS_FAILED;
5627  
5628  	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5629  		bfa_trc(phy, phy->op_busy);
5630  		return BFA_STATUS_DEVBUSY;
5631  	}
5632  
5633  	phy->op_busy = 1;
5634  	phy->cbfn = cbfn;
5635  	phy->cbarg = cbarg;
5636  	phy->instance = instance;
5637  	phy->residue = len;
5638  	phy->offset = 0;
5639  	phy->addr_off = offset;
5640  	phy->ubuf = buf;
5641  
5642  	bfa_phy_write_send(phy);
5643  	return BFA_STATUS_OK;
5644  }
5645  
5646  /*
5647   * Read phy image.
5648   *
5649   * @param[in] phy - phy structure
5650   * @param[in] instance - phy image instance
5651   * @param[in] buf - read data buffer
5652   * @param[in] len - data buffer length
5653   * @param[in] offset - offset relative to starting address
5654   * @param[in] cbfn - callback function
5655   * @param[in] cbarg - callback argument
5656   *
5657   * Return status.
5658   */
5659  bfa_status_t
5660  bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5661  		void *buf, u32 len, u32 offset,
5662  		bfa_cb_phy_t cbfn, void *cbarg)
5663  {
5664  	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5665  	bfa_trc(phy, instance);
5666  	bfa_trc(phy, len);
5667  	bfa_trc(phy, offset);
5668  
5669  	if (!bfa_phy_present(phy))
5670  		return BFA_STATUS_PHY_NOT_PRESENT;
5671  
5672  	if (!bfa_ioc_is_operational(phy->ioc))
5673  		return BFA_STATUS_IOC_NON_OP;
5674  
5675  	/* 'len' must be a non-zero multiple of 4 (word aligned) */
5676  	if (!len || (len & 0x03))
5677  		return BFA_STATUS_FAILED;
5678  
5679  	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5680  		bfa_trc(phy, phy->op_busy);
5681  		return BFA_STATUS_DEVBUSY;
5682  	}
5683  
5684  	phy->op_busy = 1;
5685  	phy->cbfn = cbfn;
5686  	phy->cbarg = cbarg;
5687  	phy->instance = instance;
5688  	phy->residue = len;
5689  	phy->offset = 0;
5690  	phy->addr_off = offset;
5691  	phy->ubuf = buf;
5692  	bfa_phy_read_send(phy);
5693  
5694  	return BFA_STATUS_OK;
5695  }
5696  
5697  /*
5698   * Process phy response messages upon receiving interrupts.
5699   *
5700   * @param[in] phyarg - phy structure
5701   * @param[in] msg - message structure
5702   */
5703  void
5704  bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5705  {
5706  	struct bfa_phy_s *phy = phyarg;
5707  	u32	status;
5708  
5709  	union {
5710  		struct bfi_phy_query_rsp_s *query;
5711  		struct bfi_phy_stats_rsp_s *stats;
5712  		struct bfi_phy_write_rsp_s *write;
5713  		struct bfi_phy_read_rsp_s *read;
5714  		struct bfi_mbmsg_s   *msg;
5715  	} m;
5716  
5717  	m.msg = msg;
5718  	bfa_trc(phy, msg->mh.msg_id);
5719  
5720  	if (!phy->op_busy) {
5721  		/* receiving response after ioc failure */
5722  		bfa_trc(phy, 0x9999);
5723  		return;
5724  	}
5725  
5726  	switch (msg->mh.msg_id) {
5727  	case BFI_PHY_I2H_QUERY_RSP:
5728  		status = be32_to_cpu(m.query->status);
5729  		bfa_trc(phy, status);
5730  
5731  		if (status == BFA_STATUS_OK) {
5732  			struct bfa_phy_attr_s *attr =
5733  				(struct bfa_phy_attr_s *) phy->ubuf;
5734  			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5735  					sizeof(struct bfa_phy_attr_s));
5736  			bfa_trc(phy, attr->status);
5737  			bfa_trc(phy, attr->length);
5738  		}
5739  
5740  		phy->status = status;
5741  		phy->op_busy = 0;
5742  		if (phy->cbfn)
5743  			phy->cbfn(phy->cbarg, phy->status);
5744  		break;
5745  	case BFI_PHY_I2H_STATS_RSP:
5746  		status = be32_to_cpu(m.stats->status);
5747  		bfa_trc(phy, status);
5748  
5749  		if (status == BFA_STATUS_OK) {
5750  			struct bfa_phy_stats_s *stats =
5751  				(struct bfa_phy_stats_s *) phy->ubuf;
5752  			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5753  				sizeof(struct bfa_phy_stats_s));
5754  			bfa_trc(phy, stats->status);
5755  		}
5756  
5757  		phy->status = status;
5758  		phy->op_busy = 0;
5759  		if (phy->cbfn)
5760  			phy->cbfn(phy->cbarg, phy->status);
5761  		break;
5762  	case BFI_PHY_I2H_WRITE_RSP:
5763  		status = be32_to_cpu(m.write->status);
5764  		bfa_trc(phy, status);
5765  
5766  		if (status != BFA_STATUS_OK || phy->residue == 0) {
5767  			phy->status = status;
5768  			phy->op_busy = 0;
5769  			if (phy->cbfn)
5770  				phy->cbfn(phy->cbarg, phy->status);
5771  		} else {
5772  			bfa_trc(phy, phy->offset);
5773  			bfa_phy_write_send(phy);
5774  		}
5775  		break;
5776  	case BFI_PHY_I2H_READ_RSP:
5777  		status = be32_to_cpu(m.read->status);
5778  		bfa_trc(phy, status);
5779  
5780  		if (status != BFA_STATUS_OK) {
5781  			phy->status = status;
5782  			phy->op_busy = 0;
5783  			if (phy->cbfn)
5784  				phy->cbfn(phy->cbarg, phy->status);
5785  		} else {
5786  			u32 len = be32_to_cpu(m.read->length);
5787  			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5788  			u16 *dbuf = (u16 *)phy->dbuf_kva;
5789  			int i, sz = len >> 1;
5790  
5791  			bfa_trc(phy, phy->offset);
5792  			bfa_trc(phy, len);
5793  
5794  			for (i = 0; i < sz; i++)
5795  				buf[i] = be16_to_cpu(dbuf[i]);
5796  
5797  			phy->residue -= len;
5798  			phy->offset += len;
5799  
5800  			if (phy->residue == 0) {
5801  				phy->status = status;
5802  				phy->op_busy = 0;
5803  				if (phy->cbfn)
5804  					phy->cbfn(phy->cbarg, phy->status);
5805  			} else
5806  				bfa_phy_read_send(phy);
5807  		}
5808  		break;
5809  	default:
5810  		WARN_ON(1);
5811  	}
5812  }
5813  
5814  /*
5815   * DCONF state machine events
5816   */
5817  enum bfa_dconf_event {
5818  	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5819  	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5820  	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5821  	BFA_DCONF_SM_TIMEOUT		= 4,	/* timer expiry */
5822  	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5823  	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5824  };
5825  
5826  /* forward declaration of DCONF state machine */
5827  static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5828  				enum bfa_dconf_event event);
5829  static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5830  				enum bfa_dconf_event event);
5831  static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5832  				enum bfa_dconf_event event);
5833  static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5834  				enum bfa_dconf_event event);
5835  static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5836  				enum bfa_dconf_event event);
5837  static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5838  				enum bfa_dconf_event event);
5839  static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5840  				enum bfa_dconf_event event);
5841  
5842  static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5843  static void bfa_dconf_timer(void *cbarg);
5844  static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5845  static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5846  
5847  /*
5848   * Beginning state of dconf module. Waiting for an event to start.
5849   */
5850  static void
5851  bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5852  {
5853  	bfa_status_t bfa_status;
5854  	bfa_trc(dconf->bfa, event);
5855  
5856  	switch (event) {
5857  	case BFA_DCONF_SM_INIT:
5858  		if (dconf->min_cfg) {
5859  			bfa_trc(dconf->bfa, dconf->min_cfg);
5860  			bfa_fsm_send_event(&dconf->bfa->iocfc,
5861  					IOCFC_E_DCONF_DONE);
5862  			return;
5863  		}
5864  		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5865  		bfa_timer_start(dconf->bfa, &dconf->timer,
5866  			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5867  		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5868  					BFA_FLASH_PART_DRV, dconf->instance,
5869  					dconf->dconf,
5870  					sizeof(struct bfa_dconf_s), 0,
5871  					bfa_dconf_init_cb, dconf->bfa);
5872  		if (bfa_status != BFA_STATUS_OK) {
5873  			bfa_timer_stop(&dconf->timer);
5874  			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5875  			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5876  			return;
5877  		}
5878  		break;
5879  	case BFA_DCONF_SM_EXIT:
5880  		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
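		/* fall through */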
5881  	case BFA_DCONF_SM_IOCDISABLE:
5882  	case BFA_DCONF_SM_WR:
5883  	case BFA_DCONF_SM_FLASH_COMP:
5884  		break;
5885  	default:
5886  		bfa_sm_fault(dconf->bfa, event);
5887  	}
5888  }
5889  
5890  /*
5891   * Read flash for dconf entries and make a call back to the driver once done.
5892   */
5893  static void
5894  bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5895  			enum bfa_dconf_event event)
5896  {
5897  	bfa_trc(dconf->bfa, event);
5898  
5899  	switch (event) {
5900  	case BFA_DCONF_SM_FLASH_COMP:
5901  		bfa_timer_stop(&dconf->timer);
5902  		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5903  		break;
5904  	case BFA_DCONF_SM_TIMEOUT:
5905  		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5906  		bfa_ioc_suspend(&dconf->bfa->ioc);
5907  		break;
5908  	case BFA_DCONF_SM_EXIT:
5909  		bfa_timer_stop(&dconf->timer);
5910  		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5911  		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5912  		break;
5913  	case BFA_DCONF_SM_IOCDISABLE:
5914  		bfa_timer_stop(&dconf->timer);
5915  		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5916  		break;
5917  	default:
5918  		bfa_sm_fault(dconf->bfa, event);
5919  	}
5920  }
5921  
5922  /*
5923   * DCONF Module is in ready state. Has completed the initialization.
5924   */
5925  static void
5926  bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5927  {
5928  	bfa_trc(dconf->bfa, event);
5929  
5930  	switch (event) {
5931  	case BFA_DCONF_SM_WR:
5932  		bfa_timer_start(dconf->bfa, &dconf->timer,
5933  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5934  		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5935  		break;
5936  	case BFA_DCONF_SM_EXIT:
5937  		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5938  		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5939  		break;
5940  	case BFA_DCONF_SM_INIT:
5941  	case BFA_DCONF_SM_IOCDISABLE:
5942  		break;
5943  	default:
5944  		bfa_sm_fault(dconf->bfa, event);
5945  	}
5946  }
5947  
5948  /*
5949   * Entries are dirty; write them back to flash when the update timer fires.
5950   */
5951  
5952  static void
5953  bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5954  {
5955  	bfa_trc(dconf->bfa, event);
5956  
5957  	switch (event) {
5958  	case BFA_DCONF_SM_TIMEOUT:
5959  		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5960  		bfa_dconf_flash_write(dconf);
5961  		break;
5962  	case BFA_DCONF_SM_WR:
5963  		bfa_timer_stop(&dconf->timer);
5964  		bfa_timer_start(dconf->bfa, &dconf->timer,
5965  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5966  		break;
5967  	case BFA_DCONF_SM_EXIT:
5968  		bfa_timer_stop(&dconf->timer);
5969  		bfa_timer_start(dconf->bfa, &dconf->timer,
5970  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5971  		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5972  		bfa_dconf_flash_write(dconf);
5973  		break;
5974  	case BFA_DCONF_SM_FLASH_COMP:
5975  		break;
5976  	case BFA_DCONF_SM_IOCDISABLE:
5977  		bfa_timer_stop(&dconf->timer);
5978  		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5979  		break;
5980  	default:
5981  		bfa_sm_fault(dconf->bfa, event);
5982  	}
5983  }
5984  
5985  /*
5986   * Sync the dconf entries to the flash.
5987   */
5988  static void
5989  bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5990  			enum bfa_dconf_event event)
5991  {
5992  	bfa_trc(dconf->bfa, event);
5993  
5994  	switch (event) {
5995  	case BFA_DCONF_SM_IOCDISABLE:
5996  	case BFA_DCONF_SM_FLASH_COMP:
5997  		bfa_timer_stop(&dconf->timer);
5998  		/* fall through */
5999  	case BFA_DCONF_SM_TIMEOUT:
6000  		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6001  		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6002  		break;
6003  	default:
6004  		bfa_sm_fault(dconf->bfa, event);
6005  	}
6006  }
6007  
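/*
 * A sync to flash is in progress; wait for it to complete.
 */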
6008  static void
6009  bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6010  {
6011  	bfa_trc(dconf->bfa, event);
6012  
6013  	switch (event) {
6014  	case BFA_DCONF_SM_FLASH_COMP:
6015  		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6016  		break;
6017  	case BFA_DCONF_SM_WR:
6018  		bfa_timer_start(dconf->bfa, &dconf->timer,
6019  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6020  		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6021  		break;
6022  	case BFA_DCONF_SM_EXIT:
6023  		bfa_timer_start(dconf->bfa, &dconf->timer,
6024  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6025  		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6026  		break;
6027  	case BFA_DCONF_SM_IOCDISABLE:
6028  		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6029  		break;
6030  	default:
6031  		bfa_sm_fault(dconf->bfa, event);
6032  	}
6033  }
6034  
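/*
 * IOC went down with unsaved entries; hold them until the IOC is
 * re-initialized or the module exits.
 */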
6035  static void
6036  bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6037  			enum bfa_dconf_event event)
6038  {
6039  	bfa_trc(dconf->bfa, event);
6040  
6041  	switch (event) {
6042  	case BFA_DCONF_SM_INIT:
6043  		bfa_timer_start(dconf->bfa, &dconf->timer,
6044  			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6045  		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6046  		break;
6047  	case BFA_DCONF_SM_EXIT:
6048  		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6049  		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6050  		break;
6051  	case BFA_DCONF_SM_IOCDISABLE:
6052  		break;
6053  	default:
6054  		bfa_sm_fault(dconf->bfa, event);
6055  	}
6056  }
6057  
6058  /*
6059   * Compute and return memory needed by DRV_CFG module.
6060   */
6061  void
6062  bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6063  		  struct bfa_s *bfa)
6064  {
6065  	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6066  
6067  	if (cfg->drvcfg.min_cfg)
6068  		bfa_mem_kva_setup(meminfo, dconf_kva,
6069  				sizeof(struct bfa_dconf_hdr_s));
6070  	else
6071  		bfa_mem_kva_setup(meminfo, dconf_kva,
6072  				sizeof(struct bfa_dconf_s));
6073  }
6074  
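/*
 * Attach the dconf module: claim its kva memory and start in uninit state.
 */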
6075  void
6076  bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6077  {
6078  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6079  
6080  	dconf->bfad = bfad;
6081  	dconf->bfa = bfa;
6082  	dconf->instance = bfa->ioc.port_id;
6083  	bfa_trc(bfa, dconf->instance);
6084  
6085  	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6086  	if (cfg->drvcfg.min_cfg) {
6087  		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6088  		dconf->min_cfg = BFA_TRUE;
6089  	} else {
6090  		dconf->min_cfg = BFA_FALSE;
6091  		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6092  	}
6093  
6094  	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6095  	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6096  }
6097  
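/*
 * Completion callback for the initial flash read; marks the read data
 * valid and refreshes a stale header signature/version.
 */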
6098  static void
6099  bfa_dconf_init_cb(void *arg, bfa_status_t status)
6100  {
6101  	struct bfa_s *bfa = arg;
6102  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6103  
6104  	if (status == BFA_STATUS_OK) {
6105  		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6106  		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6107  			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6108  		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6109  			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6110  	}
6111  	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6112  	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6113  }
6114  
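/*
 * Module init: kick off the initial flash read of the dconf entries.
 */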
6115  void
6116  bfa_dconf_modinit(struct bfa_s *bfa)
6117  {
6118  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6119  	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6120  }
6121  
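/*
 * Update timer expiry: post a timeout event to the state machine.
 */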
6122  static void bfa_dconf_timer(void *cbarg)
6123  {
6124  	struct bfa_dconf_mod_s *dconf = cbarg;
6125  	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6126  }
6127  
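/*
 * Forward an IOC disable to the dconf state machine.
 */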
6128  void
6129  bfa_dconf_iocdisable(struct bfa_s *bfa)
6130  {
6131  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6132  	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6133  }
6134  
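/*
 * Write the dconf entries to the driver partition of the flash.
 */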
6135  static bfa_status_t
6136  bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6137  {
6138  	bfa_status_t bfa_status;
6139  	bfa_trc(dconf->bfa, 0);
6140  
6141  	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6142  				BFA_FLASH_PART_DRV, dconf->instance,
6143  				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6144  				bfa_dconf_cbfn, dconf);
6145  	WARN_ON(bfa_status != BFA_STATUS_OK);
6147  	bfa_trc(dconf->bfa, bfa_status);
6148  
6149  	return bfa_status;
6150  }
6151  
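/*
 * Request a write-back of modified dconf entries; fails in min-cfg
 * mode or when the IOC is down with dirty entries.
 */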
6152  bfa_status_t
6153  bfa_dconf_update(struct bfa_s *bfa)
6154  {
6155  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6156  	bfa_trc(dconf->bfa, 0);
6157  	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6158  		return BFA_STATUS_FAILED;
6159  
6160  	if (dconf->min_cfg) {
6161  		bfa_trc(dconf->bfa, dconf->min_cfg);
6162  		return BFA_STATUS_FAILED;
6163  	}
6164  
6165  	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6166  	return BFA_STATUS_OK;
6167  }
6168  
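/*
 * Flash write completion callback.
 */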
6169  static void
6170  bfa_dconf_cbfn(void *arg, bfa_status_t status)
6171  {
6172  	struct bfa_dconf_mod_s *dconf = arg;
6173  	WARN_ON(status);
6174  	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6175  }
6176  
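/*
 * Module exit: trigger a final sync of dirty entries before teardown.
 */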
6177  void
6178  bfa_dconf_modexit(struct bfa_s *bfa)
6179  {
6180  	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6181  	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6182  }
6183  
6184  /*
6185   * FRU specific functions
6186   */
6187  
6188  #define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
6189  #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6190  #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6191  
6192  static void
6193  bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6194  {
6195  	struct bfa_fru_s *fru = cbarg;
6196  
6197  	bfa_trc(fru, event);
6198  
6199  	switch (event) {
6200  	case BFA_IOC_E_DISABLED:
6201  	case BFA_IOC_E_FAILED:
6202  		if (fru->op_busy) {
6203  			fru->status = BFA_STATUS_IOC_FAILURE;
6204  			fru->cbfn(fru->cbarg, fru->status);
6205  			fru->op_busy = 0;
6206  		}
6207  		break;
6208  
6209  	default:
6210  		break;
6211  	}
6212  }
6213  
6214  /*
6215   * Send fru write request.
6216   *
6217   * @param[in] cbarg - callback argument
6218   */
6219  static void
6220  bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6221  {
6222  	struct bfa_fru_s *fru = cbarg;
6223  	struct bfi_fru_write_req_s *msg =
6224  			(struct bfi_fru_write_req_s *) fru->mb.msg;
6225  	u32 len;
6226  
6227  	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6228  	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6229  				fru->residue : BFA_FRU_DMA_BUF_SZ;
6230  	msg->length = cpu_to_be32(len);
6231  
6232  	/*
6233  	 * indicate if it's the last msg of the whole write operation
6234  	 */
6235  	msg->last = (len == fru->residue) ? 1 : 0;
6236  
6237  	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6238  	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6239  	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6240  
6241  	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6242  	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6243  
6244  	fru->residue -= len;
6245  	fru->offset += len;
6246  }
6247  
6248  /*
6249   * Send fru read request.
6250   *
6251   * @param[in] cbarg - callback argument
6252   */
6253  static void
6254  bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6255  {
6256  	struct bfa_fru_s *fru = cbarg;
6257  	struct bfi_fru_read_req_s *msg =
6258  			(struct bfi_fru_read_req_s *) fru->mb.msg;
6259  	u32 len;
6260  
6261  	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6262  	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6263  				fru->residue : BFA_FRU_DMA_BUF_SZ;
6264  	msg->length = cpu_to_be32(len);
6265  	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6266  	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6267  	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6268  }
6269  
6270  /*
6271   * FRU memory info API.
6272   *
6273   * @param[in] mincfg - minimal cfg variable
6274   */
6275  u32
6276  bfa_fru_meminfo(bfa_boolean_t mincfg)
6277  {
6278  	/* min driver doesn't need fru */
6279  	if (mincfg)
6280  		return 0;
6281  
6282  	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6283  }
6284  
6285  /*
6286   * FRU attach API.
6287   *
6288   * @param[in] fru - fru structure
6289   * @param[in] ioc  - ioc structure
6290   * @param[in] dev  - device structure
6291   * @param[in] trcmod - trace module
6292   * @param[in] mincfg - minimal cfg variable
6293   */
6294  void
6295  bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6296  	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6297  {
6298  	fru->ioc = ioc;
6299  	fru->trcmod = trcmod;
6300  	fru->cbfn = NULL;
6301  	fru->cbarg = NULL;
6302  	fru->op_busy = 0;
6303  
6304  	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6305  	bfa_q_qe_init(&fru->ioc_notify);
6306  	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6307  	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6308  
6309  	/* min driver doesn't need fru */
6310  	if (mincfg) {
6311  		fru->dbuf_kva = NULL;
6312  		fru->dbuf_pa = 0;
6313  	}
6314  }
6315  
6316  /*
6317   * Claim memory for fru
6318   *
6319   * @param[in] fru - fru structure
6320   * @param[in] dm_kva - pointer to virtual memory address
6321   * @param[in] dm_pa - physical memory address
6322   * @param[in] mincfg - minimal cfg variable
6323   */
6324  void
6325  bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6326  	bfa_boolean_t mincfg)
6327  {
6328  	if (mincfg)
6329  		return;
6330  
6331  	fru->dbuf_kva = dm_kva;
6332  	fru->dbuf_pa = dm_pa;
6333  	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6334  	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6335  	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6336  }
6337  
6338  /*
6339   * Update fru vpd image.
6340   *
6341   * @param[in] fru - fru structure
6342   * @param[in] buf - update data buffer
6343   * @param[in] len - data buffer length
6344   * @param[in] offset - offset relative to starting address
6345   * @param[in] cbfn - callback function
6346   * @param[in] cbarg - callback argument
6347   *
6348   * Return status.
6349   */
6350  bfa_status_t
6351  bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6352  		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6353  {
6354  	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6355  	bfa_trc(fru, len);
6356  	bfa_trc(fru, offset);
6357  
6358  	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6359  		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6360  		return BFA_STATUS_FRU_NOT_PRESENT;
6361  
6362  	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6363  		return BFA_STATUS_CMD_NOTSUPP;
6364  
6365  	if (!bfa_ioc_is_operational(fru->ioc))
6366  		return BFA_STATUS_IOC_NON_OP;
6367  
6368  	if (fru->op_busy) {
6369  		bfa_trc(fru, fru->op_busy);
6370  		return BFA_STATUS_DEVBUSY;
6371  	}
6372  
6373  	fru->op_busy = 1;
6374  
6375  	fru->cbfn = cbfn;
6376  	fru->cbarg = cbarg;
6377  	fru->residue = len;
6378  	fru->offset = 0;
6379  	fru->addr_off = offset;
6380  	fru->ubuf = buf;
6381  	fru->trfr_cmpl = trfr_cmpl;
6382  
6383  	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6384  
6385  	return BFA_STATUS_OK;
6386  }
6387  
6388  /*
6389   * Read fru vpd image.
6390   *
6391   * @param[in] fru - fru structure
6392   * @param[in] buf - read data buffer
6393   * @param[in] len - data buffer length
6394   * @param[in] offset - offset relative to starting address
6395   * @param[in] cbfn - callback function
6396   * @param[in] cbarg - callback argument
6397   *
6398   * Return status.
6399   */
6400  bfa_status_t
6401  bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6402  		bfa_cb_fru_t cbfn, void *cbarg)
6403  {
6404  	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6405  	bfa_trc(fru, len);
6406  	bfa_trc(fru, offset);
6407  
6408  	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6409  		return BFA_STATUS_FRU_NOT_PRESENT;
6410  
6411  	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6412  		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6413  		return BFA_STATUS_CMD_NOTSUPP;
6414  
6415  	if (!bfa_ioc_is_operational(fru->ioc))
6416  		return BFA_STATUS_IOC_NON_OP;
6417  
6418  	if (fru->op_busy) {
6419  		bfa_trc(fru, fru->op_busy);
6420  		return BFA_STATUS_DEVBUSY;
6421  	}
6422  
6423  	fru->op_busy = 1;
6424  
6425  	fru->cbfn = cbfn;
6426  	fru->cbarg = cbarg;
6427  	fru->residue = len;
6428  	fru->offset = 0;
6429  	fru->addr_off = offset;
6430  	fru->ubuf = buf;
6431  	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6432  
6433  	return BFA_STATUS_OK;
6434  }
6435  
6436  /*
6437   * Get maximum size fru vpd image.
6438   *
6439   * @param[in] fru - fru structure
6440   * @param[out] size - maximum size of fru vpd data
6441   *
6442   * Return status.
6443   */
6444  bfa_status_t
6445  bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6446  {
6447  	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6448  		return BFA_STATUS_FRU_NOT_PRESENT;
6449  
6450  	if (!bfa_ioc_is_operational(fru->ioc))
6451  		return BFA_STATUS_IOC_NON_OP;
6452  
6453  	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6454  		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6455  		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6456  	else
6457  		return BFA_STATUS_CMD_NOTSUPP;
6458  	return BFA_STATUS_OK;
6459  }

6460  /*
6461   * tfru write.
6462   *
6463   * @param[in] fru - fru structure
6464   * @param[in] buf - update data buffer
6465   * @param[in] len - data buffer length
6466   * @param[in] offset - offset relative to starting address
6467   * @param[in] cbfn - callback function
6468   * @param[in] cbarg - callback argument
6469   *
6470   * Return status.
6471   */
6472  bfa_status_t
6473  bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6474  	       bfa_cb_fru_t cbfn, void *cbarg)
6475  {
6476  	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6477  	bfa_trc(fru, len);
6478  	bfa_trc(fru, offset);
6479  	bfa_trc(fru, *((u8 *) buf));
6480  
6481  	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6482  		return BFA_STATUS_FRU_NOT_PRESENT;
6483  
6484  	if (!bfa_ioc_is_operational(fru->ioc))
6485  		return BFA_STATUS_IOC_NON_OP;
6486  
6487  	if (fru->op_busy) {
6488  		bfa_trc(fru, fru->op_busy);
6489  		return BFA_STATUS_DEVBUSY;
6490  	}
6491  
6492  	fru->op_busy = 1;
6493  
6494  	fru->cbfn = cbfn;
6495  	fru->cbarg = cbarg;
6496  	fru->residue = len;
6497  	fru->offset = 0;
6498  	fru->addr_off = offset;
6499  	fru->ubuf = buf;
6500  
6501  	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6502  
6503  	return BFA_STATUS_OK;
6504  }
6505  
6506  /*
6507   * tfru read.
6508   *
6509   * @param[in] fru - fru structure
6510   * @param[in] buf - read data buffer
6511   * @param[in] len - data buffer length
6512   * @param[in] offset - offset relative to starting address
6513   * @param[in] cbfn - callback function
6514   * @param[in] cbarg - callback argument
6515   *
6516   * Return status.
6517   */
6518  bfa_status_t
6519  bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6520  	      bfa_cb_fru_t cbfn, void *cbarg)
6521  {
6522  	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6523  	bfa_trc(fru, len);
6524  	bfa_trc(fru, offset);
6525  
6526  	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6527  		return BFA_STATUS_FRU_NOT_PRESENT;
6528  
6529  	if (!bfa_ioc_is_operational(fru->ioc))
6530  		return BFA_STATUS_IOC_NON_OP;
6531  
6532  	if (fru->op_busy) {
6533  		bfa_trc(fru, fru->op_busy);
6534  		return BFA_STATUS_DEVBUSY;
6535  	}
6536  
6537  	fru->op_busy = 1;
6538  
6539  	fru->cbfn = cbfn;
6540  	fru->cbarg = cbarg;
6541  	fru->residue = len;
6542  	fru->offset = 0;
6543  	fru->addr_off = offset;
6544  	fru->ubuf = buf;
6545  	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6546  
6547  	return BFA_STATUS_OK;
6548  }
6549  
6550  /*
6551   * Process fru response messages upon receiving interrupts.
6552   *
6553   * @param[in] fruarg - fru structure
6554   * @param[in] msg - message structure
6555   */
6556  void
6557  bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6558  {
6559  	struct bfa_fru_s *fru = fruarg;
6560  	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6561  	u32 status;
6562  
6563  	bfa_trc(fru, msg->mh.msg_id);
6564  
6565  	if (!fru->op_busy) {
6566  		/*
6567  		 * receiving response after ioc failure
6568  		 */
6569  		bfa_trc(fru, 0x9999);
6570  		return;
6571  	}
6572  
6573  	switch (msg->mh.msg_id) {
6574  	case BFI_FRUVPD_I2H_WRITE_RSP:
6575  	case BFI_TFRU_I2H_WRITE_RSP:
6576  		status = be32_to_cpu(rsp->status);
6577  		bfa_trc(fru, status);
6578  
6579  		if (status != BFA_STATUS_OK || fru->residue == 0) {
6580  			fru->status = status;
6581  			fru->op_busy = 0;
6582  			if (fru->cbfn)
6583  				fru->cbfn(fru->cbarg, fru->status);
6584  		} else {
6585  			bfa_trc(fru, fru->offset);
6586  			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6587  				bfa_fru_write_send(fru,
6588  					BFI_FRUVPD_H2I_WRITE_REQ);
6589  			else
6590  				bfa_fru_write_send(fru,
6591  					BFI_TFRU_H2I_WRITE_REQ);
6592  		}
6593  		break;
6594  	case BFI_FRUVPD_I2H_READ_RSP:
6595  	case BFI_TFRU_I2H_READ_RSP:
6596  		status = be32_to_cpu(rsp->status);
6597  		bfa_trc(fru, status);
6598  
6599  		if (status != BFA_STATUS_OK) {
6600  			fru->status = status;
6601  			fru->op_busy = 0;
6602  			if (fru->cbfn)
6603  				fru->cbfn(fru->cbarg, fru->status);
6604  		} else {
6605  			u32 len = be32_to_cpu(rsp->length);
6606  
6607  			bfa_trc(fru, fru->offset);
6608  			bfa_trc(fru, len);
6609  
6610  			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6611  			fru->residue -= len;
6612  			fru->offset += len;
6613  
6614  			if (fru->residue == 0) {
6615  				fru->status = status;
6616  				fru->op_busy = 0;
6617  				if (fru->cbfn)
6618  					fru->cbfn(fru->cbarg, fru->status);
6619  			} else {
6620  				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6621  					bfa_fru_read_send(fru,
6622  						BFI_FRUVPD_H2I_READ_REQ);
6623  				else
6624  					bfa_fru_read_send(fru,
6625  						BFI_TFRU_H2I_READ_REQ);
6626  			}
6627  		}
6628  		break;
6629  	default:
6630  		WARN_ON(1);
6631  	}
6632  }
6633  
6634  /*
6635   * register definitions
6636   */
6637  #define FLI_CMD_REG			0x0001d000
6638  #define FLI_RDDATA_REG			0x0001d010
6639  #define FLI_ADDR_REG			0x0001d004
6640  #define FLI_DEV_STATUS_REG		0x0001d014
6641  
6642  #define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
6643  #define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
6644  #define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
6645  #define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
6646  
6647  enum bfa_flash_cmd {
6648  	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
6649  	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
6650  };
6651  
6652  /**
6653   * @brief hardware error definition
6654   */
6655  enum bfa_flash_err {
6656  	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
6657  	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
6658  	BFA_FLASH_BAD		= -3,	/*!< flash bad */
6659  	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
6660  	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
6661  	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
6662  	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
6663  	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
6664  	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
6665  };
6666  
6667  /**
6668   * @brief flash command register data structure
6669   */
6670  union bfa_flash_cmd_reg_u {
6671  	struct {
6672  #ifdef __BIG_ENDIAN
6673  		u32	act:1;
6674  		u32	rsv:1;
6675  		u32	write_cnt:9;
6676  		u32	read_cnt:9;
6677  		u32	addr_cnt:4;
6678  		u32	cmd:8;
6679  #else
6680  		u32	cmd:8;
6681  		u32	addr_cnt:4;
6682  		u32	read_cnt:9;
6683  		u32	write_cnt:9;
6684  		u32	rsv:1;
6685  		u32	act:1;
6686  #endif
6687  	} r;
6688  	u32	i;
6689  };
6690  
6691  /**
6692   * @brief flash device status register data structure
6693   */
6694  union bfa_flash_dev_status_reg_u {
6695  	struct {
6696  #ifdef __BIG_ENDIAN
6697  		u32	rsv:21;
6698  		u32	fifo_cnt:6;
6699  		u32	busy:1;
6700  		u32	init_status:1;
6701  		u32	present:1;
6702  		u32	bad:1;
6703  		u32	good:1;
6704  #else
6705  		u32	good:1;
6706  		u32	bad:1;
6707  		u32	present:1;
6708  		u32	init_status:1;
6709  		u32	busy:1;
6710  		u32	fifo_cnt:6;
6711  		u32	rsv:21;
6712  #endif
6713  	} r;
6714  	u32	i;
6715  };
6716  
6717  /**
6718   * @brief flash address register data structure
6719   */
6720  union bfa_flash_addr_reg_u {
6721  	struct {
6722  #ifdef __BIG_ENDIAN
6723  		u32	addr:24;
6724  		u32	dummy:8;
6725  #else
6726  		u32	dummy:8;
6727  		u32	addr:24;
6728  #endif
6729  	} r;
6730  	u32	i;
6731  };
6732  
6733  /**
6734   * Flash raw private functions
6735   */
6736  static void
6737  bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6738  		  u8 rd_cnt, u8 ad_cnt, u8 op)
6739  {
6740  	union bfa_flash_cmd_reg_u cmd;
6741  
6742  	cmd.i = 0;
6743  	cmd.r.act = 1;
6744  	cmd.r.write_cnt = wr_cnt;
6745  	cmd.r.read_cnt = rd_cnt;
6746  	cmd.r.addr_cnt = ad_cnt;
6747  	cmd.r.cmd = op;
6748  	writel(cmd.i, (pci_bar + FLI_CMD_REG));
6749  }
6750  
6751  static void
6752  bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6753  {
6754  	union bfa_flash_addr_reg_u addr;
6755  
6756  	addr.r.addr = address & 0x00ffffff;
6757  	addr.r.dummy = 0;
6758  	writel(addr.i, (pci_bar + FLI_ADDR_REG));
6759  }
6760  
6761  static int
6762  bfa_flash_cmd_act_check(void __iomem *pci_bar)
6763  {
6764  	union bfa_flash_cmd_reg_u cmd;
6765  
6766  	cmd.i = readl(pci_bar + FLI_CMD_REG);
6767  
6768  	if (cmd.r.act)
6769  		return BFA_FLASH_ERR_CMD_ACT;
6770  
6771  	return 0;
6772  }
6773  
6774  /**
6775   * @brief
6776   * Flush FLI data fifo.
6777   *
6778   * @param[in] pci_bar - pci bar address
6780   *
6781   * Return 0 on success, negative error number on error.
6782   */
6783  static int
6784  bfa_flash_fifo_flush(void __iomem *pci_bar)
6785  {
6786  	u32 i;
6787  	u32 t;
6788  	union bfa_flash_dev_status_reg_u dev_status;
6789  
6790  	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6791  
6792  	if (!dev_status.r.fifo_cnt)
6793  		return 0;
6794  
6795  	/* fifo counter in terms of words */
6796  	for (i = 0; i < dev_status.r.fifo_cnt; i++)
6797  		t = readl(pci_bar + FLI_RDDATA_REG);
6798  
6799  	/*
6800  	 * Check the device status. It may take some time.
6801  	 */
6802  	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6803  		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6804  		if (!dev_status.r.fifo_cnt)
6805  			break;
6806  	}
6807  
6808  	if (dev_status.r.fifo_cnt)
6809  		return BFA_FLASH_ERR_FIFO_CNT;
6810  
6811  	return 0;
6812  }
6813  
6814  /**
6815   * @brief
6816   * Read flash status.
6817   *
6818   * @param[in] pci_bar - pci bar address
6819   *
6820   * Return 0 on success, negative error number on error.
6821   */
6822  static int
6823  bfa_flash_status_read(void __iomem *pci_bar)
6824  {
6825  	union bfa_flash_dev_status_reg_u	dev_status;
6826  	int				status;
6827  	u32			ret_status;
6828  	int				i;
6829  
6830  	status = bfa_flash_fifo_flush(pci_bar);
6831  	if (status < 0)
6832  		return status;
6833  
6834  	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6835  
6836  	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6837  		status = bfa_flash_cmd_act_check(pci_bar);
6838  		if (!status)
6839  			break;
6840  	}
6841  
6842  	if (status)
6843  		return status;
6844  
6845  	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6846  	if (!dev_status.r.fifo_cnt)
6847  		return BFA_FLASH_BUSY;
6848  
6849  	ret_status = readl(pci_bar + FLI_RDDATA_REG);
6850  	ret_status >>= 24;
6851  
6852  	status = bfa_flash_fifo_flush(pci_bar);
6853  	if (status < 0)
6854  		return status;
6855  
6856  	return ret_status;
6857  }
6858  
6859  /**
6860   * @brief
6861   * Start flash read operation.
6862   *
6863   * @param[in] pci_bar - pci bar address
6864   * @param[in] offset - flash address offset
6865   * @param[in] len - read data length
6866   * @param[in] buf - read data buffer
6867   *
6868   * Return 0 on success, negative error number on error.
6869   */
6870  static int
6871  bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6872  			 char *buf)
6873  {
6874  	int status;
6875  
6876  	/*
6877  	 * len must be a multiple of 4 and must not exceed the fifo size
6878  	 */
6879  	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6880  		return BFA_FLASH_ERR_LEN;
6881  
6882  	/*
6883  	 * check status
6884  	 */
6885  	status = bfa_flash_status_read(pci_bar);
6886  	if (status == BFA_FLASH_BUSY)
6887  		status = bfa_flash_status_read(pci_bar);
6888  
6889  	if (status < 0)
6890  		return status;
6891  
6892  	/*
6893  	 * check if write-in-progress bit is cleared
6894  	 */
6895  	if (status & BFA_FLASH_WIP_MASK)
6896  		return BFA_FLASH_ERR_WIP;
6897  
6898  	bfa_flash_set_addr(pci_bar, offset);
6899  
6900  	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6901  
6902  	return 0;
6903  }
6904  
6905  /**
6906   * @brief
6907   * Check flash read operation.
6908   *
6909   * @param[in] pci_bar - pci bar address
6910   *
6911   * Return flash device status, 1 if busy, 0 if not.
6912   */
6913  static u32
6914  bfa_flash_read_check(void __iomem *pci_bar)
6915  {
6916  	if (bfa_flash_cmd_act_check(pci_bar))
6917  		return 1;
6918  
6919  	return 0;
6920  }

6921  /**
6922   * @brief
6923   * End flash read operation.
6924   *
6925   * @param[in] pci_bar - pci bar address
6926   * @param[in] len - read data length
6927   * @param[in] buf - read data buffer
6928   *
6929   */
6930  static void
6931  bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6932  {
6934  	u32 i;
6935  
6936  	/*
6937  	 * read data fifo up to 32 words
6938  	 */
6939  	for (i = 0; i < len; i += 4) {
6940  		u32 w = readl(pci_bar + FLI_RDDATA_REG);
6941  		*((u32 *) (buf + i)) = swab32(w);
6942  	}
6943  
6944  	bfa_flash_fifo_flush(pci_bar);
6945  }
6946  
6960  #define FLASH_BLOCKING_OP_MAX   500
6961  #define FLASH_SEM_LOCK_REG	0x18820
6962  
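/*
 * Try to take the flash semaphore: a read of the lock register returns
 * the previous lock state, so reading 0 means the lock is now held.
 */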
6963  static int
6964  bfa_raw_sem_get(void __iomem *bar)
6965  {
6966  	int	locked;
6967  
6968  	locked = readl((bar + FLASH_SEM_LOCK_REG));
6969  	return !locked;
6971  }
6972  
6973  bfa_status_t
6974  bfa_flash_sem_get(void __iomem *bar)
6975  {
6976  	u32 n = FLASH_BLOCKING_OP_MAX;
6977  
6978  	while (!bfa_raw_sem_get(bar)) {
6979  		if (--n <= 0)
6980  			return BFA_STATUS_BADFLASH;
6981  		mdelay(10);
6982  	}
6983  	return BFA_STATUS_OK;
6984  }
6985  
6986  void
6987  bfa_flash_sem_put(void __iomem *bar)
6988  {
6989  	writel(0, (bar + FLASH_SEM_LOCK_REG));
6990  }
6991  
/**
 * @brief
 * Perform flash raw read.
 *
 * @param[in] pci_bar - pci bar address
 * @param[in] offset - flash partition address offset
 * @param[in] buf - read data buffer
 * @param[in] len - read data length
 *
 * Return status.
 */
6992  bfa_status_t
6993  bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
6994  		       u32 len)
6995  {
6996  	u32 n;
6997  	int status;
6998  	u32 off, l, s, residue, fifo_sz;
6999  
7000  	residue = len;
7001  	off = 0;
7002  	fifo_sz = BFA_FLASH_FIFO_SIZE;
7003  	status = bfa_flash_sem_get(pci_bar);
7004  	if (status != BFA_STATUS_OK)
7005  		return status;
7006  
7007  	while (residue) {
7008  		s = offset + off;
7009  		n = s / fifo_sz;
7010  		l = (n + 1) * fifo_sz - s;
7011  		if (l > residue)
7012  			l = residue;
7013  
7014  		status = bfa_flash_read_start(pci_bar, offset + off, l,
7015  								&buf[off]);
7016  		if (status < 0) {
7017  			bfa_flash_sem_put(pci_bar);
7018  			return BFA_STATUS_FAILED;
7019  		}
7020  
7021  		n = BFA_FLASH_BLOCKING_OP_MAX;
7022  		while (bfa_flash_read_check(pci_bar)) {
7023  			if (--n <= 0) {
7024  				bfa_flash_sem_put(pci_bar);
7025  				return BFA_STATUS_FAILED;
7026  			}
7027  		}
7028  
7029  		bfa_flash_read_end(pci_bar, l, &buf[off]);
7030  
7031  		residue -= l;
7032  		off += l;
7033  	}
7034  	bfa_flash_sem_put(pci_bar);
7035  
7036  	return BFA_STATUS_OK;
7037  }
7038