1  /* QLogic qed NIC Driver
2   * Copyright (c) 2015-2017  QLogic Corporation
3   *
4   * This software is available to you under a choice of one of two
5   * licenses.  You may choose to be licensed under the terms of the GNU
6   * General Public License (GPL) Version 2, available from the file
7   * COPYING in the main directory of this source tree, or the
8   * OpenIB.org BSD license below:
9   *
10   *     Redistribution and use in source and binary forms, with or
11   *     without modification, are permitted provided that the following
12   *     conditions are met:
13   *
14   *      - Redistributions of source code must retain the above
15   *        copyright notice, this list of conditions and the following
16   *        disclaimer.
17   *
18   *      - Redistributions in binary form must reproduce the above
19   *        copyright notice, this list of conditions and the following
20   *        disclaimer in the documentation and /or other materials
21   *        provided with the distribution.
22   *
23   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24   * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26   * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27   * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28   * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29   * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30   * SOFTWARE.
31   */
32  
33  #include <linux/types.h>
34  #include <asm/byteorder.h>
35  #include <linux/delay.h>
36  #include <linux/errno.h>
37  #include <linux/kernel.h>
38  #include <linux/slab.h>
39  #include <linux/spinlock.h>
40  #include <linux/string.h>
41  #include <linux/etherdevice.h>
42  #include "qed.h"
43  #include "qed_cxt.h"
44  #include "qed_dcbx.h"
45  #include "qed_hsi.h"
46  #include "qed_hw.h"
47  #include "qed_mcp.h"
48  #include "qed_reg_addr.h"
49  #include "qed_sriov.h"
50  
51  #define QED_MCP_RESP_ITER_US	10
52  
53  #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
54  #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
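/* With QED_MCP_RESP_ITER_US = 10, the retry counts above work out to
 * 500 * 1000 * 10 us = 5 sec for a regular mailbox command and
 * 50 * 1000 * 10 us = 500 msec for an MCP reset.
 */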
55  
56  #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
57  	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
58  	       _val)
59  
60  #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
61  	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
62  
63  #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
64  	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
65  		     offsetof(struct public_drv_mb, _field), _val)
66  
67  #define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
68  	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
69  		     offsetof(struct public_drv_mb, _field))
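/* Illustrative expansion: DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) becomes a
 * qed_rd() of mcp_info->drv_mb_addr + offsetof(struct public_drv_mb,
 * drv_mb_header), i.e. all driver-mailbox fields are addressed relative to
 * the per-PF mailbox base discovered at init time.
 */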
70  
71  #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
72  		  DRV_ID_PDA_COMP_VER_SHIFT)
73  
74  #define MCP_BYTES_PER_MBIT_SHIFT 17
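/* Presumably used to convert a rate in Mbit/s into bytes/s: a left shift by
 * 17 multiplies by 2^17 = (2^20 bits) / 8, with "mega" taken as the binary
 * 2^20.
 */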
75  
76  bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
77  {
78  	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
79  		return false;
80  	return true;
81  }
82  
83  void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
84  {
85  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
86  					PUBLIC_PORT);
87  	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
88  
89  	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
90  						   MFW_PORT(p_hwfn));
91  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
92  		   "port_addr = 0x%x, port_id 0x%02x\n",
93  		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
94  }
95  
96  void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
97  {
98  	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
99  	u32 tmp, i;
100  
101  	if (!p_hwfn->mcp_info->public_base)
102  		return;
103  
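	/* Each message dword sits right after the leading sup_msgs word of
	 * the MFW mailbox, hence the extra sizeof(u32) in the address
	 * computation below (assuming sup_msgs is the first field of
	 * struct public_mfw_mb).
	 */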
104  	for (i = 0; i < length; i++) {
105  		tmp = qed_rd(p_hwfn, p_ptt,
106  			     p_hwfn->mcp_info->mfw_mb_addr +
107  			     (i << 2) + sizeof(u32));
108  
109  		/* The MB data is actually BE; Need to force it to cpu */
110  		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
111  			be32_to_cpu((__force __be32)tmp);
112  	}
113  }
114  
115  struct qed_mcp_cmd_elem {
116  	struct list_head list;
117  	struct qed_mcp_mb_params *p_mb_params;
118  	u16 expected_seq_num;
119  	bool b_is_completed;
120  };
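/* Each in-flight mailbox command is tracked by one of these elements on
 * mcp_info->cmd_list (protected by cmd_lock); the MFW response is matched
 * back to its command via the expected sequence number.
 */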
121  
122  /* Must be called while cmd_lock is acquired */
123  static struct qed_mcp_cmd_elem *
124  qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
125  		     struct qed_mcp_mb_params *p_mb_params,
126  		     u16 expected_seq_num)
127  {
128  	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
129  
130  	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
131  	if (!p_cmd_elem)
132  		goto out;
133  
134  	p_cmd_elem->p_mb_params = p_mb_params;
135  	p_cmd_elem->expected_seq_num = expected_seq_num;
136  	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
137  out:
138  	return p_cmd_elem;
139  }
140  
141  /* Must be called while cmd_lock is acquired */
142  static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
143  				 struct qed_mcp_cmd_elem *p_cmd_elem)
144  {
145  	list_del(&p_cmd_elem->list);
146  	kfree(p_cmd_elem);
147  }
148  
149  /* Must be called while cmd_lock is acquired */
150  static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
151  						     u16 seq_num)
152  {
153  	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
154  
155  	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
156  		if (p_cmd_elem->expected_seq_num == seq_num)
157  			return p_cmd_elem;
158  	}
159  
160  	return NULL;
161  }
162  
163  int qed_mcp_free(struct qed_hwfn *p_hwfn)
164  {
165  	if (p_hwfn->mcp_info) {
166  		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
167  
168  		kfree(p_hwfn->mcp_info->mfw_mb_cur);
169  		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
170  
171  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
172  		list_for_each_entry_safe(p_cmd_elem,
173  					 p_tmp,
174  					 &p_hwfn->mcp_info->cmd_list, list) {
175  			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
176  		}
177  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
178  	}
179  
180  	kfree(p_hwfn->mcp_info);
181  	p_hwfn->mcp_info = NULL;
182  
183  	return 0;
184  }
185  
186  /* Maximum of 1 sec to wait for the SHMEM ready indication */
187  #define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
188  #define QED_MCP_SHMEM_RDY_ITER_MS	50
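/* 20 iterations x 50 msec = 1 sec, matching the comment above. */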
189  
190  static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
191  {
192  	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
193  	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
194  	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
195  	u32 drv_mb_offsize, mfw_mb_offsize;
196  	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
197  
198  	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
199  	if (!p_info->public_base) {
200  		DP_NOTICE(p_hwfn,
201  			  "The address of the MCP scratch-pad is not configured\n");
202  		return -EINVAL;
203  	}
204  
205  	p_info->public_base |= GRCBASE_MCP;
206  
207  	/* Get the MFW MB address and number of supported messages */
208  	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
209  				SECTION_OFFSIZE_ADDR(p_info->public_base,
210  						     PUBLIC_MFW_MB));
211  	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
212  	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
213  					    p_info->mfw_mb_addr +
214  					    offsetof(struct public_mfw_mb,
215  						     sup_msgs));
216  
217  	/* The driver can issue an MCP reset request, and might then read the
218  	 * SHMEM values before the MFW has completed initializing them.
219  	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
220  	 * data ready indication.
221  	 */
222  	while (!p_info->mfw_mb_length && --cnt) {
223  		msleep(msec);
224  		p_info->mfw_mb_length =
225  			(u16)qed_rd(p_hwfn, p_ptt,
226  				    p_info->mfw_mb_addr +
227  				    offsetof(struct public_mfw_mb, sup_msgs));
228  	}
229  
230  	if (!cnt) {
231  		DP_NOTICE(p_hwfn,
232  			  "Failed to get the SHMEM ready notification after %d msec\n",
233  			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
234  		return -EBUSY;
235  	}
236  
237  	/* Calculate the driver and MFW mailbox address */
238  	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
239  				SECTION_OFFSIZE_ADDR(p_info->public_base,
240  						     PUBLIC_DRV_MB));
241  	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
242  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
243  		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
244  		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
245  
246  	/* Get the current driver mailbox sequence before sending
247  	 * the first command
248  	 */
249  	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
250  			     DRV_MSG_SEQ_NUMBER_MASK;
251  
252  	/* Get current FW pulse sequence */
253  	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
254  				DRV_PULSE_SEQ_MASK;
255  
256  	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
257  
258  	return 0;
259  }
260  
261  int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
262  {
263  	struct qed_mcp_info *p_info;
264  	u32 size;
265  
266  	/* Allocate mcp_info structure */
267  	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
268  	if (!p_hwfn->mcp_info)
269  		goto err;
270  	p_info = p_hwfn->mcp_info;
271  
272  	/* Initialize the MFW spinlock */
273  	spin_lock_init(&p_info->cmd_lock);
274  	spin_lock_init(&p_info->link_lock);
275  
276  	INIT_LIST_HEAD(&p_info->cmd_list);
277  
278  	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
279  		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
280  		/* Do not free mcp_info here, since public_base indicates that
281  		 * the MCP is not initialized
282  		 */
283  		return 0;
284  	}
285  
286  	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
287  	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
288  	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
289  	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
290  		goto err;
291  
292  	return 0;
293  
294  err:
295  	qed_mcp_free(p_hwfn);
296  	return -ENOMEM;
297  }
298  
299  static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
300  				   struct qed_ptt *p_ptt)
301  {
302  	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
303  
304  	/* Use MCP history register to check if MCP reset occurred between init
305  	 * time and now.
306  	 */
307  	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
308  		DP_VERBOSE(p_hwfn,
309  			   QED_MSG_SP,
310  			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
311  			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
312  
313  		qed_load_mcp_offsets(p_hwfn, p_ptt);
314  		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
315  	}
316  }
317  
318  int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
319  {
320  	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
321  	int rc = 0;
322  
323  	if (p_hwfn->mcp_info->b_block_cmd) {
324  		DP_NOTICE(p_hwfn,
325  			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
326  		return -EBUSY;
327  	}
328  
329  	/* Ensure that only a single thread is accessing the mailbox */
330  	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
331  
332  	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
333  
334  	/* Set drv command along with the updated sequence */
335  	qed_mcp_reread_offsets(p_hwfn, p_ptt);
336  	seq = ++p_hwfn->mcp_info->drv_mb_seq;
337  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
338  
339  	do {
340  		/* Wait for MFW response */
341  		udelay(delay);
342  		/* Give the FW up to 500 msec (50*1000*10usec) */
343  	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
344  					      MISCS_REG_GENERIC_POR_0)) &&
345  		 (cnt++ < QED_MCP_RESET_RETRIES));
346  
347  	if (org_mcp_reset_seq !=
348  	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
349  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
350  			   "MCP was reset after %d usec\n", cnt * delay);
351  	} else {
352  		DP_ERR(p_hwfn, "Failed to reset MCP\n");
353  		rc = -EAGAIN;
354  	}
355  
356  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
357  
358  	return rc;
359  }
360  
361  /* Must be called while cmd_lock is acquired */
362  static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
363  {
364  	struct qed_mcp_cmd_elem *p_cmd_elem;
365  
366  	/* There is at most one pending command at any given time, and if it
367  	 * exists - it is placed at the HEAD of the list.
368  	 */
369  	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
370  		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
371  					      struct qed_mcp_cmd_elem, list);
372  		return !p_cmd_elem->b_is_completed;
373  	}
374  
375  	return false;
376  }
377  
378  /* Must be called while cmd_lock is acquired */
379  static int
380  qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
381  {
382  	struct qed_mcp_mb_params *p_mb_params;
383  	struct qed_mcp_cmd_elem *p_cmd_elem;
384  	u32 mcp_resp;
385  	u16 seq_num;
386  
387  	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
388  	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
389  
390  	/* Return if no new non-handled response has been received */
391  	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
392  		return -EAGAIN;
393  
394  	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
395  	if (!p_cmd_elem) {
396  		DP_ERR(p_hwfn,
397  		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
398  		       seq_num);
399  		return -EINVAL;
400  	}
401  
402  	p_mb_params = p_cmd_elem->p_mb_params;
403  
404  	/* Get the MFW response along with the sequence number */
405  	p_mb_params->mcp_resp = mcp_resp;
406  
407  	/* Get the MFW param */
408  	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
409  
410  	/* Get the union data */
411  	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
412  		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
413  				      offsetof(struct public_drv_mb,
414  					       union_data);
415  		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
416  				union_data_addr, p_mb_params->data_dst_size);
417  	}
418  
419  	p_cmd_elem->b_is_completed = true;
420  
421  	return 0;
422  }
423  
424  /* Must be called while cmd_lock is acquired */
425  static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
426  				    struct qed_ptt *p_ptt,
427  				    struct qed_mcp_mb_params *p_mb_params,
428  				    u16 seq_num)
429  {
430  	union drv_union_data union_data;
431  	u32 union_data_addr;
432  
433  	/* Set the union data */
434  	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
435  			  offsetof(struct public_drv_mb, union_data);
436  	memset(&union_data, 0, sizeof(union_data));
437  	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
438  		memcpy(&union_data, p_mb_params->p_data_src,
439  		       p_mb_params->data_src_size);
440  	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
441  		      sizeof(union_data));
442  
443  	/* Set the drv param */
444  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
445  
446  	/* Set the drv command along with the sequence number */
447  	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
448  
449  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
450  		   "MFW mailbox: command 0x%08x param 0x%08x\n",
451  		   (p_mb_params->cmd | seq_num), p_mb_params->param);
452  }
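/* Note on __qed_mcp_cmd_and_union() above: the union data and drv_mb_param
 * are written before drv_mb_header, since (presumably) the MFW only starts
 * processing the command once it observes the new sequence number in the
 * header.
 */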
453  
454  static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
455  {
456  	p_hwfn->mcp_info->b_block_cmd = block_cmd;
457  
458  	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
459  		block_cmd ? "Block" : "Unblock");
460  }
461  
462  static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
463  				   struct qed_ptt *p_ptt)
464  {
465  	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
466  	u32 delay = QED_MCP_RESP_ITER_US;
467  
468  	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
469  	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
470  	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
471  	udelay(delay);
472  	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
473  	udelay(delay);
474  	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
475  
476  	DP_NOTICE(p_hwfn,
477  		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
478  		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
479  }
480  
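/* Issue a mailbox command and wait for its completion: first wait for any
 * previous command to complete (the mailbox holds a single command at a
 * time), then post the new command under cmd_lock and poll for the MFW
 * response. Polling uses msleep() when the CAN_SLEEP flag is set and
 * udelay() otherwise, so the function may also be called from atomic
 * context.
 */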
481  static int
482  _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
483  		       struct qed_ptt *p_ptt,
484  		       struct qed_mcp_mb_params *p_mb_params,
485  		       u32 max_retries, u32 usecs)
486  {
487  	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
488  	struct qed_mcp_cmd_elem *p_cmd_elem;
489  	u16 seq_num;
490  	int rc = 0;
491  
492  	/* Wait until the mailbox is non-occupied */
493  	do {
494  		/* Exit the loop if there is no pending command, or if the
495  		 * pending command is completed during this iteration.
496  		 * The spinlock stays locked until the command is sent.
497  		 */
498  
499  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
500  
501  		if (!qed_mcp_has_pending_cmd(p_hwfn))
502  			break;
503  
504  		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
505  		if (!rc)
506  			break;
507  		else if (rc != -EAGAIN)
508  			goto err;
509  
510  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
511  
512  		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
513  			msleep(msecs);
514  		else
515  			udelay(usecs);
516  	} while (++cnt < max_retries);
517  
518  	if (cnt >= max_retries) {
519  		DP_NOTICE(p_hwfn,
520  			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
521  			  p_mb_params->cmd, p_mb_params->param);
522  		return -EAGAIN;
523  	}
524  
525  	/* Send the mailbox command */
526  	qed_mcp_reread_offsets(p_hwfn, p_ptt);
527  	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
528  	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
529  	if (!p_cmd_elem) {
530  		rc = -ENOMEM;
531  		goto err;
532  	}
533  
534  	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
535  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
536  
537  	/* Wait for the MFW response */
538  	do {
539  		/* Exit the loop if the command is already completed, or if the
540  		 * command is completed during this iteration.
541  		 * The spinlock stays locked until the list element is removed.
542  		 */
543  
544  		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
545  			msleep(msecs);
546  		else
547  			udelay(usecs);
548  
549  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
550  
551  		if (p_cmd_elem->b_is_completed)
552  			break;
553  
554  		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
555  		if (!rc)
556  			break;
557  		else if (rc != -EAGAIN)
558  			goto err;
559  
560  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
561  	} while (++cnt < max_retries);
562  
563  	if (cnt >= max_retries) {
564  		DP_NOTICE(p_hwfn,
565  			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
566  			  p_mb_params->cmd, p_mb_params->param);
567  		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
568  
569  		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
570  		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
571  		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
572  
573  		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
574  			qed_mcp_cmd_set_blocking(p_hwfn, true);
575  
576  		return -EAGAIN;
577  	}
578  
579  	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
580  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
581  
582  	DP_VERBOSE(p_hwfn,
583  		   QED_MSG_SP,
584  		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
585  		   p_mb_params->mcp_resp,
586  		   p_mb_params->mcp_param,
587  		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
588  
589  	/* Clear the sequence number from the MFW response */
590  	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
591  
592  	return 0;
593  
594  err:
595  	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
596  	return rc;
597  }
598  
599  static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
600  				 struct qed_ptt *p_ptt,
601  				 struct qed_mcp_mb_params *p_mb_params)
602  {
603  	size_t union_data_size = sizeof(union drv_union_data);
604  	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
605  	u32 usecs = QED_MCP_RESP_ITER_US;
606  
607  	/* MCP not initialized */
608  	if (!qed_mcp_is_init(p_hwfn)) {
609  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
610  		return -EBUSY;
611  	}
612  
613  	if (p_hwfn->mcp_info->b_block_cmd) {
614  		DP_NOTICE(p_hwfn,
615  			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
616  			  p_mb_params->cmd, p_mb_params->param);
617  		return -EBUSY;
618  	}
619  
620  	if (p_mb_params->data_src_size > union_data_size ||
621  	    p_mb_params->data_dst_size > union_data_size) {
622  		DP_ERR(p_hwfn,
623  		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
624  		       p_mb_params->data_src_size,
625  		       p_mb_params->data_dst_size, union_data_size);
626  		return -EINVAL;
627  	}
628  
629  	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
630  		max_retries = DIV_ROUND_UP(max_retries, 1000);
631  		usecs *= 1000;
632  	}
633  
634  	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
635  				      usecs);
636  }
637  
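/* Illustrative usage (mirrors the LOAD_DONE invocation later in this file):
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;
 */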
638  int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
639  		struct qed_ptt *p_ptt,
640  		u32 cmd,
641  		u32 param,
642  		u32 *o_mcp_resp,
643  		u32 *o_mcp_param)
644  {
645  	struct qed_mcp_mb_params mb_params;
646  	int rc;
647  
648  	memset(&mb_params, 0, sizeof(mb_params));
649  	mb_params.cmd = cmd;
650  	mb_params.param = param;
651  
652  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
653  	if (rc)
654  		return rc;
655  
656  	*o_mcp_resp = mb_params.mcp_resp;
657  	*o_mcp_param = mb_params.mcp_param;
658  
659  	return 0;
660  }
661  
662  static int
663  qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
664  		   struct qed_ptt *p_ptt,
665  		   u32 cmd,
666  		   u32 param,
667  		   u32 *o_mcp_resp,
668  		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
669  {
670  	struct qed_mcp_mb_params mb_params;
671  	int rc;
672  
673  	memset(&mb_params, 0, sizeof(mb_params));
674  	mb_params.cmd = cmd;
675  	mb_params.param = param;
676  	mb_params.p_data_src = i_buf;
677  	mb_params.data_src_size = (u8)i_txn_size;
678  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
679  	if (rc)
680  		return rc;
681  
682  	*o_mcp_resp = mb_params.mcp_resp;
683  	*o_mcp_param = mb_params.mcp_param;
684  
685  	/* nvm_info needs to be updated */
686  	p_hwfn->nvm_info.valid = false;
687  
688  	return 0;
689  }
690  
691  int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
692  		       struct qed_ptt *p_ptt,
693  		       u32 cmd,
694  		       u32 param,
695  		       u32 *o_mcp_resp,
696  		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
697  {
698  	struct qed_mcp_mb_params mb_params;
699  	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
700  	int rc;
701  
702  	memset(&mb_params, 0, sizeof(mb_params));
703  	mb_params.cmd = cmd;
704  	mb_params.param = param;
705  	mb_params.p_data_dst = raw_data;
706  
707  	/* Use the maximal value since the actual one is part of the response */
708  	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
709  
710  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
711  	if (rc)
712  		return rc;
713  
714  	*o_mcp_resp = mb_params.mcp_resp;
715  	*o_mcp_param = mb_params.mcp_param;
716  
717  	*o_txn_size = *o_mcp_param;
718  	memcpy(o_buf, raw_data, *o_txn_size);
719  
720  	return 0;
721  }
722  
723  static bool
724  qed_mcp_can_force_load(u8 drv_role,
725  		       u8 exist_drv_role,
726  		       enum qed_override_force_load override_force_load)
727  {
728  	bool can_force_load = false;
729  
730  	switch (override_force_load) {
731  	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
732  		can_force_load = true;
733  		break;
734  	case QED_OVERRIDE_FORCE_LOAD_NEVER:
735  		can_force_load = false;
736  		break;
737  	default:
738  		can_force_load = (drv_role == DRV_ROLE_OS &&
739  				  exist_drv_role == DRV_ROLE_PREBOOT) ||
740  				 (drv_role == DRV_ROLE_KDUMP &&
741  				  exist_drv_role == DRV_ROLE_OS);
742  		break;
743  	}
744  
745  	return can_force_load;
746  }
747  
748  static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
749  				   struct qed_ptt *p_ptt)
750  {
751  	u32 resp = 0, param = 0;
752  	int rc;
753  
754  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
755  			 &resp, &param);
756  	if (rc)
757  		DP_NOTICE(p_hwfn,
758  			  "Failed to send cancel load request, rc = %d\n", rc);
759  
760  	return rc;
761  }
762  
763  #define CONFIG_QEDE_BITMAP_IDX		BIT(0)
764  #define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
765  #define CONFIG_QEDR_BITMAP_IDX		BIT(2)
766  #define CONFIG_QEDF_BITMAP_IDX		BIT(4)
767  #define CONFIG_QEDI_BITMAP_IDX		BIT(5)
768  #define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
769  
770  static u32 qed_get_config_bitmap(void)
771  {
772  	u32 config_bitmap = 0x0;
773  
774  	if (IS_ENABLED(CONFIG_QEDE))
775  		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
776  
777  	if (IS_ENABLED(CONFIG_QED_SRIOV))
778  		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
779  
780  	if (IS_ENABLED(CONFIG_QED_RDMA))
781  		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
782  
783  	if (IS_ENABLED(CONFIG_QED_FCOE))
784  		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
785  
786  	if (IS_ENABLED(CONFIG_QED_ISCSI))
787  		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
788  
789  	if (IS_ENABLED(CONFIG_QED_LL2))
790  		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
791  
792  	return config_bitmap;
793  }
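/* Example: a build with only CONFIG_QEDE and CONFIG_QED_LL2 enabled would
 * report BIT(0) | BIT(6) = 0x41.
 */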
794  
795  struct qed_load_req_in_params {
796  	u8 hsi_ver;
797  #define QED_LOAD_REQ_HSI_VER_DEFAULT	0
798  #define QED_LOAD_REQ_HSI_VER_1		1
799  	u32 drv_ver_0;
800  	u32 drv_ver_1;
801  	u32 fw_ver;
802  	u8 drv_role;
803  	u8 timeout_val;
804  	u8 force_cmd;
805  	bool avoid_eng_reset;
806  };
807  
808  struct qed_load_req_out_params {
809  	u32 load_code;
810  	u32 exist_drv_ver_0;
811  	u32 exist_drv_ver_1;
812  	u32 exist_fw_ver;
813  	u8 exist_drv_role;
814  	u8 mfw_hsi_ver;
815  	bool drv_exists;
816  };
817  
818  static int
819  __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
820  		   struct qed_ptt *p_ptt,
821  		   struct qed_load_req_in_params *p_in_params,
822  		   struct qed_load_req_out_params *p_out_params)
823  {
824  	struct qed_mcp_mb_params mb_params;
825  	struct load_req_stc load_req;
826  	struct load_rsp_stc load_rsp;
827  	u32 hsi_ver;
828  	int rc;
829  
830  	memset(&load_req, 0, sizeof(load_req));
831  	load_req.drv_ver_0 = p_in_params->drv_ver_0;
832  	load_req.drv_ver_1 = p_in_params->drv_ver_1;
833  	load_req.fw_ver = p_in_params->fw_ver;
834  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
835  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
836  			  p_in_params->timeout_val);
837  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
838  			  p_in_params->force_cmd);
839  	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
840  			  p_in_params->avoid_eng_reset);
841  
842  	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
843  		  DRV_ID_MCP_HSI_VER_CURRENT :
844  		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
845  
846  	memset(&mb_params, 0, sizeof(mb_params));
847  	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
848  	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
849  	mb_params.p_data_src = &load_req;
850  	mb_params.data_src_size = sizeof(load_req);
851  	mb_params.p_data_dst = &load_rsp;
852  	mb_params.data_dst_size = sizeof(load_rsp);
853  	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
854  
855  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
856  		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
857  		   mb_params.param,
858  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
859  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
860  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
861  		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
862  
863  	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
864  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
865  			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
866  			   load_req.drv_ver_0,
867  			   load_req.drv_ver_1,
868  			   load_req.fw_ver,
869  			   load_req.misc0,
870  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
871  			   QED_MFW_GET_FIELD(load_req.misc0,
872  					     LOAD_REQ_LOCK_TO),
873  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
874  			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
875  	}
876  
877  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
878  	if (rc) {
879  		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
880  		return rc;
881  	}
882  
883  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
884  		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
885  	p_out_params->load_code = mb_params.mcp_resp;
886  
887  	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
888  	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
889  		DP_VERBOSE(p_hwfn,
890  			   QED_MSG_SP,
891  			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
892  			   load_rsp.drv_ver_0,
893  			   load_rsp.drv_ver_1,
894  			   load_rsp.fw_ver,
895  			   load_rsp.misc0,
896  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
897  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
898  			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
899  
900  		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
901  		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
902  		p_out_params->exist_fw_ver = load_rsp.fw_ver;
903  		p_out_params->exist_drv_role =
904  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
905  		p_out_params->mfw_hsi_ver =
906  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
907  		p_out_params->drv_exists =
908  		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
909  		    LOAD_RSP_FLAGS0_DRV_EXISTS;
910  	}
911  
912  	return 0;
913  }
914  
915  static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
916  				  enum qed_drv_role drv_role,
917  				  u8 *p_mfw_drv_role)
918  {
919  	switch (drv_role) {
920  	case QED_DRV_ROLE_OS:
921  		*p_mfw_drv_role = DRV_ROLE_OS;
922  		break;
923  	case QED_DRV_ROLE_KDUMP:
924  		*p_mfw_drv_role = DRV_ROLE_KDUMP;
925  		break;
926  	default:
927  		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
928  		return -EINVAL;
929  	}
930  
931  	return 0;
932  }
933  
934  enum qed_load_req_force {
935  	QED_LOAD_REQ_FORCE_NONE,
936  	QED_LOAD_REQ_FORCE_PF,
937  	QED_LOAD_REQ_FORCE_ALL,
938  };
939  
940  static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
942  				  enum qed_load_req_force force_cmd,
943  				  u8 *p_mfw_force_cmd)
944  {
945  	switch (force_cmd) {
946  	case QED_LOAD_REQ_FORCE_NONE:
947  		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
948  		break;
949  	case QED_LOAD_REQ_FORCE_PF:
950  		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
951  		break;
952  	case QED_LOAD_REQ_FORCE_ALL:
953  		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
954  		break;
955  	}
956  }
957  
958  int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
959  		     struct qed_ptt *p_ptt,
960  		     struct qed_load_req_params *p_params)
961  {
962  	struct qed_load_req_out_params out_params;
963  	struct qed_load_req_in_params in_params;
964  	u8 mfw_drv_role, mfw_force_cmd;
965  	int rc;
966  
967  	memset(&in_params, 0, sizeof(in_params));
968  	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
969  	in_params.drv_ver_0 = QED_VERSION;
970  	in_params.drv_ver_1 = qed_get_config_bitmap();
971  	in_params.fw_ver = STORM_FW_VERSION;
972  	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
973  	if (rc)
974  		return rc;
975  
976  	in_params.drv_role = mfw_drv_role;
977  	in_params.timeout_val = p_params->timeout_val;
978  	qed_get_mfw_force_cmd(p_hwfn,
979  			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
980  
981  	in_params.force_cmd = mfw_force_cmd;
982  	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
983  
984  	memset(&out_params, 0, sizeof(out_params));
985  	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
986  	if (rc)
987  		return rc;
988  
989  	/* First handle cases where another load request should/might be sent:
990  	 * - MFW expects the old interface [HSI version = 1]
991  	 * - MFW responds that a force load request is required
992  	 */
993  	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
994  		DP_INFO(p_hwfn,
995  			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
996  
997  		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
998  		memset(&out_params, 0, sizeof(out_params));
999  		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
1000  		if (rc)
1001  			return rc;
1002  	} else if (out_params.load_code ==
1003  		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
1004  		if (qed_mcp_can_force_load(in_params.drv_role,
1005  					   out_params.exist_drv_role,
1006  					   p_params->override_force_load)) {
1007  			DP_INFO(p_hwfn,
1008  				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1009  				in_params.drv_role, in_params.fw_ver,
1010  				in_params.drv_ver_0, in_params.drv_ver_1,
1011  				out_params.exist_drv_role,
1012  				out_params.exist_fw_ver,
1013  				out_params.exist_drv_ver_0,
1014  				out_params.exist_drv_ver_1);
1015  
1016  			qed_get_mfw_force_cmd(p_hwfn,
1017  					      QED_LOAD_REQ_FORCE_ALL,
1018  					      &mfw_force_cmd);
1019  
1020  			in_params.force_cmd = mfw_force_cmd;
1021  			memset(&out_params, 0, sizeof(out_params));
1022  			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1023  						&out_params);
1024  			if (rc)
1025  				return rc;
1026  		} else {
1027  			DP_NOTICE(p_hwfn,
1028  				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1029  				  in_params.drv_role, in_params.fw_ver,
1030  				  in_params.drv_ver_0, in_params.drv_ver_1,
1031  				  out_params.exist_drv_role,
1032  				  out_params.exist_fw_ver,
1033  				  out_params.exist_drv_ver_0,
1034  				  out_params.exist_drv_ver_1);
1035  			DP_NOTICE(p_hwfn,
1036  				  "Avoid sending a force load request to prevent disruption of active PFs\n");
1037  
1038  			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1039  			return -EBUSY;
1040  		}
1041  	}
1042  
1043  	/* Now handle the other types of responses.
1044  	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1045  	 * expected here after the additional revised load requests were sent.
1046  	 */
1047  	switch (out_params.load_code) {
1048  	case FW_MSG_CODE_DRV_LOAD_ENGINE:
1049  	case FW_MSG_CODE_DRV_LOAD_PORT:
1050  	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1051  		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1052  		    out_params.drv_exists) {
1053  			/* The role and fw/driver version match, but the PF is
1054  			 * already loaded and has not been unloaded gracefully.
1055  			 */
1056  			DP_NOTICE(p_hwfn,
1057  				  "PF is already loaded\n");
1058  			return -EINVAL;
1059  		}
1060  		break;
1061  	default:
1062  		DP_NOTICE(p_hwfn,
1063  			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1064  			  out_params.load_code);
1065  		return -EBUSY;
1066  	}
1067  
1068  	p_params->load_code = out_params.load_code;
1069  
1070  	return 0;
1071  }
1072  
1073  int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1074  {
1075  	u32 resp = 0, param = 0;
1076  	int rc;
1077  
1078  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1079  			 &param);
1080  	if (rc) {
1081  		DP_NOTICE(p_hwfn,
1082  			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1083  		return rc;
1084  	}
1085  
1086  	/* Check if there is a DID mismatch between nvm-cfg/efuse */
1087  	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1088  		DP_NOTICE(p_hwfn,
1089  			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1090  
1091  	return 0;
1092  }
1093  
1094  int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1095  {
1096  	struct qed_mcp_mb_params mb_params;
1097  	u32 wol_param;
1098  
1099  	switch (p_hwfn->cdev->wol_config) {
1100  	case QED_OV_WOL_DISABLED:
1101  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1102  		break;
1103  	case QED_OV_WOL_ENABLED:
1104  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1105  		break;
1106  	default:
1107  		DP_NOTICE(p_hwfn,
1108  			  "Unknown WoL configuration %02x\n",
1109  			  p_hwfn->cdev->wol_config);
1110  		/* Fallthrough */
1111  	case QED_OV_WOL_DEFAULT:
1112  		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1113  	}
1114  
1115  	memset(&mb_params, 0, sizeof(mb_params));
1116  	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1117  	mb_params.param = wol_param;
1118  	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1119  
1120  	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1121  }
1122  
1123  int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1124  {
1125  	struct qed_mcp_mb_params mb_params;
1126  	struct mcp_mac wol_mac;
1127  
1128  	memset(&mb_params, 0, sizeof(mb_params));
1129  	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1130  
1131  	/* Set the primary MAC if WoL is enabled */
1132  	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1133  		u8 *p_mac = p_hwfn->cdev->wol_mac;
1134  
1135  		memset(&wol_mac, 0, sizeof(wol_mac));
1136  		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1137  		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1138  				    p_mac[4] << 8 | p_mac[5];
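		/* E.g. MAC aa:bb:cc:dd:ee:ff is packed as
		 * mac_upper = 0x0000aabb and mac_lower = 0xccddeeff.
		 */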
1139  
1140  		DP_VERBOSE(p_hwfn,
1141  			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
1142  			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1143  			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1144  
1145  		mb_params.p_data_src = &wol_mac;
1146  		mb_params.data_src_size = sizeof(wol_mac);
1147  	}
1148  
1149  	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1150  }
1151  
1152  static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1153  				  struct qed_ptt *p_ptt)
1154  {
1155  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1156  					PUBLIC_PATH);
1157  	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1158  	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1159  				     QED_PATH_ID(p_hwfn));
1160  	u32 disabled_vfs[VF_MAX_STATIC / 32];
1161  	int i;
1162  
1163  	DP_VERBOSE(p_hwfn,
1164  		   QED_MSG_SP,
1165  		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1166  		   mfw_path_offsize, path_addr);
1167  
1168  	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1169  		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1170  					 path_addr +
1171  					 offsetof(struct public_path,
1172  						  mcp_vf_disabled) +
1173  					 sizeof(u32) * i);
1174  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1175  			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1176  			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1177  	}
1178  
1179  	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1180  		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1181  }
1182  
1183  int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1184  		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1185  {
1186  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1187  					PUBLIC_FUNC);
1188  	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1189  	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1190  				     MCP_PF_ID(p_hwfn));
1191  	struct qed_mcp_mb_params mb_params;
1192  	int rc;
1193  	int i;
1194  
1195  	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1196  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1197  			   "Acking VFs [%08x,...,%08x] - %08x\n",
1198  			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1199  
1200  	memset(&mb_params, 0, sizeof(mb_params));
1201  	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1202  	mb_params.p_data_src = vfs_to_ack;
1203  	mb_params.data_src_size = VF_MAX_STATIC / 8;
1204  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1205  	if (rc) {
1206  		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1207  		return -EBUSY;
1208  	}
1209  
1210  	/* Clear the ACK bits */
1211  	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1212  		qed_wr(p_hwfn, p_ptt,
1213  		       func_addr +
1214  		       offsetof(struct public_func, drv_ack_vf_disabled) +
1215  		       i * sizeof(u32), 0);
1216  
1217  	return rc;
1218  }
1219  
1220  static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1221  					      struct qed_ptt *p_ptt)
1222  {
1223  	u32 transceiver_state;
1224  
1225  	transceiver_state = qed_rd(p_hwfn, p_ptt,
1226  				   p_hwfn->mcp_info->port_addr +
1227  				   offsetof(struct public_port,
1228  					    transceiver_data));
1229  
1230  	DP_VERBOSE(p_hwfn,
1231  		   (NETIF_MSG_HW | QED_MSG_SP),
1232  		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1233  		   transceiver_state,
1234  		   (u32)(p_hwfn->mcp_info->port_addr +
1235  			  offsetof(struct public_port, transceiver_data)));
1236  
1237  	transceiver_state = GET_FIELD(transceiver_state,
1238  				      ETH_TRANSCEIVER_STATE);
1239  
1240  	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1241  		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1242  	else
1243  		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1244  }
1245  
1246  static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1247  				    struct qed_ptt *p_ptt,
1248  				    struct qed_mcp_link_state *p_link)
1249  {
1250  	u32 eee_status, val;
1251  
1252  	p_link->eee_adv_caps = 0;
1253  	p_link->eee_lp_adv_caps = 0;
1254  	eee_status = qed_rd(p_hwfn,
1255  			    p_ptt,
1256  			    p_hwfn->mcp_info->port_addr +
1257  			    offsetof(struct public_port, eee_status));
1258  	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1259  	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1260  	if (val & EEE_1G_ADV)
1261  		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1262  	if (val & EEE_10G_ADV)
1263  		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1264  	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1265  	if (val & EEE_1G_ADV)
1266  		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1267  	if (val & EEE_10G_ADV)
1268  		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1269  }
1270  
1271  static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1272  				  struct qed_ptt *p_ptt,
1273  				  struct public_func *p_data, int pfid)
1274  {
1275  	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1276  					PUBLIC_FUNC);
1277  	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1278  	u32 func_addr;
1279  	u32 i, size;
1280  
1281  	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1282  	memset(p_data, 0, sizeof(*p_data));
1283  
1284  	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1285  	for (i = 0; i < size / sizeof(u32); i++)
1286  		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1287  					    func_addr + (i << 2));
1288  	return size;
1289  }
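/* The copy above is bounded both by sizeof(struct public_func) and by the
 * SHMEM section size, presumably so that a layout mismatch between the
 * driver and the MFW cannot overrun either side.
 */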
1290  
1291  static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1292  				  struct public_func *p_shmem_info)
1293  {
1294  	struct qed_mcp_function_info *p_info;
1295  
1296  	p_info = &p_hwfn->mcp_info->func_info;
1297  
1298  	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1299  						  FUNC_MF_CFG_MIN_BW);
1300  	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1301  		DP_INFO(p_hwfn,
1302  			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1303  			p_info->bandwidth_min);
1304  		p_info->bandwidth_min = 1;
1305  	}
1306  
1307  	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1308  						  FUNC_MF_CFG_MAX_BW);
1309  	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1310  		DP_INFO(p_hwfn,
1311  			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1312  			p_info->bandwidth_max);
1313  		p_info->bandwidth_max = 100;
1314  	}
1315  }
1316  
1317  static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1318  				       struct qed_ptt *p_ptt, bool b_reset)
1319  {
1320  	struct qed_mcp_link_state *p_link;
1321  	u8 max_bw, min_bw;
1322  	u32 status = 0;
1323  
1324  	/* Prevent SW/attentions from doing this at the same time */
1325  	spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1326  
1327  	p_link = &p_hwfn->mcp_info->link_output;
1328  	memset(p_link, 0, sizeof(*p_link));
1329  	if (!b_reset) {
1330  		status = qed_rd(p_hwfn, p_ptt,
1331  				p_hwfn->mcp_info->port_addr +
1332  				offsetof(struct public_port, link_status));
1333  		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1334  			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1335  			   status,
1336  			   (u32)(p_hwfn->mcp_info->port_addr +
1337  				 offsetof(struct public_port, link_status)));
1338  	} else {
1339  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1340  			   "Resetting link indications\n");
1341  		goto out;
1342  	}
1343  
1344  	if (p_hwfn->b_drv_link_init) {
1345  		/* Link indication with modern MFW arrives as per-PF
1346  		 * indication.
1347  		 */
1348  		if (p_hwfn->mcp_info->capabilities &
1349  		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1350  			struct public_func shmem_info;
1351  
1352  			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1353  					       MCP_PF_ID(p_hwfn));
1354  			p_link->link_up = !!(shmem_info.status &
1355  					     FUNC_STATUS_VIRTUAL_LINK_UP);
1356  			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1357  			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1358  				   "Virtual link_up = %d\n", p_link->link_up);
1359  		} else {
1360  			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1361  			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1362  				   "Physical link_up = %d\n", p_link->link_up);
1363  		}
1364  	} else {
1365  		p_link->link_up = false;
1366  	}
1367  
1368  	p_link->full_duplex = true;
1369  	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1370  	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1371  		p_link->speed = 100000;
1372  		break;
1373  	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1374  		p_link->speed = 50000;
1375  		break;
1376  	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1377  		p_link->speed = 40000;
1378  		break;
1379  	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1380  		p_link->speed = 25000;
1381  		break;
1382  	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1383  		p_link->speed = 20000;
1384  		break;
1385  	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1386  		p_link->speed = 10000;
1387  		break;
1388  	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1389  		p_link->full_duplex = false;
1390  	/* Fall-through */
1391  	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1392  		p_link->speed = 1000;
1393  		break;
1394  	default:
1395  		p_link->speed = 0;
1396  		p_link->link_up = 0;
1397  	}
1398  
1399  	if (p_link->link_up && p_link->speed)
1400  		p_link->line_speed = p_link->speed;
1401  	else
1402  		p_link->line_speed = 0;
1403  
1404  	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1405  	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1406  
1407  	/* Max bandwidth configuration */
1408  	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1409  
1410  	/* Min bandwidth configuration */
1411  	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1412  	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1413  					    p_link->min_pf_rate);
1414  
1415  	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1416  	p_link->an_complete = !!(status &
1417  				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1418  	p_link->parallel_detection = !!(status &
1419  					LINK_STATUS_PARALLEL_DETECTION_USED);
1420  	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1421  
1422  	p_link->partner_adv_speed |=
1423  		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1424  		QED_LINK_PARTNER_SPEED_1G_FD : 0;
1425  	p_link->partner_adv_speed |=
1426  		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1427  		QED_LINK_PARTNER_SPEED_1G_HD : 0;
1428  	p_link->partner_adv_speed |=
1429  		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1430  		QED_LINK_PARTNER_SPEED_10G : 0;
1431  	p_link->partner_adv_speed |=
1432  		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1433  		QED_LINK_PARTNER_SPEED_20G : 0;
1434  	p_link->partner_adv_speed |=
1435  		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1436  		QED_LINK_PARTNER_SPEED_25G : 0;
1437  	p_link->partner_adv_speed |=
1438  		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1439  		QED_LINK_PARTNER_SPEED_40G : 0;
1440  	p_link->partner_adv_speed |=
1441  		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1442  		QED_LINK_PARTNER_SPEED_50G : 0;
1443  	p_link->partner_adv_speed |=
1444  		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1445  		QED_LINK_PARTNER_SPEED_100G : 0;
1446  
1447  	p_link->partner_tx_flow_ctrl_en =
1448  		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1449  	p_link->partner_rx_flow_ctrl_en =
1450  		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1451  
1452  	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1453  	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1454  		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1455  		break;
1456  	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1457  		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1458  		break;
1459  	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1460  		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1461  		break;
1462  	default:
1463  		p_link->partner_adv_pause = 0;
1464  	}
1465  
1466  	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1467  
1468  	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1469  		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1470  
1471  	qed_link_update(p_hwfn, p_ptt);
1472  out:
1473  	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1474  }
1475  
1476  int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1477  {
1478  	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1479  	struct qed_mcp_mb_params mb_params;
1480  	struct eth_phy_cfg phy_cfg;
1481  	int rc = 0;
1482  	u32 cmd;
1483  
1484  	/* Set the shmem configuration according to params */
1485  	memset(&phy_cfg, 0, sizeof(phy_cfg));
1486  	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1487  	if (!params->speed.autoneg)
1488  		phy_cfg.speed = params->speed.forced_speed;
1489  	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1490  	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1491  	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1492  	phy_cfg.adv_speed = params->speed.advertised_speeds;
1493  	phy_cfg.loopback_mode = params->loopback_mode;
1494  
1495  	/* There are MFWs that share this capability regardless of whether
1496  	 * this is feasible or not. And given that at the very least adv_caps
1497  	 * would be set internally by qed, we want to make sure LFA would
1498  	 * still work.
1499  	 */
1500  	if ((p_hwfn->mcp_info->capabilities &
1501  	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1502  		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1503  		if (params->eee.tx_lpi_enable)
1504  			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1505  		if (params->eee.adv_caps & QED_EEE_1G_ADV)
1506  			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1507  		if (params->eee.adv_caps & QED_EEE_10G_ADV)
1508  			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1509  		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1510  				    EEE_TX_TIMER_USEC_OFFSET) &
1511  				   EEE_TX_TIMER_USEC_MASK;
1512  	}
1513  
1514  	p_hwfn->b_drv_link_init = b_up;
1515  
1516  	if (b_up) {
1517  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1518  			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
1519  			   phy_cfg.speed,
1520  			   phy_cfg.pause,
1521  			   phy_cfg.adv_speed,
1522  			   phy_cfg.loopback_mode,
1523  			   phy_cfg.feature_config_flags);
1524  	} else {
1525  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1526  			   "Resetting link\n");
1527  	}
1528  
1529  	memset(&mb_params, 0, sizeof(mb_params));
1530  	mb_params.cmd = cmd;
1531  	mb_params.p_data_src = &phy_cfg;
1532  	mb_params.data_src_size = sizeof(phy_cfg);
1533  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1534  
1535  	/* if mcp fails to respond we must abort */
1536  	if (rc) {
1537  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1538  		return rc;
1539  	}
1540  
1541  	/* Mimic link-change attention, done for several reasons:
1542  	 *  - On reset, there's no guarantee MFW would trigger
1543  	 *    an attention.
1544  	 *  - On initialization, older MFWs might not indicate link change
1545  	 *    during LFA, so we'll never get an UP indication.
1546  	 */
1547  	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1548  
1549  	return 0;
1550  }
1551  
1552  u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1553  				 struct qed_ptt *p_ptt)
1554  {
1555  	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1556  
1557  	if (IS_VF(p_hwfn->cdev))
1558  		return -EINVAL;
1559  
1560  	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1561  						 PUBLIC_PATH);
1562  	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1563  	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1564  
1565  	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1566  			       path_addr +
1567  			       offsetof(struct public_path, process_kill)) &
1568  			PROCESS_KILL_COUNTER_MASK;
1569  
1570  	return proc_kill_cnt;
1571  }
1572  
1573  static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1574  					struct qed_ptt *p_ptt)
1575  {
1576  	struct qed_dev *cdev = p_hwfn->cdev;
1577  	u32 proc_kill_cnt;
1578  
1579  	/* Prevent possible attentions/interrupts during the recovery handling
1580  	 * and until the subsequent load phase, in which they are re-enabled.
1581  	 */
1582  	qed_int_igu_disable_int(p_hwfn, p_ptt);
1583  
1584  	DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1585  
1586  	/* The following operations should be done once, and thus in CMT mode
1587  	 * are carried out by only the first HW function.
1588  	 */
1589  	if (p_hwfn != QED_LEADING_HWFN(cdev))
1590  		return;
1591  
1592  	if (cdev->recov_in_prog) {
1593  		DP_NOTICE(p_hwfn,
1594  			  "Ignoring the indication since a recovery process is already in progress\n");
1595  		return;
1596  	}
1597  
1598  	cdev->recov_in_prog = true;
1599  
1600  	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1601  	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1602  
1603  	qed_schedule_recovery_handler(p_hwfn);
1604  }
1605  
1606  static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1607  					struct qed_ptt *p_ptt,
1608  					enum MFW_DRV_MSG_TYPE type)
1609  {
1610  	enum qed_mcp_protocol_type stats_type;
1611  	union qed_mcp_protocol_stats stats;
1612  	struct qed_mcp_mb_params mb_params;
1613  	u32 hsi_param;
1614  
1615  	switch (type) {
1616  	case MFW_DRV_MSG_GET_LAN_STATS:
1617  		stats_type = QED_MCP_LAN_STATS;
1618  		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1619  		break;
1620  	case MFW_DRV_MSG_GET_FCOE_STATS:
1621  		stats_type = QED_MCP_FCOE_STATS;
1622  		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1623  		break;
1624  	case MFW_DRV_MSG_GET_ISCSI_STATS:
1625  		stats_type = QED_MCP_ISCSI_STATS;
1626  		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1627  		break;
1628  	case MFW_DRV_MSG_GET_RDMA_STATS:
1629  		stats_type = QED_MCP_RDMA_STATS;
1630  		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1631  		break;
1632  	default:
1633  		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1634  		return;
1635  	}
1636  
1637  	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1638  
1639  	memset(&mb_params, 0, sizeof(mb_params));
1640  	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1641  	mb_params.param = hsi_param;
1642  	mb_params.p_data_src = &stats;
1643  	mb_params.data_src_size = sizeof(stats);
1644  	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1645  }
1646  
1647  static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1648  {
1649  	struct qed_mcp_function_info *p_info;
1650  	struct public_func shmem_info;
1651  	u32 resp = 0, param = 0;
1652  
1653  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1654  
1655  	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1656  
1657  	p_info = &p_hwfn->mcp_info->func_info;
1658  
1659  	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1660  	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1661  
1662  	/* Acknowledge the MFW */
1663  	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1664  		    &param);
1665  }
1666  
1667  static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1668  {
1669  	struct public_func shmem_info;
1670  	u32 resp = 0, param = 0;
1671  
1672  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1673  
1674  	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1675  						 FUNC_MF_CFG_OV_STAG_MASK;
1676  	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1677  	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1678  		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1679  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1680  			       p_hwfn->hw_info.ovlan);
1681  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1682  
1683  			/* Configure DB to add external vlan to EDPM packets */
1684  			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1685  			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1686  			       p_hwfn->hw_info.ovlan);
1687  		} else {
1688  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1689  			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1690  			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1691  			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1692  		}
1693  
1694  		qed_sp_pf_update_stag(p_hwfn);
1695  	}
1696  
1697  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1698  		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1699  
1700  	/* Acknowledge the MFW */
1701  	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1702  		    &resp, &param);
1703  }
1704  
1705  void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1706  {
1707  	struct public_func shmem_info;
1708  	u32 port_cfg, val;
1709  
1710  	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1711  		return;
1712  
1713  	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1714  	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1715  			  offsetof(struct public_port, oem_cfg_port));
1716  	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1717  		OEM_CFG_CHANNEL_TYPE_OFFSET;
1718  	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1719  		DP_NOTICE(p_hwfn,
1720  			  "Incorrect UFP Channel type  %d port_id 0x%02x\n",
1721  			  val, MFW_PORT(p_hwfn));
1722  
1723  	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1724  	if (val == OEM_CFG_SCHED_TYPE_ETS) {
1725  		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1726  	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1727  		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1728  	} else {
1729  		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1730  		DP_NOTICE(p_hwfn,
1731  			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1732  			  val, MFW_PORT(p_hwfn));
1733  	}
1734  
1735  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1736  	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1737  		OEM_CFG_FUNC_TC_OFFSET;
1738  	p_hwfn->ufp_info.tc = (u8)val;
1739  	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1740  		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1741  	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1742  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1743  	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1744  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1745  	} else {
1746  		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1747  		DP_NOTICE(p_hwfn,
1748  			  "Unknown Host priority control %d port_id 0x%02x\n",
1749  			  val, MFW_PORT(p_hwfn));
1750  	}
1751  
1752  	DP_NOTICE(p_hwfn,
1753  		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1754  		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1755  		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1756  }
1757  
1758  static int
1759  qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1760  {
1761  	qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1762  
1763  	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1764  		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1765  		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1766  					   p_hwfn->ufp_info.tc);
1767  
1768  		qed_qm_reconf(p_hwfn, p_ptt);
1769  	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1770  		/* Merge UFP TC with the dcbx TC data */
1771  		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1772  					  QED_DCBX_OPERATIONAL_MIB);
1773  	} else {
1774  		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1775  		return -EINVAL;
1776  	}
1777  
1778  	/* update storm FW with negotiation results */
1779  	qed_sp_pf_update_ufp(p_hwfn);
1780  
1781  	/* update stag pcp value */
1782  	qed_sp_pf_update_stag(p_hwfn);
1783  
1784  	return 0;
1785  }
1786  
1787  int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1788  			  struct qed_ptt *p_ptt)
1789  {
1790  	struct qed_mcp_info *info = p_hwfn->mcp_info;
1791  	int rc = 0;
1792  	bool found = false;
1793  	u16 i;
1794  
1795  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1796  
1797  	/* Read Messages from MFW */
1798  	qed_mcp_read_mb(p_hwfn, p_ptt);
1799  
1800  	/* Compare current messages to old ones */
1801  	for (i = 0; i < info->mfw_mb_length; i++) {
1802  		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
1803  			continue;
1804  
1805  		found = true;
1806  
1807  		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1808  			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
1809  			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
1810  
1811  		switch (i) {
1812  		case MFW_DRV_MSG_LINK_CHANGE:
1813  			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
1814  			break;
1815  		case MFW_DRV_MSG_VF_DISABLED:
1816  			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
1817  			break;
1818  		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
1819  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1820  						  QED_DCBX_REMOTE_LLDP_MIB);
1821  			break;
1822  		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
1823  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1824  						  QED_DCBX_REMOTE_MIB);
1825  			break;
1826  		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
1827  			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1828  						  QED_DCBX_OPERATIONAL_MIB);
1829  			break;
1830  		case MFW_DRV_MSG_OEM_CFG_UPDATE:
1831  			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
1832  			break;
1833  		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
1834  			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
1835  			break;
1836  		case MFW_DRV_MSG_ERROR_RECOVERY:
1837  			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
1838  			break;
1839  		case MFW_DRV_MSG_GET_LAN_STATS:
1840  		case MFW_DRV_MSG_GET_FCOE_STATS:
1841  		case MFW_DRV_MSG_GET_ISCSI_STATS:
1842  		case MFW_DRV_MSG_GET_RDMA_STATS:
1843  			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
1844  			break;
1845  		case MFW_DRV_MSG_BW_UPDATE:
1846  			qed_mcp_update_bw(p_hwfn, p_ptt);
1847  			break;
1848  		case MFW_DRV_MSG_S_TAG_UPDATE:
1849  			qed_mcp_update_stag(p_hwfn, p_ptt);
1850  			break;
1851  		case MFW_DRV_MSG_GET_TLV_REQ:
1852  			qed_mfw_tlv_req(p_hwfn);
1853  			break;
1854  		default:
1855  			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
1856  			rc = -EINVAL;
1857  		}
1858  	}
1859  
1860  	/* ACK everything */
1861  	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
1862  		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
1863  
1864  		/* The MFW expects the answer in BE, so force the write accordingly */
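		/* The ack words follow the header dword and the
		 * current-message array in the MFW mailbox, which is what the
		 * sizeof(u32) + MFW_DRV_MSG_MAX_DWORDS() offsets below skip.
		 */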
1865  		qed_wr(p_hwfn, p_ptt,
1866  		       info->mfw_mb_addr + sizeof(u32) +
1867  		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
1868  		       sizeof(u32) + i * sizeof(u32),
1869  		       (__force u32)val);
1870  	}
1871  
1872  	if (!found) {
1873  		DP_NOTICE(p_hwfn,
1874  			  "Received an MFW message indication but no new message!\n");
1875  		rc = -EINVAL;
1876  	}
1877  
1878  	/* Copy the new mfw messages into the shadow */
1879  	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
1880  
1881  	return rc;
1882  }
1883  
1884  int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
1885  			struct qed_ptt *p_ptt,
1886  			u32 *p_mfw_ver, u32 *p_running_bundle_id)
1887  {
1888  	u32 global_offsize;
1889  
1890  	if (IS_VF(p_hwfn->cdev)) {
1891  		if (p_hwfn->vf_iov_info) {
1892  			struct pfvf_acquire_resp_tlv *p_resp;
1893  
1894  			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1895  			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1896  			return 0;
1897  		} else {
1898  			DP_VERBOSE(p_hwfn,
1899  				   QED_MSG_IOV,
1900  				   "VF requested MFW version prior to ACQUIRE\n");
1901  			return -EINVAL;
1902  		}
1903  	}
1904  
1905  	global_offsize = qed_rd(p_hwfn, p_ptt,
1906  				SECTION_OFFSIZE_ADDR(p_hwfn->
1907  						     mcp_info->public_base,
1908  						     PUBLIC_GLOBAL));
1909  	*p_mfw_ver =
1910  	    qed_rd(p_hwfn, p_ptt,
1911  		   SECTION_ADDR(global_offsize,
1912  				0) + offsetof(struct public_global, mfw_ver));
1913  
1914  	if (p_running_bundle_id != NULL) {
1915  		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
1916  					      SECTION_ADDR(global_offsize, 0) +
1917  					      offsetof(struct public_global,
1918  						       running_bundle_id));
1919  	}
1920  
1921  	return 0;
1922  }
1923  
1924  int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
1925  			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
1926  {
1927  	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
1928  
1929  	if (IS_VF(p_hwfn->cdev))
1930  		return -EINVAL;
1931  
1932  	/* Read the address of the nvm_cfg */
1933  	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1934  	if (!nvm_cfg_addr) {
1935  		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1936  		return -EINVAL;
1937  	}
1938  
1939  	/* Read the offset of nvm_cfg1 */
1940  	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1941  
1942  	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1943  		       offsetof(struct nvm_cfg1, glob) +
1944  		       offsetof(struct nvm_cfg1_glob, mbi_version);
1945  	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
1946  			    mbi_ver_addr) &
1947  		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
1948  		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
1949  		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
1950  
1951  	return 0;
1952  }
1953  
1954  int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
1955  			   struct qed_ptt *p_ptt, u32 *p_media_type)
1956  {
1957  	*p_media_type = MEDIA_UNSPECIFIED;
1958  
1959  	if (IS_VF(p_hwfn->cdev))
1960  		return -EINVAL;
1961  
1962  	if (!qed_mcp_is_init(p_hwfn)) {
1963  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1964  		return -EBUSY;
1965  	}
1966  
1967  	if (!p_ptt) {
1968  		*p_media_type = MEDIA_UNSPECIFIED;
1969  		return -EINVAL;
1970  	}
1971  
1972  	*p_media_type = qed_rd(p_hwfn, p_ptt,
1973  			       p_hwfn->mcp_info->port_addr +
1974  			       offsetof(struct public_port,
1975  					media_type));
1976  
1977  	return 0;
1978  }
1979  
1980  int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
1981  				 struct qed_ptt *p_ptt,
1982  				 u32 *p_transceiver_state,
1983  				 u32 *p_transceiver_type)
1984  {
1985  	u32 transceiver_info;
1986  
1987  	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
1988  	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
1989  
1990  	if (IS_VF(p_hwfn->cdev))
1991  		return -EINVAL;
1992  
1993  	if (!qed_mcp_is_init(p_hwfn)) {
1994  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
1995  		return -EBUSY;
1996  	}
1997  
1998  	transceiver_info = qed_rd(p_hwfn, p_ptt,
1999  				  p_hwfn->mcp_info->port_addr +
2000  				  offsetof(struct public_port,
2001  					   transceiver_data));
2002  
2003  	*p_transceiver_state = (transceiver_info &
2004  				ETH_TRANSCEIVER_STATE_MASK) >>
2005  				ETH_TRANSCEIVER_STATE_OFFSET;
2006  
2007  	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2008  		*p_transceiver_type = (transceiver_info &
2009  				       ETH_TRANSCEIVER_TYPE_MASK) >>
2010  				       ETH_TRANSCEIVER_TYPE_OFFSET;
2011  	else
2012  		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2013  
2014  	return 0;
2015  }
2016  static bool qed_is_transceiver_ready(u32 transceiver_state,
2017  				     u32 transceiver_type)
2018  {
2019  	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2020  	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2021  	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2022  		return true;
2023  
2024  	return false;
2025  }
2026  
2027  int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2028  			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
2029  {
2030  	u32 transceiver_type, transceiver_state;
2031  	int ret;
2032  
2033  	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2034  					   &transceiver_type);
2035  	if (ret)
2036  		return ret;
2037  
2038  	if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
2039  		return -EINVAL;
2041  
2042  	switch (transceiver_type) {
2043  	case ETH_TRANSCEIVER_TYPE_1G_LX:
2044  	case ETH_TRANSCEIVER_TYPE_1G_SX:
2045  	case ETH_TRANSCEIVER_TYPE_1G_PCC:
2046  	case ETH_TRANSCEIVER_TYPE_1G_ACC:
2047  	case ETH_TRANSCEIVER_TYPE_1000BASET:
2048  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2049  		break;
2050  	case ETH_TRANSCEIVER_TYPE_10G_SR:
2051  	case ETH_TRANSCEIVER_TYPE_10G_LR:
2052  	case ETH_TRANSCEIVER_TYPE_10G_LRM:
2053  	case ETH_TRANSCEIVER_TYPE_10G_ER:
2054  	case ETH_TRANSCEIVER_TYPE_10G_PCC:
2055  	case ETH_TRANSCEIVER_TYPE_10G_ACC:
2056  	case ETH_TRANSCEIVER_TYPE_4x10G:
2057  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2058  		break;
2059  	case ETH_TRANSCEIVER_TYPE_40G_LR4:
2060  	case ETH_TRANSCEIVER_TYPE_40G_SR4:
2061  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2062  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2063  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2064  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2065  		break;
2066  	case ETH_TRANSCEIVER_TYPE_100G_AOC:
2067  	case ETH_TRANSCEIVER_TYPE_100G_SR4:
2068  	case ETH_TRANSCEIVER_TYPE_100G_LR4:
2069  	case ETH_TRANSCEIVER_TYPE_100G_ER4:
2070  	case ETH_TRANSCEIVER_TYPE_100G_ACC:
2071  		*p_speed_mask =
2072  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2073  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2074  		break;
2075  	case ETH_TRANSCEIVER_TYPE_25G_SR:
2076  	case ETH_TRANSCEIVER_TYPE_25G_LR:
2077  	case ETH_TRANSCEIVER_TYPE_25G_AOC:
2078  	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2079  	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2080  	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2081  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2082  		break;
2083  	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2084  	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2085  	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2086  	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2087  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2088  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2089  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2090  		break;
2091  	case ETH_TRANSCEIVER_TYPE_40G_CR4:
2092  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2093  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2094  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2095  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2096  		break;
2097  	case ETH_TRANSCEIVER_TYPE_100G_CR4:
2098  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2099  		*p_speed_mask =
2100  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2101  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2102  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2103  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2104  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2105  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2106  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2107  		break;
2108  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2109  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2110  	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2111  		*p_speed_mask =
2112  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2113  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2114  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2115  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2116  		break;
2117  	case ETH_TRANSCEIVER_TYPE_XLPPI:
2118  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2119  		break;
2120  	case ETH_TRANSCEIVER_TYPE_10G_BASET:
2121  		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2122  		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2123  		break;
2124  	default:
2125  		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2126  			transceiver_type);
2127  		*p_speed_mask = 0xff;
2128  		break;
2129  	}
2130  
2131  	return 0;
2132  }
2133  
2134  int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2135  			     struct qed_ptt *p_ptt, u32 *p_board_config)
2136  {
2137  	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2138  
2139  	if (IS_VF(p_hwfn->cdev))
2140  		return -EINVAL;
2141  
2142  	if (!qed_mcp_is_init(p_hwfn)) {
2143  		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2144  		return -EBUSY;
2145  	}
2146  	if (!p_ptt) {
2147  		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2148  		return -EINVAL;
2149  	}
2150  
2151  	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2152  	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2153  	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2154  			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2155  	*p_board_config = qed_rd(p_hwfn, p_ptt,
2156  				 port_cfg_addr +
2157  				 offsetof(struct nvm_cfg1_port,
2158  					  board_cfg));
2159  
2160  	return 0;
2161  }
2162  
2163  /* Old MFW has a global configuration for all PFs regarding RDMA support */
2164  static void
2165  qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2166  			       enum qed_pci_personality *p_proto)
2167  {
2168  	/* There wasn't ever a legacy MFW that published iwarp.
2169  	 * So at this point, this is either plain l2 or RoCE.
2170  	 */
2171  	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2172  		*p_proto = QED_PCI_ETH_ROCE;
2173  	else
2174  		*p_proto = QED_PCI_ETH;
2175  
2176  	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2177  		   "According to Legacy capabilities, L2 personality is %08x\n",
2178  		   (u32) *p_proto);
2179  }
2180  
2181  static int
2182  qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2183  			    struct qed_ptt *p_ptt,
2184  			    enum qed_pci_personality *p_proto)
2185  {
2186  	u32 resp = 0, param = 0;
2187  	int rc;
2188  
2189  	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2190  			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2191  	if (rc)
2192  		return rc;
2193  	if (resp != FW_MSG_CODE_OK) {
2194  		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2195  			   "MFW lacks support for command; Returns %08x\n",
2196  			   resp);
2197  		return -EINVAL;
2198  	}
2199  
2200  	switch (param) {
2201  	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2202  		*p_proto = QED_PCI_ETH;
2203  		break;
2204  	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2205  		*p_proto = QED_PCI_ETH_ROCE;
2206  		break;
2207  	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2208  		*p_proto = QED_PCI_ETH_IWARP;
2209  		break;
2210  	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2211  		*p_proto = QED_PCI_ETH_RDMA;
2212  		break;
2213  	default:
2214  		DP_NOTICE(p_hwfn,
2215  			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2216  			  param);
2217  		return -EINVAL;
2218  	}
2219  
2220  	DP_VERBOSE(p_hwfn,
2221  		   NETIF_MSG_IFUP,
2222  		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2223  		   (u32) *p_proto, resp, param);
2224  	return 0;
2225  }
2226  
2227  static int
2228  qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2229  			struct public_func *p_info,
2230  			struct qed_ptt *p_ptt,
2231  			enum qed_pci_personality *p_proto)
2232  {
2233  	int rc = 0;
2234  
2235  	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2236  	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2237  		if (!IS_ENABLED(CONFIG_QED_RDMA))
2238  			*p_proto = QED_PCI_ETH;
2239  		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2240  			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2241  		break;
2242  	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2243  		*p_proto = QED_PCI_ISCSI;
2244  		break;
2245  	case FUNC_MF_CFG_PROTOCOL_FCOE:
2246  		*p_proto = QED_PCI_FCOE;
2247  		break;
2248  	case FUNC_MF_CFG_PROTOCOL_ROCE:
2249  		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2250  	/* Fallthrough */
2251  	default:
2252  		rc = -EINVAL;
2253  	}
2254  
2255  	return rc;
2256  }
2257  
2258  int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2259  				 struct qed_ptt *p_ptt)
2260  {
2261  	struct qed_mcp_function_info *info;
2262  	struct public_func shmem_info;
2263  
2264  	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2265  	info = &p_hwfn->mcp_info->func_info;
2266  
2267  	info->pause_on_host = (shmem_info.config &
2268  			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2269  
2270  	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2271  				    &info->protocol)) {
2272  		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2273  		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2274  		return -EINVAL;
2275  	}
2276  
2277  	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2278  
2279  	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2280  		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2281  		info->mac[1] = (u8)(shmem_info.mac_upper);
2282  		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2283  		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2284  		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2285  		info->mac[5] = (u8)(shmem_info.mac_lower);
2286  
2287  		/* Store primary MAC for later possible WoL */
2288  		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2289  	} else {
2290  		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2291  	}
2292  
2293  	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2294  			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2295  	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2296  			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2297  
2298  	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2299  
2300  	info->mtu = (u16)shmem_info.mtu_size;
2301  
2302  	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2303  	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2304  	if (qed_mcp_is_init(p_hwfn)) {
2305  		u32 resp = 0, param = 0;
2306  		int rc;
2307  
2308  		rc = qed_mcp_cmd(p_hwfn, p_ptt,
2309  				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2310  		if (rc)
2311  			return rc;
2312  		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2313  			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2314  	}
2315  
2316  	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2317  		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
2318  		info->pause_on_host, info->protocol,
2319  		info->bandwidth_min, info->bandwidth_max,
2320  		info->mac[0], info->mac[1], info->mac[2],
2321  		info->mac[3], info->mac[4], info->mac[5],
2322  		info->wwn_port, info->wwn_node,
2323  		info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2324  
2325  	return 0;
2326  }
2327  
2328  struct qed_mcp_link_params
2329  *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2330  {
2331  	if (!p_hwfn || !p_hwfn->mcp_info)
2332  		return NULL;
2333  	return &p_hwfn->mcp_info->link_input;
2334  }
2335  
2336  struct qed_mcp_link_state
2337  *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2338  {
2339  	if (!p_hwfn || !p_hwfn->mcp_info)
2340  		return NULL;
2341  	return &p_hwfn->mcp_info->link_output;
2342  }
2343  
2344  struct qed_mcp_link_capabilities
2345  *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2346  {
2347  	if (!p_hwfn || !p_hwfn->mcp_info)
2348  		return NULL;
2349  	return &p_hwfn->mcp_info->link_capabilities;
2350  }
2351  
2352  int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2353  {
2354  	u32 resp = 0, param = 0;
2355  	int rc;
2356  
2357  	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2358  			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2359  
2360  	/* Wait for the drain to complete before returning */
2361  	msleep(1020);
2362  
2363  	return rc;
2364  }
2365  
2366  int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2367  			   struct qed_ptt *p_ptt, u32 *p_flash_size)
2368  {
2369  	u32 flash_size;
2370  
2371  	if (IS_VF(p_hwfn->cdev))
2372  		return -EINVAL;
2373  
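	/* MCP_REG_NVM_CFG4 encodes the flash size as a power-of-two exponent
	 * in Mbits; adding MCP_BYTES_PER_MBIT_SHIFT (1 Mbit == 2^17 bytes)
	 * turns the result into a size in bytes.
	 */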
2374  	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2375  	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2376  		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2377  	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2378  
2379  	*p_flash_size = flash_size;
2380  
2381  	return 0;
2382  }
2383  
2384  int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2385  {
2386  	struct qed_dev *cdev = p_hwfn->cdev;
2387  
2388  	if (cdev->recov_in_prog) {
2389  		DP_NOTICE(p_hwfn,
2390  			  "Avoid triggering a recovery since such a process is already in progress\n");
2391  		return -EAGAIN;
2392  	}
2393  
2394  	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2395  	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2396  
2397  	return 0;
2398  }
2399  
2400  #define QED_RECOVERY_PROLOG_SLEEP_MS    100
2401  
2402  int qed_recovery_prolog(struct qed_dev *cdev)
2403  {
2404  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2405  	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2406  	int rc;
2407  
2408  	/* Allow ongoing PCIe transactions to complete */
2409  	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2410  
2411  	/* Clear the PF's internal FID_enable in the PXP */
2412  	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2413  	if (rc)
2414  		DP_NOTICE(p_hwfn,
2415  			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2416  			  rc);
2417  
2418  	return rc;
2419  }
2420  
2421  static int
2422  qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2423  			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2424  {
2425  	u32 resp = 0, param = 0, rc_param = 0;
2426  	int rc;
2427  
2428  	/* Only Leader can configure MSIX, and need to take CMT into account */
2429  	if (!IS_LEAD_HWFN(p_hwfn))
2430  		return 0;
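	/* In CMT mode the request covers all engines, so the per-VF SB count
	 * handed to the MFW is scaled by the number of HW functions.
	 */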
2431  	num *= p_hwfn->cdev->num_hwfns;
2432  
2433  	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2434  		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2435  	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2436  		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2437  
2438  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2439  			 &resp, &rc_param);
2440  
2441  	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2442  		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2443  		rc = -EINVAL;
2444  	} else {
2445  		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2446  			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2447  			   num, vf_id);
2448  	}
2449  
2450  	return rc;
2451  }
2452  
2453  static int
2454  qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2455  			  struct qed_ptt *p_ptt, u8 num)
2456  {
2457  	u32 resp = 0, param = num, rc_param = 0;
2458  	int rc;
2459  
2460  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2461  			 param, &resp, &rc_param);
2462  
2463  	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2464  		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2465  		rc = -EINVAL;
2466  	} else {
2467  		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2468  			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2469  	}
2470  
2471  	return rc;
2472  }
2473  
2474  int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2475  			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2476  {
2477  	if (QED_IS_BB(p_hwfn->cdev))
2478  		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2479  	else
2480  		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2481  }
2482  
2483  int
2484  qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2485  			 struct qed_ptt *p_ptt,
2486  			 struct qed_mcp_drv_version *p_ver)
2487  {
2488  	struct qed_mcp_mb_params mb_params;
2489  	struct drv_version_stc drv_version;
2490  	__be32 val;
2491  	u32 i;
2492  	int rc;
2493  
2494  	memset(&drv_version, 0, sizeof(drv_version));
2495  	drv_version.version = p_ver->version;
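	/* The driver name is copied into the request as big-endian dwords so
	 * the big-endian MFW sees the string in the right byte order.
	 */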
2496  	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2497  		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2498  		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2499  	}
2500  
2501  	memset(&mb_params, 0, sizeof(mb_params));
2502  	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2503  	mb_params.p_data_src = &drv_version;
2504  	mb_params.data_src_size = sizeof(drv_version);
2505  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2506  	if (rc)
2507  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2508  
2509  	return rc;
2510  }
2511  
2512  /* A maximal 100 msec waiting time for the MCP to halt */
2513  #define QED_MCP_HALT_SLEEP_MS		10
2514  #define QED_MCP_HALT_MAX_RETRIES	10
2515  
2516  int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2517  {
2518  	u32 resp = 0, param = 0, cpu_state, cnt = 0;
2519  	int rc;
2520  
2521  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2522  			 &param);
2523  	if (rc) {
2524  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2525  		return rc;
2526  	}
2527  
2528  	do {
2529  		msleep(QED_MCP_HALT_SLEEP_MS);
2530  		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2531  		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2532  			break;
2533  	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2534  
2535  	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2536  		DP_NOTICE(p_hwfn,
2537  			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2538  			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2539  		return -EBUSY;
2540  	}
2541  
2542  	qed_mcp_cmd_set_blocking(p_hwfn, true);
2543  
2544  	return 0;
2545  }
2546  
2547  #define QED_MCP_RESUME_SLEEP_MS	10
2548  
2549  int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2550  {
2551  	u32 cpu_mode, cpu_state;
2552  
2553  	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2554  
2555  	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2556  	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2557  	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2558  	msleep(QED_MCP_RESUME_SLEEP_MS);
2559  	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2560  
2561  	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2562  		DP_NOTICE(p_hwfn,
2563  			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2564  			  cpu_mode, cpu_state);
2565  		return -EBUSY;
2566  	}
2567  
2568  	qed_mcp_cmd_set_blocking(p_hwfn, false);
2569  
2570  	return 0;
2571  }
2572  
2573  int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2574  				     struct qed_ptt *p_ptt,
2575  				     enum qed_ov_client client)
2576  {
2577  	u32 resp = 0, param = 0;
2578  	u32 drv_mb_param;
2579  	int rc;
2580  
2581  	switch (client) {
2582  	case QED_OV_CLIENT_DRV:
2583  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2584  		break;
2585  	case QED_OV_CLIENT_USER:
2586  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2587  		break;
2588  	case QED_OV_CLIENT_VENDOR_SPEC:
2589  		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2590  		break;
2591  	default:
2592  		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2593  		return -EINVAL;
2594  	}
2595  
2596  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2597  			 drv_mb_param, &resp, &param);
2598  	if (rc)
2599  		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2600  
2601  	return rc;
2602  }
2603  
2604  int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2605  				   struct qed_ptt *p_ptt,
2606  				   enum qed_ov_driver_state drv_state)
2607  {
2608  	u32 resp = 0, param = 0;
2609  	u32 drv_mb_param;
2610  	int rc;
2611  
2612  	switch (drv_state) {
2613  	case QED_OV_DRIVER_STATE_NOT_LOADED:
2614  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2615  		break;
2616  	case QED_OV_DRIVER_STATE_DISABLED:
2617  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2618  		break;
2619  	case QED_OV_DRIVER_STATE_ACTIVE:
2620  		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2621  		break;
2622  	default:
2623  		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2624  		return -EINVAL;
2625  	}
2626  
2627  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2628  			 drv_mb_param, &resp, &param);
2629  	if (rc)
2630  		DP_ERR(p_hwfn, "Failed to send driver state\n");
2631  
2632  	return rc;
2633  }
2634  
2635  int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2636  			  struct qed_ptt *p_ptt, u16 mtu)
2637  {
2638  	u32 resp = 0, param = 0;
2639  	u32 drv_mb_param;
2640  	int rc;
2641  
2642  	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2643  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2644  			 drv_mb_param, &resp, &param);
2645  	if (rc)
2646  		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2647  
2648  	return rc;
2649  }
2650  
2651  int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2652  			  struct qed_ptt *p_ptt, u8 *mac)
2653  {
2654  	struct qed_mcp_mb_params mb_params;
2655  	u32 mfw_mac[2];
2656  	int rc;
2657  
2658  	memset(&mb_params, 0, sizeof(mb_params));
2659  	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2660  	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2661  			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2662  	mb_params.param |= MCP_PF_ID(p_hwfn);
2663  
2664  	/* The MCP is big-endian, and on LE platforms PCI swaps SHMEM
2665  	 * accesses at 32-bit granularity.
2666  	 * The MAC therefore has to be written in native order (not byte
2667  	 * order), or the MFW would read it incorrectly after the swap.
2668  	 */
2669  	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2670  	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
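	/* For example (illustrative only), MAC 00:11:22:33:44:55 is packed as
	 * mfw_mac[0] = 0x00112233 and mfw_mac[1] = 0x44550000.
	 */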
2671  
2672  	mb_params.p_data_src = (u8 *)mfw_mac;
2673  	mb_params.data_src_size = 8;
2674  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2675  	if (rc)
2676  		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2677  
2678  	/* Store primary MAC for later possible WoL */
2679  	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2680  
2681  	return rc;
2682  }
2683  
2684  int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2685  			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2686  {
2687  	u32 resp = 0, param = 0;
2688  	u32 drv_mb_param;
2689  	int rc;
2690  
2691  	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2692  		DP_VERBOSE(p_hwfn, QED_MSG_SP,
2693  			   "Can't change WoL configuration when WoL isn't supported\n");
2694  		return -EINVAL;
2695  	}
2696  
2697  	switch (wol) {
2698  	case QED_OV_WOL_DEFAULT:
2699  		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2700  		break;
2701  	case QED_OV_WOL_DISABLED:
2702  		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2703  		break;
2704  	case QED_OV_WOL_ENABLED:
2705  		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2706  		break;
2707  	default:
2708  		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2709  		return -EINVAL;
2710  	}
2711  
2712  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2713  			 drv_mb_param, &resp, &param);
2714  	if (rc)
2715  		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2716  
2717  	/* Store the WoL update for a future unload */
2718  	p_hwfn->cdev->wol_config = (u8)wol;
2719  
2720  	return rc;
2721  }
2722  
2723  int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2724  			      struct qed_ptt *p_ptt,
2725  			      enum qed_ov_eswitch eswitch)
2726  {
2727  	u32 resp = 0, param = 0;
2728  	u32 drv_mb_param;
2729  	int rc;
2730  
2731  	switch (eswitch) {
2732  	case QED_OV_ESWITCH_NONE:
2733  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2734  		break;
2735  	case QED_OV_ESWITCH_VEB:
2736  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2737  		break;
2738  	case QED_OV_ESWITCH_VEPA:
2739  		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2740  		break;
2741  	default:
2742  		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2743  		return -EINVAL;
2744  	}
2745  
2746  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2747  			 drv_mb_param, &resp, &param);
2748  	if (rc)
2749  		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2750  
2751  	return rc;
2752  }
2753  
2754  int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2755  		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
2756  {
2757  	u32 resp = 0, param = 0, drv_mb_param;
2758  	int rc;
2759  
2760  	switch (mode) {
2761  	case QED_LED_MODE_ON:
2762  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2763  		break;
2764  	case QED_LED_MODE_OFF:
2765  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2766  		break;
2767  	case QED_LED_MODE_RESTORE:
2768  		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2769  		break;
2770  	default:
2771  		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2772  		return -EINVAL;
2773  	}
2774  
2775  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2776  			 drv_mb_param, &resp, &param);
2777  
2778  	return rc;
2779  }
2780  
2781  int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2782  			  struct qed_ptt *p_ptt, u32 mask_parities)
2783  {
2784  	u32 resp = 0, param = 0;
2785  	int rc;
2786  
2787  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
2788  			 mask_parities, &resp, &param);
2789  
2790  	if (rc) {
2791  		DP_ERR(p_hwfn,
2792  		       "MCP response failure for mask parities, aborting\n");
2793  	} else if (resp != FW_MSG_CODE_OK) {
2794  		DP_ERR(p_hwfn,
2795  		       "MCP did not acknowledge mask parity request. Old MFW?\n");
2796  		rc = -EINVAL;
2797  	}
2798  
2799  	return rc;
2800  }
2801  
2802  int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
2803  {
2804  	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
2805  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2806  	u32 resp = 0, resp_param = 0;
2807  	struct qed_ptt *p_ptt;
2808  	int rc = 0;
2809  
2810  	p_ptt = qed_ptt_acquire(p_hwfn);
2811  	if (!p_ptt)
2812  		return -EBUSY;
2813  
2814  	while (bytes_left > 0) {
2815  		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
2816  
2817  		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2818  					DRV_MSG_CODE_NVM_READ_NVRAM,
2819  					addr + offset +
2820  					(bytes_to_copy <<
2821  					 DRV_MB_PARAM_NVM_LEN_OFFSET),
2822  					&resp, &resp_param,
2823  					&read_len,
2824  					(u32 *)(p_buf + offset));
2825  
2826  		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
2827  			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
2828  			break;
2829  		}
2830  
2831  		/* This can be a lengthy process, and the scheduler might not
2832  		 * be preemptible. Sleep a bit to prevent CPU hogging.
2833  		 */
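		/* The modulo check below fires whenever the remaining length
		 * crosses a 4KB boundary, i.e. roughly once per 4KB read.
		 */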
2834  		if (bytes_left % 0x1000 <
2835  		    (bytes_left - read_len) % 0x1000)
2836  			usleep_range(1000, 2000);
2837  
2838  		offset += read_len;
2839  		bytes_left -= read_len;
2840  	}
2841  
2842  	cdev->mcp_nvm_resp = resp;
2843  	qed_ptt_release(p_hwfn, p_ptt);
2844  
2845  	return rc;
2846  }
2847  
2848  int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
2849  {
2850  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2851  	struct qed_ptt *p_ptt;
2852  
2853  	p_ptt = qed_ptt_acquire(p_hwfn);
2854  	if (!p_ptt)
2855  		return -EBUSY;
2856  
2857  	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
2858  	qed_ptt_release(p_hwfn, p_ptt);
2859  
2860  	return 0;
2861  }
2862  
2863  int qed_mcp_nvm_write(struct qed_dev *cdev,
2864  		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
2865  {
2866  	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
2867  	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2868  	struct qed_ptt *p_ptt;
2869  	int rc = -EINVAL;
2870  
2871  	p_ptt = qed_ptt_acquire(p_hwfn);
2872  	if (!p_ptt)
2873  		return -EBUSY;
2874  
2875  	switch (cmd) {
2876  	case QED_PUT_FILE_BEGIN:
2877  		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
2878  		break;
2879  	case QED_PUT_FILE_DATA:
2880  		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
2881  		break;
2882  	case QED_NVM_WRITE_NVRAM:
2883  		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
2884  		break;
2885  	default:
2886  		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
2887  		rc = -EINVAL;
2888  		goto out;
2889  	}
2890  
2891  	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
2892  	while (buf_idx < len) {
2893  		if (cmd == QED_PUT_FILE_BEGIN)
2894  			nvm_offset = addr;
2895  		else
2896  			nvm_offset = ((buf_size <<
2897  				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
2898  				       buf_idx;
2899  		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
2900  					&resp, &param, buf_size,
2901  					(u32 *)&p_buf[buf_idx]);
2902  		if (rc) {
2903  			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
2904  			resp = FW_MSG_CODE_ERROR;
2905  			break;
2906  		}
2907  
2908  		if (resp != FW_MSG_CODE_OK &&
2909  		    resp != FW_MSG_CODE_NVM_OK &&
2910  		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
2911  			DP_NOTICE(cdev,
2912  				  "nvm write failed, resp = 0x%08x\n", resp);
2913  			rc = -EINVAL;
2914  			break;
2915  		}
2916  
2917  		/* This can be a lengthy process, and the scheduler might not
2918  		 * be preemptible. Sleep a bit to prevent CPU hogging.
2919  		 */
2920  		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
2921  			usleep_range(1000, 2000);
2922  
2923  		/* For MBI upgrade, MFW response includes the next buffer offset
2924  		 * to be delivered to MFW.
2925  		 */
2926  		if (param && cmd == QED_PUT_FILE_DATA) {
2927  			buf_idx = QED_MFW_GET_FIELD(param,
2928  					FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
2929  			buf_size = QED_MFW_GET_FIELD(param,
2930  					 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
2931  		} else {
2932  			buf_idx += buf_size;
2933  			buf_size = min_t(u32, (len - buf_idx),
2934  					 MCP_DRV_NVM_BUF_LEN);
2935  		}
2936  	}
2937  
2938  	cdev->mcp_nvm_resp = resp;
2939  out:
2940  	qed_ptt_release(p_hwfn, p_ptt);
2941  
2942  	return rc;
2943  }
2944  
2945  int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2946  			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
2947  {
2948  	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
2949  	u32 resp, param;
2950  	int rc;
2951  
2952  	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
2953  		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
2954  	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
2955  		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
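	/* The mailbox param packs the transceiver port, I2C address, byte
	 * offset and chunk size into a single dword; the offset and size
	 * fields are refreshed for every chunk in the loop below.
	 */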
2956  
2957  	addr = offset;
2958  	offset = 0;
2959  	bytes_left = len;
2960  	while (bytes_left > 0) {
2961  		bytes_to_copy = min_t(u32, bytes_left,
2962  				      MAX_I2C_TRANSACTION_SIZE);
2963  		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
2964  			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
2965  		nvm_offset |= ((addr + offset) <<
2966  			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
2967  			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
2968  		nvm_offset |= (bytes_to_copy <<
2969  			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
2970  			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
2971  		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
2972  					DRV_MSG_CODE_TRANSCEIVER_READ,
2973  					nvm_offset, &resp, &param, &buf_size,
2974  					(u32 *)(p_buf + offset));
2975  		if (rc) {
2976  			DP_NOTICE(p_hwfn,
2977  				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
2978  				  rc);
2979  			return rc;
2980  		}
2981  
2982  		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
2983  			return -ENODEV;
2984  		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
2985  			return -EINVAL;
2986  
2987  		offset += buf_size;
2988  		bytes_left -= buf_size;
2989  	}
2990  
2991  	return 0;
2992  }
2993  
2994  int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2995  {
2996  	u32 drv_mb_param = 0, rsp, param;
2997  	int rc = 0;
2998  
2999  	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3000  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3001  
3002  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3003  			 drv_mb_param, &rsp, &param);
3004  
3005  	if (rc)
3006  		return rc;
3007  
3008  	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3009  	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3010  		rc = -EAGAIN;
3011  
3012  	return rc;
3013  }
3014  
3015  int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3016  {
3017  	u32 drv_mb_param, rsp, param;
3018  	int rc = 0;
3019  
3020  	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3021  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3022  
3023  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3024  			 drv_mb_param, &rsp, &param);
3025  
3026  	if (rc)
3027  		return rc;
3028  
3029  	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3030  	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3031  		rc = -EAGAIN;
3032  
3033  	return rc;
3034  }
3035  
3036  int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3037  				    struct qed_ptt *p_ptt,
3038  				    u32 *num_images)
3039  {
3040  	u32 drv_mb_param = 0, rsp;
3041  	int rc = 0;
3042  
3043  	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3044  			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3045  
3046  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3047  			 drv_mb_param, &rsp, num_images);
3048  	if (rc)
3049  		return rc;
3050  
3051  	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3052  		rc = -EINVAL;
3053  
3054  	return rc;
3055  }
3056  
3057  int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3058  				   struct qed_ptt *p_ptt,
3059  				   struct bist_nvm_image_att *p_image_att,
3060  				   u32 image_index)
3061  {
3062  	u32 buf_size = 0, param, resp = 0, resp_param = 0;
3063  	int rc;
3064  
3065  	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3066  		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3067  	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3068  
3069  	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3070  				DRV_MSG_CODE_BIST_TEST, param,
3071  				&resp, &resp_param,
3072  				&buf_size,
3073  				(u32 *)p_image_att);
3074  	if (rc)
3075  		return rc;
3076  
3077  	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3078  	    (p_image_att->return_code != 1))
3079  		rc = -EINVAL;
3080  
3081  	return rc;
3082  }
3083  
3084  int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3085  {
3086  	struct qed_nvm_image_info nvm_info;
3087  	struct qed_ptt *p_ptt;
3088  	int rc;
3089  	u32 i;
3090  
3091  	if (p_hwfn->nvm_info.valid)
3092  		return 0;
3093  
3094  	p_ptt = qed_ptt_acquire(p_hwfn);
3095  	if (!p_ptt) {
3096  		DP_ERR(p_hwfn, "failed to acquire ptt\n");
3097  		return -EBUSY;
3098  	}
3099  
3100  	/* Acquire from MFW the amount of available images */
3101  	nvm_info.num_images = 0;
3102  	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3103  					     p_ptt, &nvm_info.num_images);
3104  	if (rc == -EOPNOTSUPP) {
3105  		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3106  		goto out;
3107  	} else if (rc || !nvm_info.num_images) {
3108  		DP_ERR(p_hwfn, "Failed getting number of images\n");
3109  		goto err0;
3110  	}
3111  
3112  	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3113  					   sizeof(struct bist_nvm_image_att),
3114  					   GFP_KERNEL);
3115  	if (!nvm_info.image_att) {
3116  		rc = -ENOMEM;
3117  		goto err0;
3118  	}
3119  
3120  	/* Iterate over images and get their attributes */
3121  	for (i = 0; i < nvm_info.num_images; i++) {
3122  		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3123  						    &nvm_info.image_att[i], i);
3124  		if (rc) {
3125  			DP_ERR(p_hwfn,
3126  			       "Failed getting image index %d attributes\n", i);
3127  			goto err1;
3128  		}
3129  
3130  		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3131  			   nvm_info.image_att[i].len);
3132  	}
3133  out:
3134  	/* Update hwfn's nvm_info */
3135  	if (nvm_info.num_images) {
3136  		p_hwfn->nvm_info.num_images = nvm_info.num_images;
3137  		kfree(p_hwfn->nvm_info.image_att);
3138  		p_hwfn->nvm_info.image_att = nvm_info.image_att;
3139  		p_hwfn->nvm_info.valid = true;
3140  	}
3141  
3142  	qed_ptt_release(p_hwfn, p_ptt);
3143  	return 0;
3144  
3145  err1:
3146  	kfree(nvm_info.image_att);
3147  err0:
3148  	qed_ptt_release(p_hwfn, p_ptt);
3149  	return rc;
3150  }
3151  
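/* Translate a driver image_id into the corresponding MFW image type and look
 * it up in the cached NVM info, returning its start address and length.
 */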
3152  int
3153  qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3154  			  enum qed_nvm_images image_id,
3155  			  struct qed_nvm_image_att *p_image_att)
3156  {
3157  	enum nvm_image_type type;
3158  	u32 i;
3159  
3160  	/* Translate image_id into MFW definitions */
3161  	switch (image_id) {
3162  	case QED_NVM_IMAGE_ISCSI_CFG:
3163  		type = NVM_TYPE_ISCSI_CFG;
3164  		break;
3165  	case QED_NVM_IMAGE_FCOE_CFG:
3166  		type = NVM_TYPE_FCOE_CFG;
3167  		break;
3168  	case QED_NVM_IMAGE_NVM_CFG1:
3169  		type = NVM_TYPE_NVM_CFG1;
3170  		break;
3171  	case QED_NVM_IMAGE_DEFAULT_CFG:
3172  		type = NVM_TYPE_DEFAULT_CFG;
3173  		break;
3174  	case QED_NVM_IMAGE_NVM_META:
3175  		type = NVM_TYPE_META;
3176  		break;
3177  	default:
3178  		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3179  			  image_id);
3180  		return -EINVAL;
3181  	}
3182  
3183  	qed_mcp_nvm_info_populate(p_hwfn);
3184  	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3185  		if (type == p_hwfn->nvm_info.image_att[i].image_type)
3186  			break;
3187  	if (i == p_hwfn->nvm_info.num_images) {
3188  		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3189  			   "Failed to find nvram image of type %08x\n",
3190  			   image_id);
3191  		return -ENOENT;
3192  	}
3193  
3194  	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3195  	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3196  
3197  	return 0;
3198  }
3199  
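/* Read an entire NVM image into the caller's buffer after validating that
 * the image exists and fits within @buffer_len.
 */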
3200  int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3201  			  enum qed_nvm_images image_id,
3202  			  u8 *p_buffer, u32 buffer_len)
3203  {
3204  	struct qed_nvm_image_att image_att;
3205  	int rc;
3206  
3207  	memset(p_buffer, 0, buffer_len);
3208  
3209  	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3210  	if (rc)
3211  		return rc;
3212  
3213  	/* Validate sizes - both the image's and the supplied buffer's */
3214  	if (image_att.length <= 4) {
3215  		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3216  			   "Image [%d] is too small - only %d bytes\n",
3217  			   image_id, image_att.length);
3218  		return -EINVAL;
3219  	}
3220  
3221  	if (image_att.length > buffer_len) {
3222  		DP_VERBOSE(p_hwfn,
3223  			   QED_MSG_STORAGE,
3224  			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3225  			   image_id, image_att.length, buffer_len);
3226  		return -ENOMEM;
3227  	}
3228  
3229  	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3230  				p_buffer, image_att.length);
3231  }
3232  
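/* Map a driver resource enum onto the corresponding MFW resource id;
 * unknown resources map to RESOURCE_NUM_INVALID.
 */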
3233  static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3234  {
3235  	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3236  
3237  	switch (res_id) {
3238  	case QED_SB:
3239  		mfw_res_id = RESOURCE_NUM_SB_E;
3240  		break;
3241  	case QED_L2_QUEUE:
3242  		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3243  		break;
3244  	case QED_VPORT:
3245  		mfw_res_id = RESOURCE_NUM_VPORT_E;
3246  		break;
3247  	case QED_RSS_ENG:
3248  		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3249  		break;
3250  	case QED_PQ:
3251  		mfw_res_id = RESOURCE_NUM_PQ_E;
3252  		break;
3253  	case QED_RL:
3254  		mfw_res_id = RESOURCE_NUM_RL_E;
3255  		break;
3256  	case QED_MAC:
3257  	case QED_VLAN:
3258  		/* Each VFC resource can accommodate both a MAC and a VLAN */
3259  		mfw_res_id = RESOURCE_VFC_FILTER_E;
3260  		break;
3261  	case QED_ILT:
3262  		mfw_res_id = RESOURCE_ILT_E;
3263  		break;
3264  	case QED_LL2_QUEUE:
3265  		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3266  		break;
3267  	case QED_RDMA_CNQ_RAM:
3268  	case QED_CMDQS_CQS:
3269  		/* CNQ/CMDQS are the same resource */
3270  		mfw_res_id = RESOURCE_CQS_E;
3271  		break;
3272  	case QED_RDMA_STATS_QUEUE:
3273  		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3274  		break;
3275  	case QED_BDQ:
3276  		mfw_res_id = RESOURCE_BDQ_E;
3277  		break;
3278  	default:
3279  		break;
3280  	}
3281  
3282  	return mfw_res_id;
3283  }
3284  
3285  #define QED_RESC_ALLOC_VERSION_MAJOR    2
3286  #define QED_RESC_ALLOC_VERSION_MINOR    0
3287  #define QED_RESC_ALLOC_VERSION				     \
3288  	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
3289  	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3290  	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
3291  	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3292  
3293  struct qed_resc_alloc_in_params {
3294  	u32 cmd;
3295  	enum qed_resources res_id;
3296  	u32 resc_max_val;
3297  };
3298  
3299  struct qed_resc_alloc_out_params {
3300  	u32 mcp_resp;
3301  	u32 mcp_param;
3302  	u32 resc_num;
3303  	u32 resc_start;
3304  	u32 vf_resc_num;
3305  	u32 vf_resc_start;
3306  	u32 flags;
3307  };
3308  
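/* Send a resource-allocation mailbox message (get or set) to the MFW and
 * unpack the returned resource_info into @p_out_params.
 */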
3309  static int
3310  qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3311  			    struct qed_ptt *p_ptt,
3312  			    struct qed_resc_alloc_in_params *p_in_params,
3313  			    struct qed_resc_alloc_out_params *p_out_params)
3314  {
3315  	struct qed_mcp_mb_params mb_params;
3316  	struct resource_info mfw_resc_info;
3317  	int rc;
3318  
3319  	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3320  
3321  	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3322  	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3323  		DP_ERR(p_hwfn,
3324  		       "Failed to match resource %d [%s] with the MFW resources\n",
3325  		       p_in_params->res_id,
3326  		       qed_hw_get_resc_name(p_in_params->res_id));
3327  		return -EINVAL;
3328  	}
3329  
3330  	switch (p_in_params->cmd) {
3331  	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3332  		mfw_resc_info.size = p_in_params->resc_max_val;
3333  		/* Fallthrough */
3334  	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3335  		break;
3336  	default:
3337  		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3338  		       p_in_params->cmd);
3339  		return -EINVAL;
3340  	}
3341  
3342  	memset(&mb_params, 0, sizeof(mb_params));
3343  	mb_params.cmd = p_in_params->cmd;
3344  	mb_params.param = QED_RESC_ALLOC_VERSION;
3345  	mb_params.p_data_src = &mfw_resc_info;
3346  	mb_params.data_src_size = sizeof(mfw_resc_info);
3347  	mb_params.p_data_dst = mb_params.p_data_src;
3348  	mb_params.data_dst_size = mb_params.data_src_size;
3349  
3350  	DP_VERBOSE(p_hwfn,
3351  		   QED_MSG_SP,
3352  		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3353  		   p_in_params->cmd,
3354  		   p_in_params->res_id,
3355  		   qed_hw_get_resc_name(p_in_params->res_id),
3356  		   QED_MFW_GET_FIELD(mb_params.param,
3357  				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3358  		   QED_MFW_GET_FIELD(mb_params.param,
3359  				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3360  		   p_in_params->resc_max_val);
3361  
3362  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3363  	if (rc)
3364  		return rc;
3365  
3366  	p_out_params->mcp_resp = mb_params.mcp_resp;
3367  	p_out_params->mcp_param = mb_params.mcp_param;
3368  	p_out_params->resc_num = mfw_resc_info.size;
3369  	p_out_params->resc_start = mfw_resc_info.offset;
3370  	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3371  	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3372  	p_out_params->flags = mfw_resc_info.flags;
3373  
3374  	DP_VERBOSE(p_hwfn,
3375  		   QED_MSG_SP,
3376  		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3377  		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3378  				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3379  		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3380  				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3381  		   p_out_params->resc_num,
3382  		   p_out_params->resc_start,
3383  		   p_out_params->vf_resc_num,
3384  		   p_out_params->vf_resc_start, p_out_params->flags);
3385  
3386  	return 0;
3387  }
3388  
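/* Inform the MFW of the maximum value the driver can support for a resource,
 * using the SET_RESOURCE_VALUE mailbox command.
 */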
3389  int
3390  qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3391  			 struct qed_ptt *p_ptt,
3392  			 enum qed_resources res_id,
3393  			 u32 resc_max_val, u32 *p_mcp_resp)
3394  {
3395  	struct qed_resc_alloc_out_params out_params;
3396  	struct qed_resc_alloc_in_params in_params;
3397  	int rc;
3398  
3399  	memset(&in_params, 0, sizeof(in_params));
3400  	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3401  	in_params.res_id = res_id;
3402  	in_params.resc_max_val = resc_max_val;
3403  	memset(&out_params, 0, sizeof(out_params));
3404  	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3405  					 &out_params);
3406  	if (rc)
3407  		return rc;
3408  
3409  	*p_mcp_resp = out_params.mcp_resp;
3410  
3411  	return 0;
3412  }
3413  
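/* Query the MFW for the number and starting offset of a resource allocated
 * to this function.
 */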
3414  int
3415  qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3416  		      struct qed_ptt *p_ptt,
3417  		      enum qed_resources res_id,
3418  		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3419  {
3420  	struct qed_resc_alloc_out_params out_params;
3421  	struct qed_resc_alloc_in_params in_params;
3422  	int rc;
3423  
3424  	memset(&in_params, 0, sizeof(in_params));
3425  	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3426  	in_params.res_id = res_id;
3427  	memset(&out_params, 0, sizeof(out_params));
3428  	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3429  					 &out_params);
3430  	if (rc)
3431  		return rc;
3432  
3433  	*p_mcp_resp = out_params.mcp_resp;
3434  
3435  	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3436  		*p_resc_num = out_params.resc_num;
3437  		*p_resc_start = out_params.resc_start;
3438  	}
3439  
3440  	return 0;
3441  }
3442  
3443  int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3444  {
3445  	u32 mcp_resp, mcp_param;
3446  
3447  	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3448  			   &mcp_resp, &mcp_param);
3449  }
3450  
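/* Wrapper for the RESOURCE_CMD mailbox command that screens out responses
 * indicating the command or its opcode is not recognized by the MFW.
 */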
3451  static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3452  				struct qed_ptt *p_ptt,
3453  				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3454  {
3455  	int rc;
3456  
3457  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3458  			 p_mcp_resp, p_mcp_param);
3459  	if (rc)
3460  		return rc;
3461  
3462  	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3463  		DP_INFO(p_hwfn,
3464  			"The resource command is unsupported by the MFW\n");
3465  		return -EINVAL;
3466  	}
3467  
3468  	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3469  		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3470  
3471  		DP_NOTICE(p_hwfn,
3472  			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3473  			  param, opcode);
3474  		return -EINVAL;
3475  	}
3476  
3477  	return rc;
3478  }
3479  
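/* Single attempt to acquire an MFW resource lock; the grant/busy outcome is
 * reflected in p_params->b_granted.
 */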
3480  static int
3481  __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3482  		    struct qed_ptt *p_ptt,
3483  		    struct qed_resc_lock_params *p_params)
3484  {
3485  	u32 param = 0, mcp_resp, mcp_param;
3486  	u8 opcode;
3487  	int rc;
3488  
3489  	switch (p_params->timeout) {
3490  	case QED_MCP_RESC_LOCK_TO_DEFAULT:
3491  		opcode = RESOURCE_OPCODE_REQ;
3492  		p_params->timeout = 0;
3493  		break;
3494  	case QED_MCP_RESC_LOCK_TO_NONE:
3495  		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3496  		p_params->timeout = 0;
3497  		break;
3498  	default:
3499  		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3500  		break;
3501  	}
3502  
3503  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3504  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3505  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3506  
3507  	DP_VERBOSE(p_hwfn,
3508  		   QED_MSG_SP,
3509  		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3510  		   param, p_params->timeout, opcode, p_params->resource);
3511  
3512  	/* Attempt to acquire the resource */
3513  	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3514  	if (rc)
3515  		return rc;
3516  
3517  	/* Analyze the response */
3518  	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3519  	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3520  
3521  	DP_VERBOSE(p_hwfn,
3522  		   QED_MSG_SP,
3523  		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3524  		   mcp_param, opcode, p_params->owner);
3525  
3526  	switch (opcode) {
3527  	case RESOURCE_OPCODE_GNT:
3528  		p_params->b_granted = true;
3529  		break;
3530  	case RESOURCE_OPCODE_BUSY:
3531  		p_params->b_granted = false;
3532  		break;
3533  	default:
3534  		DP_NOTICE(p_hwfn,
3535  			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3536  			  mcp_param, opcode);
3537  		return -EINVAL;
3538  	}
3539  
3540  	return 0;
3541  }
3542  
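/* Acquire an MFW resource lock, retrying up to p_params->retry_num times
 * with either a sleeping or a busy-wait interval between attempts.
 */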
3543  int
3544  qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3545  		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3546  {
3547  	u32 retry_cnt = 0;
3548  	int rc;
3549  
3550  	do {
3551  		/* No need for an interval before the first iteration */
3552  		if (retry_cnt) {
3553  			if (p_params->sleep_b4_retry) {
3554  				u16 retry_interval_in_ms =
3555  				    DIV_ROUND_UP(p_params->retry_interval,
3556  						 1000);
3557  
3558  				msleep(retry_interval_in_ms);
3559  			} else {
3560  				udelay(p_params->retry_interval);
3561  			}
3562  		}
3563  
3564  		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3565  		if (rc)
3566  			return rc;
3567  
3568  		if (p_params->b_granted)
3569  			break;
3570  	} while (retry_cnt++ < p_params->retry_num);
3571  
3572  	return 0;
3573  }
3574  
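/* Release (or force-release) a previously acquired MFW resource lock and
 * report through p_params->b_released whether it was actually released.
 */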
3575  int
3576  qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3577  		    struct qed_ptt *p_ptt,
3578  		    struct qed_resc_unlock_params *p_params)
3579  {
3580  	u32 param = 0, mcp_resp, mcp_param;
3581  	u8 opcode;
3582  	int rc;
3583  
3584  	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3585  				   : RESOURCE_OPCODE_RELEASE;
3586  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3587  	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3588  
3589  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3590  		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3591  		   param, opcode, p_params->resource);
3592  
3593  	/* Attempt to release the resource */
3594  	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3595  	if (rc)
3596  		return rc;
3597  
3598  	/* Analyze the response */
3599  	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3600  
3601  	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3602  		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3603  		   mcp_param, opcode);
3604  
3605  	switch (opcode) {
3606  	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3607  		DP_INFO(p_hwfn,
3608  			"Resource unlock request for an already released resource [%d]\n",
3609  			p_params->resource);
3610  		/* Fallthrough */
3611  	case RESOURCE_OPCODE_RELEASED:
3612  		p_params->b_released = true;
3613  		break;
3614  	case RESOURCE_OPCODE_WRONG_OWNER:
3615  		p_params->b_released = false;
3616  		break;
3617  	default:
3618  		DP_NOTICE(p_hwfn,
3619  			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3620  			  mcp_param, opcode);
3621  		return -EINVAL;
3622  	}
3623  
3624  	return 0;
3625  }
3626  
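/* Fill the lock/unlock parameter structures with default values for the
 * given resource; permanent resources are requested without aging or
 * retries.
 */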
3627  void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3628  				    struct qed_resc_unlock_params *p_unlock,
3629  				    enum qed_resc_lock
3630  				    resource, bool b_is_permanent)
3631  {
3632  	if (p_lock) {
3633  		memset(p_lock, 0, sizeof(*p_lock));
3634  
3635  		/* Permanent resources don't require aging, and there's no
3636  		 * point in trying to acquire them more than once since it's
3637  		 * unexpected another entity would release them.
3638  		 */
3639  		if (b_is_permanent) {
3640  			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3641  		} else {
3642  			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3643  			p_lock->retry_interval =
3644  			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3645  			p_lock->sleep_b4_retry = true;
3646  		}
3647  
3648  		p_lock->resource = resource;
3649  	}
3650  
3651  	if (p_unlock) {
3652  		memset(p_unlock, 0, sizeof(*p_unlock));
3653  		p_unlock->resource = resource;
3654  	}
3655  }
3656  
3657  bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3658  {
3659  	return !!(p_hwfn->mcp_info->capabilities &
3660  		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3661  }
3662  
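/* Query the MFW for the bitmap of features it supports and cache it in
 * mcp_info->capabilities.
 */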
3663  int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3664  {
3665  	u32 mcp_resp;
3666  	int rc;
3667  
3668  	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3669  			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3670  	if (!rc)
3671  		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3672  			   "MFW supported features: %08x\n",
3673  			   p_hwfn->mcp_info->capabilities);
3674  
3675  	return rc;
3676  }
3677  
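/* Advertise to the MFW the features supported by the driver (EEE and
 * virtual-link support).
 */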
3678  int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3679  {
3680  	u32 mcp_resp, mcp_param, features;
3681  
3682  	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3683  		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
3684  
3685  	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3686  			   features, &mcp_resp, &mcp_param);
3687  }
3688  
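/* Read the engine affinity configuration (FIR value and L2 hint) from the
 * MFW and store the fields that are marked valid in the qed_dev.
 */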
3689  int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3690  {
3691  	struct qed_mcp_mb_params mb_params = {0};
3692  	struct qed_dev *cdev = p_hwfn->cdev;
3693  	u8 fir_valid, l2_valid;
3694  	int rc;
3695  
3696  	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3697  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3698  	if (rc)
3699  		return rc;
3700  
3701  	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3702  		DP_INFO(p_hwfn,
3703  			"The get_engine_config command is unsupported by the MFW\n");
3704  		return -EOPNOTSUPP;
3705  	}
3706  
3707  	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3708  				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3709  	if (fir_valid)
3710  		cdev->fir_affin =
3711  		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3712  				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3713  
3714  	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3715  				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3716  	if (l2_valid)
3717  		cdev->l2_affin_hint =
3718  		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3719  				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3720  
3721  	DP_INFO(p_hwfn,
3722  		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3723  		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3724  
3725  	return 0;
3726  }
3727  
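/* Retrieve the PPFID bitmap for this function from the MFW and store it in
 * the qed_dev.
 */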
3728  int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3729  {
3730  	struct qed_mcp_mb_params mb_params = {0};
3731  	struct qed_dev *cdev = p_hwfn->cdev;
3732  	int rc;
3733  
3734  	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3735  	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3736  	if (rc)
3737  		return rc;
3738  
3739  	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3740  		DP_INFO(p_hwfn,
3741  			"The get_ppfid_bitmap command is unsupported by the MFW\n");
3742  		return -EOPNOTSUPP;
3743  	}
3744  
3745  	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3746  					       FW_MB_PARAM_PPFID_BITMAP);
3747  
3748  	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3749  		   cdev->ppfid_bitmap);
3750  
3751  	return 0;
3752  }
3753  
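/* Read an NVM config option from the MFW; @flags select init/free behaviour
 * and an optional entity id, and the raw option bytes are returned in
 * @p_buf with their length in @p_len.
 */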
3754  int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3755  			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3756  			u32 *p_len)
3757  {
3758  	u32 mb_param = 0, resp, param;
3759  	int rc;
3760  
3761  	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3762  	if (flags & QED_NVM_CFG_OPTION_INIT)
3763  		QED_MFW_SET_FIELD(mb_param,
3764  				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3765  	if (flags & QED_NVM_CFG_OPTION_FREE)
3766  		QED_MFW_SET_FIELD(mb_param,
3767  				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3768  	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3769  		QED_MFW_SET_FIELD(mb_param,
3770  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3771  		QED_MFW_SET_FIELD(mb_param,
3772  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3773  				  entity_id);
3774  	}
3775  
3776  	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3777  				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
3778  				mb_param, &resp, &param, p_len, (u32 *)p_buf);
3779  
3780  	return rc;
3781  }
3782  
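/* Write an NVM config option to the MFW; @flags control all/init/commit/free
 * behaviour and optional per-entity selection.
 */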
3783  int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3784  			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3785  			u32 len)
3786  {
3787  	u32 mb_param = 0, resp, param;
3788  
3789  	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3790  	if (flags & QED_NVM_CFG_OPTION_ALL)
3791  		QED_MFW_SET_FIELD(mb_param,
3792  				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
3793  	if (flags & QED_NVM_CFG_OPTION_INIT)
3794  		QED_MFW_SET_FIELD(mb_param,
3795  				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3796  	if (flags & QED_NVM_CFG_OPTION_COMMIT)
3797  		QED_MFW_SET_FIELD(mb_param,
3798  				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
3799  	if (flags & QED_NVM_CFG_OPTION_FREE)
3800  		QED_MFW_SET_FIELD(mb_param,
3801  				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
3802  	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
3803  		QED_MFW_SET_FIELD(mb_param,
3804  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
3805  		QED_MFW_SET_FIELD(mb_param,
3806  				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
3807  				  entity_id);
3808  	}
3809  
3810  	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
3811  				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
3812  				  mb_param, &resp, &param, len, (u32 *)p_buf);
3813  }
3814