/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"

#ifdef CONFIG_BNXT_SRIOV
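/* Forward an async event completion to one VF, or broadcast it to all
 * VFs when @vf is NULL.
 */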
static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
					  struct bnxt_vf_info *vf, u16 event_id)
{
	struct hwrm_fwd_async_event_cmpl_input *req;
	struct hwrm_async_event_cmpl *async_cmpl;
	int rc = 0;

	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
	if (rc)
		goto exit;

	if (vf)
		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
	else
		/* broadcast this async event to all VFs */
		req->encap_async_event_target_id = cpu_to_le16(0xffff);
	async_cmpl =
		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
	async_cmpl->event_id = cpu_to_le16(event_id);

	rc = hwrm_req_send(bp, req);
exit:
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
			   rc);
	return rc;
}

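/* Common validation for the VF ndo callbacks: SR-IOV must be enabled and
 * vf_id must be within the range of active VFs.
 */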
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.active_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	bool old_setting = false;
	struct bnxt_vf_info *vf;
	u32 func_flags;
	int rc;

	if (bp->hwrm_spec_code < 0x10701)
		return -ENOTSUPP;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	if (setting)
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
	else
		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->flags = cpu_to_le32(func_flags);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			if (setting)
				vf->flags |= BNXT_VF_SPOOFCHK;
			else
				vf->flags &= ~BNXT_VF_SPOOFCHK;
		}
	}
	return rc;
}

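/* Query the function configuration from firmware and cache the returned
 * flags in vf->func_qcfg_flags.
 */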
static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);
	return rc;
}

bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return !!(vf->flags & BNXT_VF_TRUST);

	bnxt_hwrm_func_qcfg_flags(bp, vf);
	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
}

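/* Push the current BNXT_VF_TRUST setting for this VF to firmware, if the
 * firmware supports the trusted VF attribute.
 */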
static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(vf->fw_fid);
	if (vf->flags & BNXT_VF_TRUST)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
	else
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;

	if (bnxt_vf_ndo_prep(bp, vf_id))
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	if (trusted)
		vf->flags |= BNXT_VF_TRUST;
	else
		vf->flags &= ~BNXT_VF_TRUST;

	bnxt_hwrm_set_trusted_vf(bp, vf);
	return 0;
}

int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	if (is_valid_ether_addr(vf->mac_addr))
		memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	else
		memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	if (vf->flags & BNXT_VF_QOS)
		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
	else
		ivi->qos = 0;
	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	memcpy(vf->mac_addr, mac, ETH_ALEN);

	req->fid = cpu_to_le16(vf->fw_fid);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_req_send(bp, req);
}

int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	if (bp->hwrm_spec_code < 0x10201)
		return -ENOTSUPP;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority is not implemented yet;
	 * for now, fail the command if a priority is specified
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->dflt_vlan = cpu_to_le16(vlan_tag);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		rc = hwrm_req_send(bp, req);
		if (!rc)
			vf->vlan = vlan_tag;
	}
	return rc;
}

int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceed PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (!rc) {
		req->fid = cpu_to_le16(vf->fw_fid);
		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					   FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(max_tx_rate);
		req->min_bw = cpu_to_le32(min_tx_rate);
		rc = hwrm_req_send(bp, req);
		if (!rc) {
			vf->min_tx_rate = min_tx_rate;
			vf->max_tx_rate = max_tx_rate;
		}
	}
	return rc;
}

int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
	}
	return 0;
}

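/* Ask firmware to release the resources it reserved for each VF in the
 * range [first_vf_id, first_vf_id + num_vfs).
 */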
static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_vf_resc_free_input *req;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
	if (rc)
		return rc;

	hwrm_req_hold(bp, req);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req->vf_id = cpu_to_le16(i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	bp->pf.active_vfs = 0;
	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

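/* Allocate the per-VF state array, the DMA pages used to hold HWRM requests
 * forwarded from the VFs, and the VF event bitmap.
 */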
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VF's */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

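/* Register the request buffer pages with firmware so that HWRM commands
 * issued by the VFs can be forwarded to the PF driver.
 */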
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
	if (rc)
		return rc;

	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_req_send(bp, req);
}

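/* Program the PF-administered settings (default MAC, default VLAN,
 * bandwidth limits and trust) for one VF into firmware.
 */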
static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
{
	struct hwrm_func_cfg_input *req;
	struct bnxt_vf_info *vf;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	req->fid = cpu_to_le16(vf->fw_fid);

	if (is_valid_ether_addr(vf->mac_addr)) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
	}
	if (vf->vlan) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
		req->dflt_vlan = cpu_to_le16(vf->vlan);
	}
	if (vf->max_tx_rate) {
		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
					    FUNC_CFG_REQ_ENABLES_MIN_BW);
		req->max_bw = cpu_to_le32(vf->max_tx_rate);
		req->min_bw = cpu_to_le32(vf->min_tx_rate);
	}
	if (vf->flags & BNXT_VF_TRUST)
		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);

	return hwrm_req_send(bp, req);
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	struct hwrm_func_vf_resource_cfg_input *req;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
	struct bnxt_pf_info *pf = &bp->pf;
	int i, rc = 0, min = 1;
	u16 vf_msix = 0;
	u16 vf_rss;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
	if (rc)
		return rc;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
		vf_ring_grps = 0;
	} else {
		vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
	}
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
	else
		vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;

	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		min = 0;
		req->min_rsscos_ctx = cpu_to_le16(min);
	}
	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		req->min_cmpl_rings = cpu_to_le16(min);
		req->min_tx_rings = cpu_to_le16(min);
		req->min_rx_rings = cpu_to_le16(min);
		req->min_l2_ctxs = cpu_to_le16(min);
		req->min_vnics = cpu_to_le16(min);
		req->min_stat_ctx = cpu_to_le16(min);
		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			req->min_hw_ring_grps = cpu_to_le16(min);
	} else {
		vf_cp_rings /= num_vfs;
		vf_tx_rings /= num_vfs;
		vf_rx_rings /= num_vfs;
		vf_vnics /= num_vfs;
		vf_stat_ctx /= num_vfs;
		vf_ring_grps /= num_vfs;
		vf_rss /= num_vfs;

		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
		req->min_vnics = cpu_to_le16(vf_vnics);
		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
	}
	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->max_vnics = cpu_to_le16(vf_vnics);
	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		req->max_msix = cpu_to_le16(vf_msix / num_vfs);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		if (reset)
			__bnxt_set_vf_params(bp, i);

		req->vf_id = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = pf->first_vf_id + i;
	}

	if (pf->active_vfs) {
		u16 n = pf->active_vfs;

		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
		hw_resc->max_hw_ring_grps -=
			le16_to_cpu(req->min_hw_ring_grps) * n;
		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
		hw_resc->max_rsscos_ctxs -=
			le16_to_cpu(req->min_rsscos_ctx) * n;
		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			hw_resc->max_nqs -= vf_msix;

		rc = pf->active_vfs;
	}
	hwrm_req_drop(bp, req);
	return rc;
}

/* Only called by PF to reserve resources for VFs, returns actual number of
 * VFs configured, or < 0 on error.
 */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_cfg_input *req;
	int total_vf_tx_rings = 0;
	u16 vf_ring_grps;
	u32 mtu, i;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
	if (rc)
		return rc;

	/* Remaining rings are distributed equally among VFs for now */
	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
			      num_vfs;
	vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);

	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
				   FUNC_CFG_REQ_ENABLES_MRU |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN;
	req->mru = cpu_to_le16(mtu);
	req->admin_mtu = cpu_to_le16(mtu);

	req->num_rsscos_ctxs = cpu_to_le16(1);
	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req->num_l2_ctxs = cpu_to_le16(4);

	req->num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	hwrm_req_hold(bp, req);
	for (i = 0; i < num_vfs; i++) {
		int vf_tx_rsvd = vf_tx_rings;

		req->fid = cpu_to_le16(pf->first_vf_id + i);
		rc = hwrm_req_send(bp, req);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
					      &vf_tx_rsvd);
		if (rc)
			break;
		total_vf_tx_rings += vf_tx_rsvd;
	}
	hwrm_req_drop(bp, req);
	if (pf->active_vfs) {
		hw_resc->max_tx_rings -= total_vf_tx_rings;
		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
		hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
		hw_resc->max_rsscos_ctxs -= num_vfs;
		hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		hw_resc->max_vnics -= vf_vnics * num_vfs;
		rc = pf->active_vfs;
	}
	return rc;
}

static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
{
	if (BNXT_NEW_RM(bp))
		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
	else
		return bnxt_hwrm_func_cfg(bp, num_vfs);
}

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	int rc;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		return rc;

	/* Reserve resources for VFs */
	rc = bnxt_func_cfg(bp, *num_vfs, reset);
	if (rc != *num_vfs) {
		if (rc <= 0) {
			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
			*num_vfs = 0;
			return rc;
		}
		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
			    rc);
		*num_vfs = rc;
	}

	bnxt_ulp_sriov_cfg(bp, *num_vfs);
	return 0;
}

static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
	int avail_cp, avail_stat;

	/* Check if we can enable the requested number of VFs. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimal config,
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
	avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
	avail_cp = min_t(int, avail_cp, avail_stat);

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}
		if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
		    avail_cp < min_rx_rings)
			rx_ok = 0;

		if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
		    avail_cp >= min_tx_rings)
			tx_ok = 1;

		if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
		    min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VF's as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc) {
		bnxt_ulp_sriov_cfg(bp, 0);
		goto err_out2;
	}

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

	/* Restore the max resources */
	bnxt_hwrm_func_qcaps(bp);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	/* synchronize VF and VF-rep create and destroy */
	devl_lock(bp->dl);
	bnxt_vf_reps_destroy(bp);

	if (pci_vfs_assigned(bp->pdev)) {
		bnxt_hwrm_fwd_async_event_cmpl(
			bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}
	devl_unlock(bp->dl);

	bnxt_free_vf_resources(bp);

	/* Reclaim all resources for the PF. */
	rtnl_lock();
	bnxt_restore_pf_fw_resources(bp);
	rtnl_unlock();

	bnxt_ulp_sriov_cfg(bp, 0);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "SRIOV is not allowed if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Reject SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

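/* Forward a response constructed by the PF on behalf of a VF back to that
 * VF's response buffer.
 */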
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	struct hwrm_fwd_resp_input *req;
	int rc;

	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_len = cpu_to_le16(msg_size);
		req->encap_resp_addr = encap_resp_addr;
		req->encap_resp_cmpl_ring = encap_resp_cpr;
		memcpy(req->encap_resp, encap_resp, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
	return rc;
}

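/* Reject a forwarded VF HWRM request by sending it back to firmware via
 * HWRM_REJECT_FWD_RESP.
 */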
static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	struct hwrm_reject_fwd_resp_input *req;
	int rc;

	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
	return rc;
}

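/* Execute a forwarded VF HWRM request in firmware on behalf of the VF via
 * HWRM_EXEC_FWD_RESP.
 */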
static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	struct hwrm_exec_fwd_resp_input *req;
	int rc;

	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
		return -EINVAL;

	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
	if (!rc) {
		/* Set the new target id */
		req->target_id = cpu_to_le16(vf->fw_fid);
		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);

		rc = hwrm_req_send(bp, req);
	}
	if (rc)
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
	return rc;
}

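/* Handle a forwarded HWRM_FUNC_VF_CFG request.  Accept the VF's requested
 * MAC address if the VF is trusted, if the PF has not assigned one, or if
 * it matches the PF-assigned address; otherwise reject the request.
 */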
static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
	struct hwrm_func_vf_cfg_input *req =
		(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;

	/* Allow VF to set a valid MAC address, if trust is set to on or
	 * if the PF assigned MAC address is zero
	 */
	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
		bool trust = bnxt_is_trusted_vf(bp, vf);

		if (is_valid_ether_addr(req->dflt_mac_addr) &&
		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
		}
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
	}
	return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
	bool mac_ok = false;

	if (!is_valid_ether_addr((const u8 *)req->l2_addr))
		return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);

	/* Allow VF to set a valid MAC address, if trust is set to on.
	 * Or VF MAC address must first match MAC address in PF's context.
	 * Otherwise, it must match the VF MAC address if firmware spec >=
	 * 1.2.2
	 */
	if (bnxt_is_trusted_vf(bp, vf)) {
		mac_ok = true;
	} else if (is_valid_ether_addr(vf->mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
			mac_ok = true;
	} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
		if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
			mac_ok = true;
	} else {
		/* There are two cases:
		 * 1. If firmware spec < 0x10202, VF MAC address is not
		 *    forwarded to the PF and so it doesn't have to match
		 * 2. Allow VF to modify its own MAC when PF has not assigned
		 *    a valid MAC address and firmware spec >= 0x10202
		 */
		mac_ok = true;
	}
	if (mac_ok)
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

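/* Handle a forwarded HWRM_PORT_PHY_QCFG request.  If the VF link state is
 * not forced, forward the query to firmware; otherwise synthesize a
 * response that reports the forced link-up or link-down state.
 */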
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->link_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->link_lock);
		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
		phy_qcfg_resp.valid = 1;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link !=
			    PORT_PHY_QCFG_RESP_LINK_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				phy_qcfg_resp.link_speed = cpu_to_le16(
					PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
				phy_qcfg_resp.duplex_cfg =
					PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
				phy_qcfg_resp.duplex_state =
					PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex_state =
				PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

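/* Validate and dispatch an HWRM request forwarded from a VF based on its
 * request type.
 */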
static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct input *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le16_to_cpu(encap_req->req_type);

	switch (req_type) {
	case HWRM_FUNC_VF_CFG:
		rc = bnxt_vf_configure_mac(bp, vf);
		break;
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO Validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	struct hwrm_func_vf_cfg_input *req;
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	if (bp->hwrm_spec_code < 0x10202) {
		if (is_valid_ether_addr(bp->vf.mac_addr))
			rc = -EADDRNOTAVAIL;
		goto mac_done;
	}

	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
	if (rc)
		goto mac_done;

	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
	if (!strict)
		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
	rc = hwrm_req_send(bp, req);
mac_done:
	if (rc && strict) {
		rc = -EADDRNOTAVAIL;
		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
			    mac);
		return rc;
	}
	return 0;
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_output *resp;
	struct hwrm_func_qcaps_input *req;
	bool inform_pf = false;

	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
		return;

	req->fid = cpu_to_le16(0xffff);

	resp = hwrm_req_hold(bp, req);
	if (hwrm_req_send(bp, req))
		goto update_vf_mac_exit;

	/* Store MAC address from the firmware.  There are 2 cases:
	 * 1. MAC address is valid.  It is assigned from the PF and we
	 *    need to override the current VF MAC address with it.
	 * 2. MAC address is zero.  The VF will use a random MAC address by
	 *    default but the stored zero MAC will allow the VF user to change
	 *    the random MAC address using ndo_set_mac_address() if he wants.
	 */
	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
		/* This means we are now using our own MAC address, let
		 * the PF know about this MAC address.
		 */
		if (!is_valid_ether_addr(bp->vf.mac_addr))
			inform_pf = true;
	}

	/* overwrite netdev dev_addr with admin VF MAC */
	if (is_valid_ether_addr(bp->vf.mac_addr))
		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
	hwrm_req_drop(bp, req);
	if (inform_pf)
		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
}

#else

int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
{
	if (*num_vfs)
		return -EOPNOTSUPP;
	return 0;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}

int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
	return 0;
}
#endif