/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#define __PREVENT_DUMP_MEM_ARR__
#define __PREVENT_PXP_GLOBAL_WIN__
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include <linux/qed/qed_fcoe_if.h>

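/* Driver-side state for a single offloaded FCoE connection: SQ/XFERQ/CONFQ
 * PBLs and pages, peer and local MAC words, FC timers and addressing, plus
 * the list entry used to park released connections on the per-hwfn free list.
 */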
struct qed_fcoe_conn {
	struct list_head list_entry;
	bool free_on_delete;

	u16 conn_id;
	u32 icid;
	u32 fw_cid;
	u8 layer_code;

	dma_addr_t sq_pbl_addr;
	dma_addr_t sq_curr_page_addr;
	dma_addr_t sq_next_page_addr;
	dma_addr_t xferq_pbl_addr;
	void *xferq_pbl_addr_virt_addr;
	dma_addr_t xferq_addr[4];
	void *xferq_addr_virt_addr[4];
	dma_addr_t confq_pbl_addr;
	void *confq_pbl_addr_virt_addr;
	dma_addr_t confq_addr[2];
	void *confq_addr_virt_addr[2];

	dma_addr_t terminate_params;

	u16 dst_mac_addr_lo;
	u16 dst_mac_addr_mid;
	u16 dst_mac_addr_hi;
	u16 src_mac_addr_lo;
	u16 src_mac_addr_mid;
	u16 src_mac_addr_hi;

	u16 tx_max_fc_pay_len;
	u16 e_d_tov_timer_val;
	u16 rec_tov_timer_val;
	u16 rx_max_fc_pay_len;
	u16 vlan_tag;
	u16 physical_q0;

	struct fc_addr_nw s_id;
	u8 max_conc_seqs_c3;
	struct fc_addr_nw d_id;
	u8 flags;
	u8 def_q_idx;
};

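/* Post the FCOE_RAMROD_CMD_ID_INIT_FUNC ramrod that starts the FCoE PF:
 * validate the requested CQ count against the available feature count,
 * acquire a dummy CID used for timer completions, and fill the function and
 * queue parameters (BDQ PBLs, xon/xoff thresholds, CQ/CMDQ SB mapping) from
 * fcoe_pf_params.
 */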
static int
qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
		       enum spq_mode comp_mode,
		       struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
	struct fcoe_init_ramrod_params *p_ramrod = NULL;
	struct fcoe_init_func_ramrod_data *p_data;
	struct e4_fcoe_conn_context *p_cxt = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_cxt_info cxt_info;
	u32 dummy_cid;
	int rc = 0;
	u16 tmp;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_init;
	p_data = &p_ramrod->init_ramrod_data;
	fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;

	/* Sanity */
	if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
		DP_ERR(p_hwfn,
		       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
		       fcoe_pf_params->num_cqs,
		       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
		return -EINVAL;
	}

	p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
	tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
	p_data->sq_num_pages_in_pbl = tmp;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
	if (rc)
		return rc;

	cxt_info.iid = dummy_cid;
	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc) {
		DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
			  dummy_cid);
		return rc;
	}
	p_cxt = cxt_info.p_cxt;
	SET_FIELD(p_cxt->tstorm_ag_context.flags3,
		  E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);

	fcoe_pf_params->dummy_icid = (u16)dummy_cid;

	tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
	p_data->func_params.num_tasks = tmp;
	p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
	p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;

	DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
		       fcoe_pf_params->glbl_q_params_addr);

	tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
	p_data->q_params.cq_num_entries = tmp;

	tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
	p_data->q_params.cmdq_num_entries = tmp;

	tmp = fcoe_pf_params->num_cqs;
	p_data->q_params.num_queues = (u8)tmp;

	tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
	p_data->q_params.queue_relative_offset = (u8)tmp;

	for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
		u16 igu_sb_id;

		igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
		tmp = cpu_to_le16(igu_sb_id);
		p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
	}

	p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
	p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;

	p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);

	DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
		       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
	p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
	    fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
	tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
	p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
	tmp = fcoe_pf_params->rq_buffer_size;
	p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);

	if (fcoe_pf_params->is_target) {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
		if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
			SET_FIELD(p_data->q_params.q_validity,
				  SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
	} else {
		SET_FIELD(p_data->q_params.q_validity,
			  SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
	}

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	return rc;
}

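/* Post FCOE_RAMROD_CMD_ID_OFFLOAD_CONN for @p_conn: copy the SQ/XFERQ/CONFQ
 * page addresses, MAC words, FC timers and FC addressing into the offload
 * ramrod data and submit it on the connection's icid.
 */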
static int
qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
	struct fcoe_conn_offload_ramrod_data *p_data;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 physical_q0, tmp;
	int rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
	p_data = &p_ramrod->offload_ramrod_data;

	/* Transmission PQ is the first of the PF */
	physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
	p_conn->physical_q0 = cpu_to_le16(physical_q0);
	p_data->physical_q0 = cpu_to_le16(physical_q0);

	p_data->conn_id = cpu_to_le16(p_conn->conn_id);
	DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
	DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
	DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
	DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
	DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
	DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);

	DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
	DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
	DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);

	p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
	p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
	p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
	p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
	p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
	p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);

	tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
	p_data->tx_max_fc_pay_len = tmp;
	tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
	p_data->e_d_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
	p_data->rec_rr_tov_timer_val = tmp;
	tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
	p_data->rx_max_fc_pay_len = tmp;

	p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
	p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
	p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
	p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
	p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
	p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
	p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
	p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
	p_data->flags = p_conn->flags;
	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		SET_FIELD(p_data->flags,
			  FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
	p_data->def_q_idx = p_conn->def_q_idx;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

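/* Post FCOE_RAMROD_CMD_ID_TERMINATE_CONN for @p_conn, passing the
 * terminate-params DMA address supplied by the upper driver to the firmware.
 */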
static int
qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
			 struct qed_fcoe_conn *p_conn,
			 enum spq_mode comp_mode,
			 struct qed_spq_comp_cb *p_comp_addr)
{
	struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_conn->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
	DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
		       p_conn->terminate_params);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

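/* Post FCOE_RAMROD_CMD_ID_DESTROY_FUNC on the dummy icid, disabling the FCoE
 * task segment in the timers block before submitting the ramrod.
 */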
static int
qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum spq_mode comp_mode,
		      struct qed_spq_comp_cb *p_comp_addr)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u32 active_segs = 0;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_addr;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
	active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

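/* Reuse a connection from the per-hwfn free list when one is available;
 * otherwise allocate a fresh qed_fcoe_conn together with its XFERQ/CONFQ
 * pages and PBLs, filling each PBL with the DMA addresses of its pages.
 */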
static int
qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
			     struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	void *p_addr;
	u32 i;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
		p_conn =
		    list_first_entry(&p_hwfn->p_fcoe_info->free_list,
				     struct qed_fcoe_conn, list_entry);
	if (p_conn) {
		list_del(&p_conn->list_entry);
		spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
		*p_out_conn = p_conn;
		return 0;
	}
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);

	p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
	if (!p_conn)
		return -ENOMEM;

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->xferq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_pbl_xferq;
	p_conn->xferq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->xferq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_xferq;
		p_conn->xferq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->xferq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
	}

	p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE,
				    &p_conn->confq_pbl_addr, GFP_KERNEL);
	if (!p_addr)
		goto nomem_xferq;
	p_conn->confq_pbl_addr_virt_addr = p_addr;

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_conn->confq_addr[i], GFP_KERNEL);
		if (!p_addr)
			goto nomem_confq;
		p_conn->confq_addr_virt_addr[i] = p_addr;

		p_addr = p_conn->confq_pbl_addr_virt_addr;
		((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
	}

	p_conn->free_on_delete = true;
	*p_out_conn = p_conn;
	return 0;

nomem_confq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->confq_pbl_addr_virt_addr,
			  p_conn->confq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
		if (p_conn->confq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->confq_addr_virt_addr[i],
					  p_conn->confq_addr[i]);
nomem_xferq:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_conn->xferq_pbl_addr_virt_addr,
			  p_conn->xferq_pbl_addr);
	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
		if (p_conn->xferq_addr_virt_addr[i])
			dma_free_coherent(&p_hwfn->cdev->pdev->dev,
					  QED_CHAIN_PAGE_SIZE,
					  p_conn->xferq_addr_virt_addr[i],
					  p_conn->xferq_addr[i]);
nomem_pbl_xferq:
	kfree(p_conn);
	return -ENOMEM;
}

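/* Free all DMA pages and PBLs owned by @p_conn (each pointer is NULL-checked,
 * since a failed allocation can leave some of them unset) and then the
 * structure itself.
 */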
static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
				     struct qed_fcoe_conn *p_conn)
{
	u32 i;

	if (!p_conn)
		return;

	if (p_conn->confq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_pbl_addr_virt_addr,
				  p_conn->confq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
		if (!p_conn->confq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->confq_addr_virt_addr[i],
				  p_conn->confq_addr[i]);
	}

	if (p_conn->xferq_pbl_addr_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_pbl_addr_virt_addr,
				  p_conn->xferq_pbl_addr);

	for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
		if (!p_conn->xferq_addr_virt_addr[i])
			continue;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  p_conn->xferq_addr_virt_addr[i],
				  p_conn->xferq_addr[i]);
	}
	kfree(p_conn);
}

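/* Doorbell/producer helpers: map a CID to its doorbell address in the hwfn's
 * doorbell BAR, and return the MSTORM/TSTORM external BDQ producer locations
 * in BAR0 (or NULL when no BDQ resource is allocated to this function).
 */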
static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
{
	return (u8 __iomem *)p_hwfn->doorbells +
	       qed_db_addr(cid, DQ_DEMS_LEGACY);
}

static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
						   u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
						     u8 bdq_id)
{
	if (RESC_NUM(p_hwfn, QED_BDQ)) {
		return (u8 __iomem *)p_hwfn->regview +
		       GTT_BAR0_MAP_REG_TSDM_RAM +
		       TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
								  QED_BDQ),
						       bdq_id);
	} else {
		DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
		return NULL;
	}
}

int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_info *p_fcoe_info;

	/* Allocate the FCoE info struct and its free-connection list */
	p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
	if (!p_fcoe_info) {
		DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&p_fcoe_info->free_list);

	p_hwfn->p_fcoe_info = p_fcoe_info;
	return 0;
}

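/* Initialize the connection-list lock and pre-initialize every FCoE task
 * context: zero it, mark timer logical clients 0/1 as valid and set the
 * TSTORM connection-type flag.
 */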
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
	struct e4_fcoe_task_context *p_task_ctx = NULL;
	int rc;
	u32 i;

	spin_lock_init(&p_hwfn->p_fcoe_info->lock);
	for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
		rc = qed_cxt_get_task_ctx(p_hwfn, i,
					  QED_CTX_WORKING_MEM,
					  (void **)&p_task_ctx);
		if (rc)
			continue;

		memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
		SET_FIELD(p_task_ctx->timer_context.logical_client_0,
			  TIMERS_CONTEXT_VALIDLC0, 1);
		SET_FIELD(p_task_ctx->timer_context.logical_client_1,
			  TIMERS_CONTEXT_VALIDLC1, 1);
		SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
			  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
	}
}

void qed_fcoe_free(struct qed_hwfn *p_hwfn)
{
	struct qed_fcoe_conn *p_conn = NULL;

	if (!p_hwfn->p_fcoe_info)
		return;

	while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
		p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
					  struct qed_fcoe_conn, list_entry);
		if (!p_conn)
			break;
		list_del(&p_conn->list_entry);
		qed_fcoe_free_connection(p_hwfn, p_conn);
	}

	kfree(p_hwfn->p_fcoe_info);
	p_hwfn->p_fcoe_info = NULL;
}

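/* Acquire an FCoE CID from the context manager and pair it with either the
 * caller-supplied connection or a newly allocated one; the CID is released
 * again if allocation fails.
 */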
static int
qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
			    struct qed_fcoe_conn *p_in_conn,
			    struct qed_fcoe_conn **p_out_conn)
{
	struct qed_fcoe_conn *p_conn = NULL;
	int rc = 0;
	u32 icid;

	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
	if (rc)
		return rc;

	/* Use input connection [if provided] or allocate a new one */
	if (p_in_conn) {
		p_conn = p_in_conn;
	} else {
		rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
		if (rc) {
			spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
			qed_cxt_release_cid(p_hwfn, icid);
			spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
			return rc;
		}
	}

	p_conn->icid = icid;
	p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
	*p_out_conn = p_conn;

	return rc;
}

static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
					struct qed_fcoe_conn *p_conn)
{
	spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
	list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
	qed_cxt_release_cid(p_hwfn, p_conn->icid);
	spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
}

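/* Read the TSTORM (Rx) and PSTORM (Tx) FCoE statistics of this PF from storm
 * RAM and convert them to CPU byte order.
 */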
static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_rx_stat tstats;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
	    TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
	p_stats->fcoe_rx_data_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
	p_stats->fcoe_rx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
	p_stats->fcoe_rx_other_pkt_cnt =
	    HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);

	p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
	p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
	p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
	p_stats->fcoe_silent_drop_total_pkt_cnt =
	    le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
}

static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_fcoe_stats *p_stats)
{
	struct fcoe_tx_stat pstats;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
	    PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
	p_stats->fcoe_tx_data_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
	p_stats->fcoe_tx_xfer_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
	p_stats->fcoe_tx_other_pkt_cnt =
	    HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
}

static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
			      struct qed_fcoe_stats *p_stats)
{
	struct qed_ptt *p_ptt;

	memset(p_stats, 0, sizeof(*p_stats));

	p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	_qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
	_qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

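/* Hash-table node tying a connection handle (icid) to its qed_fcoe_conn */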
struct qed_hash_fcoe_con {
	struct hlist_node node;
	struct qed_fcoe_conn *con;
};

static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
				  struct qed_dev_fcoe_info *info)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	memset(info, 0, sizeof(*info));
	rc = qed_fill_dev_info(cdev, &info->common);

	info->primary_dbq_rq_addr =
	    qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
	info->secondary_bdq_rq_addr =
	    qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);

	info->wwpn = hwfn->mcp_info->func_info.wwn_port;
	info->wwnn = hwfn->mcp_info->func_info.wwn_node;

	info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);

	return rc;
}

static void qed_register_fcoe_ops(struct qed_dev *cdev,
				  struct qed_fcoe_cb_ops *ops, void *cookie)
{
	cdev->protocol_ops.fcoe = ops;
	cdev->ops_cookie = cookie;
}

static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
						   u32 handle)
{
	struct qed_hash_fcoe_con *hash_con = NULL;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
		return NULL;

	hash_for_each_possible(cdev->connections, hash_con, node, handle) {
		if (hash_con->con->icid == handle)
			break;
	}

	if (!hash_con || (hash_con->con->icid != handle))
		return NULL;

	return hash_con;
}

static int qed_fcoe_stop(struct qed_dev *cdev)
{
	struct qed_ptt *p_ptt;
	int rc;

	if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
		DP_NOTICE(cdev, "fcoe already stopped\n");
		return 0;
	}

	if (!hash_empty(cdev->connections)) {
		DP_NOTICE(cdev,
			  "Can't stop fcoe - not all connections were returned\n");
		return -EINVAL;
	}

	p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (!p_ptt)
		return -EAGAIN;

	/* Stop the fcoe */
	rc = qed_sp_fcoe_func_stop(QED_LEADING_HWFN(cdev), p_ptt,
				   QED_SPQ_MODE_EBLOCK, NULL);
	cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
	qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);

	return rc;
}

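/* Start the FCoE function on the leading hwfn and, when the caller provides a
 * qed_fcoe_tid block, export the task (TID) memory layout so the upper driver
 * can fill task contexts directly.
 */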
static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
{
	int rc;

	if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
		DP_NOTICE(cdev, "fcoe already started\n");
		return 0;
	}

	rc = qed_sp_fcoe_func_start(QED_LEADING_HWFN(cdev),
				    QED_SPQ_MODE_EBLOCK, NULL);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start fcoe\n");
		return rc;
	}

	cdev->flags |= QED_FLAG_STORAGE_STARTED;
	hash_init(cdev->connections);

	if (tasks) {
		struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
						       GFP_ATOMIC);

		if (!tid_info) {
			DP_NOTICE(cdev,
				  "Failed to allocate tasks information\n");
			qed_fcoe_stop(cdev);
			return -ENOMEM;
		}

		rc = qed_cxt_get_tid_mem_info(QED_LEADING_HWFN(cdev), tid_info);
		if (rc) {
			DP_NOTICE(cdev, "Failed to gather task information\n");
			qed_fcoe_stop(cdev);
			kfree(tid_info);
			return rc;
		}

		/* Fill task information */
		tasks->size = tid_info->tid_size;
		tasks->num_tids_per_block = tid_info->num_tids_per_block;
		memcpy(tasks->blocks, tid_info->blocks,
		       MAX_TID_BLOCKS_FCOE * sizeof(u8 *));

		kfree(tid_info);
	}

	return 0;
}

static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
				 u32 *handle,
				 u32 *fw_cid, void __iomem **p_doorbell)
{
	struct qed_hash_fcoe_con *hash_con;
	int rc;

	/* Allocate a hashed connection */
	hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
		return -ENOMEM;
	}

	/* Acquire the connection */
	rc = qed_fcoe_acquire_connection(QED_LEADING_HWFN(cdev), NULL,
					 &hash_con->con);
	if (rc) {
		DP_NOTICE(cdev, "Failed to acquire Connection\n");
		kfree(hash_con);
		return rc;
	}

	/* Add the connection to the hash table */
	*handle = hash_con->con->icid;
	*fw_cid = hash_con->con->fw_cid;
	hash_add(cdev->connections, &hash_con->node, *handle);

	if (p_doorbell)
		*p_doorbell = qed_fcoe_get_db_addr(QED_LEADING_HWFN(cdev),
						   *handle);

	return 0;
}

static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
{
	struct qed_hash_fcoe_con *hash_con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	hlist_del(&hash_con->node);
	qed_fcoe_release_connection(QED_LEADING_HWFN(cdev), hash_con->con);
	kfree(hash_con);

	return 0;
}

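/* Copy the upper driver's offload parameters into the qed connection; each
 * MAC address is packed into three 16-bit words (two bytes per word) before
 * the offload ramrod is posted.
 */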
static int qed_fcoe_offload_conn(struct qed_dev *cdev,
				 u32 handle,
				 struct qed_fcoe_params_offload *conn_info)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;

	con->sq_pbl_addr = conn_info->sq_pbl_addr;
	con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
	con->sq_next_page_addr = conn_info->sq_next_page_addr;
	con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
	con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
	con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
	con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
	con->vlan_tag = conn_info->vlan_tag;
	con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
	con->flags = conn_info->flags;
	con->def_q_idx = conn_info->def_q_idx;

	con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
	    conn_info->src_mac[4];
	con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
	    conn_info->src_mac[2];
	con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
	    conn_info->src_mac[0];
	con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
	    conn_info->dst_mac[4];
	con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
	    conn_info->dst_mac[2];
	con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
	    conn_info->dst_mac[0];

	con->s_id.addr_hi = conn_info->s_id.addr_hi;
	con->s_id.addr_mid = conn_info->s_id.addr_mid;
	con->s_id.addr_lo = conn_info->s_id.addr_lo;
	con->d_id.addr_hi = conn_info->d_id.addr_hi;
	con->d_id.addr_mid = conn_info->d_id.addr_mid;
	con->d_id.addr_lo = conn_info->d_id.addr_lo;

	return qed_sp_fcoe_conn_offload(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
				 u32 handle, dma_addr_t terminate_params)
{
	struct qed_hash_fcoe_con *hash_con;
	struct qed_fcoe_conn *con;

	hash_con = qed_fcoe_get_hash(cdev, handle);
	if (!hash_con) {
		DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
			  handle);
		return -EINVAL;
	}

	/* Update the connection with information from the params */
	con = hash_con->con;
	con->terminate_params = terminate_params;

	return qed_sp_fcoe_conn_destroy(QED_LEADING_HWFN(cdev), con,
					QED_SPQ_MODE_EBLOCK, NULL);
}

static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
{
	return qed_fcoe_get_stats(QED_LEADING_HWFN(cdev), stats);
}

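/* Aggregate the firmware FCoE counters into the management-FW statistics
 * format and let the registered protocol driver, if any, report its login
 * failures.
 */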
void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
				 struct qed_mcp_fcoe_stats *stats)
{
	struct qed_fcoe_stats proto_stats;

	/* Retrieve FW statistics */
	memset(&proto_stats, 0, sizeof(proto_stats));
	if (qed_fcoe_stats(cdev, &proto_stats)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE,
			   "Failed to collect FCoE statistics\n");
		return;
	}

	/* Translate FW statistics into struct */
	stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
			 proto_stats.fcoe_rx_xfer_pkt_cnt +
			 proto_stats.fcoe_rx_other_pkt_cnt;
	stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
			 proto_stats.fcoe_tx_xfer_pkt_cnt +
			 proto_stats.fcoe_tx_other_pkt_cnt;
	stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;

	/* Request protocol driver to fill-in the rest */
	if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
		struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
		void *cookie = cdev->ops_cookie;

		if (ops->get_login_failures)
			stats->login_failure = ops->get_login_failures(cookie);
	}
}

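/* FCoE operations exported to the protocol driver via qed_get_fcoe_ops() */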
static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
	.common = &qed_common_ops_pass,
	.ll2 = &qed_ll2_ops_pass,
	.fill_dev_info = &qed_fill_fcoe_dev_info,
	.start = &qed_fcoe_start,
	.stop = &qed_fcoe_stop,
	.register_ops = &qed_register_fcoe_ops,
	.acquire_conn = &qed_fcoe_acquire_conn,
	.release_conn = &qed_fcoe_release_conn,
	.offload_conn = &qed_fcoe_offload_conn,
	.destroy_conn = &qed_fcoe_destroy_conn,
	.get_stats = &qed_fcoe_stats,
};

const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
{
	return &qed_fcoe_ops_pass;
}
EXPORT_SYMBOL(qed_get_fcoe_ops);

void qed_put_fcoe_ops(void)
{
}
EXPORT_SYMBOL(qed_put_fcoe_ops);