/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_defs_svc.h"
#include "bfa_port.h"
#include "bfi.h"
#include "bfa_ioc.h"


BFA_TRC_FILE(CNA, PORT);

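/*
 * bfa_port_stats_swap()
 *
 * Added descriptive comment: swaps the port statistics DMA'ed in from the
 * f/w to host byte order, in place. Each 32-bit word is byte-swapped and,
 * on little-endian hosts, the two words of each pair are also exchanged,
 * so 64-bit counters end up in host order.
 */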
static void
bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
{
	u32	*dip = (u32 *) stats;
	__be32	t0, t1;
	int	i;

	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
		i += 2) {
		t0 = dip[i];
		t1 = dip[i + 1];
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(t0);
		dip[i + 1] = be32_to_cpu(t1);
#else
		dip[i] = be32_to_cpu(t1);
		dip[i + 1] = be32_to_cpu(t0);
#endif
	}
}

/*
 * bfa_port_enable_isr()
 *
 * Handles the f/w response to a port enable request.
 *
 * @param[in] port - Pointer to the port module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
{
	bfa_trc(port, status);
	port->endis_pending = BFA_FALSE;
	port->endis_cbfn(port->endis_cbarg, status);
}

/*
 * bfa_port_disable_isr()
 *
 * Handles the f/w response to a port disable request.
 *
 * @param[in] port - Pointer to the port module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
{
	bfa_trc(port, status);
	port->endis_pending = BFA_FALSE;
	port->endis_cbfn(port->endis_cbarg, status);
}

/*
 * bfa_port_get_stats_isr()
 *
 * Handles the f/w response to a get-stats request.
 *
 * @param[in] port - Pointer to the Port module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
	port->stats_status = status;
	port->stats_busy = BFA_FALSE;

	if (status == BFA_STATUS_OK) {
		memcpy(port->stats, port->stats_dma.kva,
		       sizeof(union bfa_port_stats_u));
		bfa_port_stats_swap(port, port->stats);

		port->stats->fc.secs_reset = ktime_get_seconds() -
					port->stats_reset_time;
	}

	if (port->stats_cbfn) {
		port->stats_cbfn(port->stats_cbarg, status);
		port->stats_cbfn = NULL;
	}
}

/*
 * bfa_port_clear_stats_isr()
 *
 * Handles the f/w response to a clear-stats request and re-arms the
 * stats reset time stamp.
 *
 * @param[in] port - Pointer to the Port module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status)
{
	port->stats_status = status;
	port->stats_busy = BFA_FALSE;

	/*
	 * re-initialize time stamp for stats reset
	 */
	port->stats_reset_time = ktime_get_seconds();

	if (port->stats_cbfn) {
		port->stats_cbfn(port->stats_cbarg, status);
		port->stats_cbfn = NULL;
	}
}

/*
 * bfa_port_isr()
 *
 * Mailbox message handler for the Port module: dispatches f/w responses
 * to the matching ISR helper, ignoring responses for requests that are
 * no longer pending.
 *
 * @param[in] Pointer to the Port module data structure.
 *
 * @return void
 */
static void
bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
	struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
	union bfi_port_i2h_msg_u *i2hmsg;

	i2hmsg = (union bfi_port_i2h_msg_u *) m;
	bfa_trc(port, m->mh.msg_id);

	switch (m->mh.msg_id) {
	case BFI_PORT_I2H_ENABLE_RSP:
		if (port->endis_pending == BFA_FALSE)
			break;
		bfa_port_enable_isr(port, i2hmsg->enable_rsp.status);
		break;

	case BFI_PORT_I2H_DISABLE_RSP:
		if (port->endis_pending == BFA_FALSE)
			break;
		bfa_port_disable_isr(port, i2hmsg->disable_rsp.status);
		break;

	case BFI_PORT_I2H_GET_STATS_RSP:
		/* Stats busy flag is still set? (the cmd may have timed out) */
		if (port->stats_busy == BFA_FALSE)
			break;
		bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
		break;

	case BFI_PORT_I2H_CLEAR_STATS_RSP:
		if (port->stats_busy == BFA_FALSE)
			break;
		bfa_port_clear_stats_isr(port, i2hmsg->clearstats_rsp.status);
		break;

	default:
		WARN_ON(1);
	}
}

/*
 * bfa_port_meminfo()
 *
 * Returns the size of the DMA memory needed by the Port module.
 *
 * @param[in] void
 *
 * @return Size of DMA region
 */
u32
bfa_port_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
}

/*
 * bfa_port_mem_claim()
 *
 * Claims the DMA memory carved out for the Port module.
 *
 * @param[in] port    Port module pointer
 *	      dma_kva Kernel Virtual Address of Port DMA Memory
 *	      dma_pa  Physical Address of Port DMA Memory
 *
 * @return void
 */
void
bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
{
	port->stats_dma.kva = dma_kva;
	port->stats_dma.pa  = dma_pa;
}

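/*
 * Illustrative DMA setup sketch (not part of the driver): the caller is
 * expected to allocate a DMA-able buffer of bfa_port_meminfo() bytes and
 * hand it to bfa_port_mem_claim() before issuing stats requests. The
 * allocation call and the names "dev", "kva" and "pa" below are
 * hypothetical.
 *
 *	u32 len = bfa_port_meminfo();
 *	u8 *kva = dma_alloc_coherent(dev, len, &pa, GFP_KERNEL);
 *
 *	if (kva)
 *		bfa_port_mem_claim(port, kva, pa);
 */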
/*
 * bfa_port_enable()
 *
 * Send the Port enable request to the f/w
 *
 * @param[in] Pointer to the Port module data structure.
 *
 * @return Status
 */
bfa_status_t
bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/* If port is PBC disabled, return error */
	if (port->pbc_disabled) {
		bfa_trc(port, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_ioc_is_disabled(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
		return BFA_STATUS_IOC_DISABLED;
	}

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* if port is d-port enabled, return error */
	if (port->dport_enabled) {
		bfa_trc(port, BFA_STATUS_DPORT_ERR);
		return BFA_STATUS_DPORT_ERR;
	}

	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn    = cbfn;
	port->endis_cbarg   = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}

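/*
 * Illustrative usage sketch (not part of the driver): bfa_port_enable()
 * and bfa_port_disable() are asynchronous; the caller supplies a
 * completion callback that is invoked from the mailbox ISR with the f/w
 * status. The callback and cbarg names below are hypothetical.
 *
 *	static void my_endis_cbfn(void *cbarg, bfa_status_t status)
 *	{
 *		// cbarg is the pointer passed to bfa_port_enable()
 *	}
 *
 *	if (bfa_port_enable(port, my_endis_cbfn, my_cbarg) != BFA_STATUS_OK)
 *		;	// request rejected, callback will not fire
 */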
/*
 * bfa_port_disable()
 *
 * Send the Port disable request to the f/w
 *
 * @param[in] Pointer to the Port module data structure.
 *
 * @return Status
 */
bfa_status_t
bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
		 void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	/* If port is PBC disabled, return error */
	if (port->pbc_disabled) {
		bfa_trc(port, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_ioc_is_disabled(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
		return BFA_STATUS_IOC_DISABLED;
	}

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	/* if port is d-port enabled, return error */
	if (port->dport_enabled) {
		bfa_trc(port, BFA_STATUS_DPORT_ERR);
		return BFA_STATUS_DPORT_ERR;
	}

	if (port->endis_pending) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;

	port->msgtag++;
	port->endis_cbfn    = cbfn;
	port->endis_cbarg   = cbarg;
	port->endis_pending = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->endis_mb);

	return BFA_STATUS_OK;
}

/*
 * bfa_port_get_stats()
 *
 * Send the request to the f/w to fetch Port statistics.
 *
 * @param[in] Pointer to the Port module data structure.
 *
 * @return Status
 */
bfa_status_t
bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
		   bfa_port_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_port_get_stats_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;

	port->stats	  = stats;
	port->stats_cbfn  = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy  = BFA_TRUE;
	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}

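/*
 * Flow note (added): the get-stats request carries the physical address
 * of the buffer claimed in bfa_port_mem_claim(); the f/w DMAs the raw
 * stats into that buffer, and bfa_port_get_stats_isr() then copies them
 * into the caller's "stats" structure, swaps them to host order and
 * computes fc.secs_reset as seconds since the last stats reset before
 * running the completion callback.
 */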
/*
 * bfa_port_clear_stats()
 *
 * Send the request to the f/w to clear Port statistics.
 *
 * @param[in] Pointer to the Port module data structure.
 *
 * @return Status
 */
bfa_status_t
bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
		     void *cbarg)
{
	struct bfi_port_generic_req_s *m;

	if (!bfa_ioc_is_operational(port->ioc)) {
		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
		return BFA_STATUS_IOC_FAILURE;
	}

	if (port->stats_busy) {
		bfa_trc(port, BFA_STATUS_DEVBUSY);
		return BFA_STATUS_DEVBUSY;
	}

	m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;

	port->stats_cbfn  = cbfn;
	port->stats_cbarg = cbarg;
	port->stats_busy  = BFA_TRUE;

	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
		    bfa_ioc_portid(port->ioc));
	bfa_ioc_mbox_queue(port->ioc, &port->stats_mb);

	return BFA_STATUS_OK;
}

/*
 * bfa_port_notify()
 *
 * Port module IOC event handler
 *
 * @param[in] Pointer to the Port module data structure.
 * @param[in] IOC event structure
 *
 * @return void
 */
void
bfa_port_notify(void *arg, enum bfa_ioc_event_e event)
{
	struct bfa_port_s *port = (struct bfa_port_s *) arg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		/* Fail any pending get_stats/clear_stats requests */
		if (port->stats_busy) {
			if (port->stats_cbfn)
				port->stats_cbfn(port->stats_cbarg,
						 BFA_STATUS_FAILED);
			port->stats_cbfn = NULL;
			port->stats_busy = BFA_FALSE;
		}

		/* Fail any pending enable/disable request */
		if (port->endis_pending) {
			if (port->endis_cbfn)
				port->endis_cbfn(port->endis_cbarg,
						 BFA_STATUS_FAILED);
			port->endis_cbfn = NULL;
			port->endis_pending = BFA_FALSE;
		}

		/* clear D-port mode */
		if (port->dport_enabled)
			bfa_port_set_dportenabled(port, BFA_FALSE);
		break;
	default:
		break;
	}
}

/*
 * bfa_port_attach()
 *
 * Port module-attach API
 *
 * @param[in] port - Pointer to the Port module data structure
 *	      ioc  - Pointer to the ioc module data structure
 *	      dev  - Pointer to the device driver module data structure
 *		     The device driver specific mbox ISR functions have
 *		     this pointer as one of the parameters.
 *	      trcmod - Pointer to the trace module data structure
 *
 * @return void
 */
void
bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
		void *dev, struct bfa_trc_mod_s *trcmod)
{
	WARN_ON(!port);

	port->dev    = dev;
	port->ioc    = ioc;
	port->trcmod = trcmod;

	port->stats_busy    = BFA_FALSE;
	port->endis_pending = BFA_FALSE;
	port->stats_cbfn    = NULL;
	port->endis_cbfn    = NULL;
	port->pbc_disabled  = BFA_FALSE;
	port->dport_enabled = BFA_FALSE;

	bfa_ioc_mbox_regisr(port->ioc, BFI_MC_PORT, bfa_port_isr, port);
	bfa_q_qe_init(&port->ioc_notify);
	bfa_ioc_notify_init(&port->ioc_notify, bfa_port_notify, port);
	list_add_tail(&port->ioc_notify.qe, &port->ioc->notify_q);

	/*
	 * initialize time stamp for stats reset
	 */
	port->stats_reset_time = ktime_get_seconds();

	bfa_trc(port, 0);
}

/*
 * bfa_port_set_dportenabled()
 *
 * Port module - set d-port enabled flag
 *
 * @param[in] port - Pointer to the Port module data structure
 *
 * @return void
 */
void
bfa_port_set_dportenabled(struct bfa_port_s *port, bfa_boolean_t enabled)
{
	port->dport_enabled = enabled;
}

/*
 * CEE module specific definitions
 */

/*
 * bfa_cee_get_attr_isr()
 *
 * @brief CEE ISR for get-attributes responses from f/w
 *
 * @param[in] cee - Pointer to the CEE module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
	struct bfa_cee_lldp_cfg_s *lldp_cfg = &cee->attr->lldp_remote;

	cee->get_attr_status = status;
	bfa_trc(cee, 0);
	if (status == BFA_STATUS_OK) {
		bfa_trc(cee, 0);
		memcpy(cee->attr, cee->attr_dma.kva,
		       sizeof(struct bfa_cee_attr_s));
		lldp_cfg->time_to_live = be16_to_cpu(lldp_cfg->time_to_live);
		lldp_cfg->enabled_system_cap =
				be16_to_cpu(lldp_cfg->enabled_system_cap);
	}
	cee->get_attr_pending = BFA_FALSE;
	if (cee->cbfn.get_attr_cbfn) {
		bfa_trc(cee, 0);
		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
	}
}

/*
 * bfa_cee_get_stats_isr()
 *
 * @brief CEE ISR for get-stats responses from f/w
 *
 * @param[in] cee - Pointer to the CEE module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
	u32 *buffer;
	int i;

	cee->get_stats_status = status;
	bfa_trc(cee, 0);
	if (status == BFA_STATUS_OK) {
		bfa_trc(cee, 0);
		memcpy(cee->stats, cee->stats_dma.kva,
		       sizeof(struct bfa_cee_stats_s));
		/* swap the cee stats */
		buffer = (u32 *)cee->stats;
		for (i = 0; i < (sizeof(struct bfa_cee_stats_s) /
				 sizeof(u32)); i++)
			buffer[i] = cpu_to_be32(buffer[i]);
	}
	cee->get_stats_pending = BFA_FALSE;
	bfa_trc(cee, 0);
	if (cee->cbfn.get_stats_cbfn) {
		bfa_trc(cee, 0);
		cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
	}
}

/*
 * bfa_cee_reset_stats_isr()
 *
 * @brief CEE ISR for reset-stats responses from f/w
 *
 * @param[in] cee - Pointer to the CEE module
 *	      status - Return status from the f/w
 *
 * @return void
 */
static void
bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
{
	cee->reset_stats_status = status;
	cee->reset_stats_pending = BFA_FALSE;
	if (cee->cbfn.reset_stats_cbfn)
		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}

/*
 * bfa_cee_meminfo()
 *
 * @brief Returns the size of the DMA memory needed by CEE module
 *
 * @param[in] void
 *
 * @return Size of DMA region
 */
u32
bfa_cee_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ) +
		BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
}

/*
 * bfa_cee_mem_claim()
 *
 * @brief Initializes CEE DMA memory
 *
 * @param[in] cee     CEE module pointer
 *	      dma_kva Kernel Virtual Address of CEE DMA Memory
 *	      dma_pa  Physical Address of CEE DMA Memory
 *
 * @return void
 */
void
bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
{
	cee->attr_dma.kva = dma_kva;
	cee->attr_dma.pa = dma_pa;
	cee->stats_dma.kva = dma_kva + BFA_ROUNDUP(
			     sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
	cee->stats_dma.pa = dma_pa + BFA_ROUNDUP(
			     sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
	cee->attr = (struct bfa_cee_attr_s *) dma_kva;
	cee->stats = (struct bfa_cee_stats_s *) (dma_kva + BFA_ROUNDUP(
			sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ));
}

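/*
 * Layout note (added): the single buffer of bfa_cee_meminfo() bytes is
 * split in two by bfa_cee_mem_claim() -- the attributes block first, then
 * the stats block at an offset rounded up to BFA_DMA_ALIGN_SZ:
 *
 *	dma_kva/dma_pa
 *	+---------------------------+  offset 0
 *	| struct bfa_cee_attr_s     |
 *	+---------------------------+  BFA_ROUNDUP(sizeof(bfa_cee_attr_s),
 *	| struct bfa_cee_stats_s    |               BFA_DMA_ALIGN_SZ)
 *	+---------------------------+
 */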
/*
 * bfa_cee_get_attr()
 *
 * @brief
 * Send the request to the f/w to fetch CEE attributes.
 *
 * @param[in] Pointer to the CEE module data structure.
 *
 * @return Status
 */

bfa_status_t
bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
		 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));
	bfa_trc(cee, 0);
	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->get_attr_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->get_attr_pending = BFA_TRUE;
	cmd = (struct bfi_cee_get_req_s *) cee->get_cfg_mb.msg;
	cee->attr = attr;
	cee->cbfn.get_attr_cbfn = cbfn;
	cee->cbfn.get_attr_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
		    bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);

	return BFA_STATUS_OK;
}

/*
 * bfa_cee_get_stats()
 *
 * @brief
 * Send the request to the f/w to fetch CEE statistics.
 *
 * @param[in] Pointer to the CEE module data structure.
 *
 * @return Status
 */

bfa_status_t
bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
		  bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_get_req_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));

	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->get_stats_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->get_stats_pending = BFA_TRUE;
	cmd = (struct bfi_cee_get_req_s *) cee->get_stats_mb.msg;
	cee->stats = stats;
	cee->cbfn.get_stats_cbfn = cbfn;
	cee->cbfn.get_stats_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
		    bfa_ioc_portid(cee->ioc));
	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);

	return BFA_STATUS_OK;
}

/*
 * bfa_cee_reset_stats()
 *
 * @brief Clears CEE Stats in the f/w.
 *
 * @param[in] Pointer to the CEE module data structure.
 *
 * @return Status
 */

bfa_status_t
bfa_cee_reset_stats(struct bfa_cee_s *cee,
		    bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg)
{
	struct bfi_cee_reset_stats_s *cmd;

	WARN_ON((cee == NULL) || (cee->ioc == NULL));
	if (!bfa_ioc_is_operational(cee->ioc)) {
		bfa_trc(cee, 0);
		return BFA_STATUS_IOC_FAILURE;
	}
	if (cee->reset_stats_pending == BFA_TRUE) {
		bfa_trc(cee, 0);
		return BFA_STATUS_DEVBUSY;
	}
	cee->reset_stats_pending = BFA_TRUE;
	cmd = (struct bfi_cee_reset_stats_s *) cee->reset_stats_mb.msg;
	cee->cbfn.reset_stats_cbfn = cbfn;
	cee->cbfn.reset_stats_cbarg = cbarg;
	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
		    bfa_ioc_portid(cee->ioc));
	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);

	return BFA_STATUS_OK;
}

/*
 * bfa_cee_isr()
 *
 * @brief Handles Mail-box interrupts for CEE module.
 *
 * @param[in] Pointer to the CEE module data structure.
 *
 * @return void
 */

void
bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
	union bfi_cee_i2h_msg_u *msg;
	struct bfi_cee_get_rsp_s *get_rsp;
	struct bfa_cee_s *cee = (struct bfa_cee_s *) cbarg;

	msg = (union bfi_cee_i2h_msg_u *) m;
	get_rsp = (struct bfi_cee_get_rsp_s *) m;
	bfa_trc(cee, msg->mh.msg_id);
	switch (msg->mh.msg_id) {
	case BFI_CEE_I2H_GET_CFG_RSP:
		bfa_trc(cee, get_rsp->cmd_status);
		bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
		break;
	case BFI_CEE_I2H_GET_STATS_RSP:
		bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
		break;
	case BFI_CEE_I2H_RESET_STATS_RSP:
		bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * bfa_cee_notify()
 *
 * @brief CEE module IOC event handler.
 *
 * @param[in] Pointer to the CEE module data structure.
 * @param[in] IOC event type
 *
 * @return void
 */

void
bfa_cee_notify(void *arg, enum bfa_ioc_event_e event)
{
	struct bfa_cee_s *cee = (struct bfa_cee_s *) arg;

	bfa_trc(cee, event);

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (cee->get_attr_pending == BFA_TRUE) {
			cee->get_attr_status = BFA_STATUS_FAILED;
			cee->get_attr_pending = BFA_FALSE;
			if (cee->cbfn.get_attr_cbfn) {
				cee->cbfn.get_attr_cbfn(
					cee->cbfn.get_attr_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		if (cee->get_stats_pending == BFA_TRUE) {
			cee->get_stats_status = BFA_STATUS_FAILED;
			cee->get_stats_pending = BFA_FALSE;
			if (cee->cbfn.get_stats_cbfn) {
				cee->cbfn.get_stats_cbfn(
					cee->cbfn.get_stats_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		if (cee->reset_stats_pending == BFA_TRUE) {
			cee->reset_stats_status = BFA_STATUS_FAILED;
			cee->reset_stats_pending = BFA_FALSE;
			if (cee->cbfn.reset_stats_cbfn) {
				cee->cbfn.reset_stats_cbfn(
					cee->cbfn.reset_stats_cbarg,
					BFA_STATUS_FAILED);
			}
		}
		break;

	default:
		break;
	}
}

/*
 * bfa_cee_attach()
 *
 * @brief CEE module-attach API
 *
 * @param[in] cee - Pointer to the CEE module data structure
 *	      ioc - Pointer to the ioc module data structure
 *	      dev - Pointer to the device driver module data structure
 *		    The device driver specific mbox ISR functions have
 *		    this pointer as one of the parameters.
 *
 * @return void
 */
void
bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc,
	       void *dev)
{
	WARN_ON(cee == NULL);
	cee->dev = dev;
	cee->ioc = ioc;

	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
	bfa_q_qe_init(&cee->ioc_notify);
	bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
	list_add_tail(&cee->ioc_notify.qe, &cee->ioc->notify_q);
}