/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_LICENSE("GPL");

/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250

#define OCTNIC_MAX_SG  MAX_SKB_FRAGS

/**
 * lio_delete_glists - Delete gather lists
 * @lio: per-network private data
 */
void lio_delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
		do {
			g = (struct octnic_gather *)
			    lio_list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}
EXPORT_SYMBOL_GPL(lio_delete_glists);

/**
 * lio_setup_glists - Setup gather lists
 * @oct: octeon_device
 * @lio: per-network private data
 * @num_iqs: count of iqs to allocate
 */
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct octnic_gather *g;
	int i, j;

	lio->glist_lock =
	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist =
	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store the virtual and DMA base addresses of
	 * the per-glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		lio_delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			lio_delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(lio_setup_glists);

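/**
 * liquidio_set_feature - Send a feature control command to the NIC firmware
 * @netdev: network device
 * @cmd: feature command, e.g. OCTNET_CMD_LRO_ENABLE
 * @param1: command-specific parameter
 *
 * Builds an octnic control packet and sends it on the first tx queue;
 * completion is reported through liquidio_link_ctrl_cmd_completion().
 */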
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(liquidio_set_feature);

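/**
 * octeon_report_tx_completion_to_bql - Report completions to byte queue limits
 * @txq: netdev TX queue (struct netdev_queue)
 * @pkts_compl: number of packets completed
 * @bytes_compl: number of bytes completed
 */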
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

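/**
 * octeon_update_tx_completion_counters - Accumulate TX completion counters
 * @buf: request buffer being freed
 * @reqtype: request type (REQTYPE_*)
 * @pkts_compl: running packet completion count to update
 * @bytes_compl: running byte completion count to update
 *
 * Looks up the skb associated with the request and adds its length to the
 * completion counters; unknown request types are ignored.
 */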
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

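/**
 * octeon_report_sent_bytes_to_bql - Report queued bytes to byte queue limits
 * @buf: request buffer that was queued
 * @reqtype: request type (REQTYPE_*)
 *
 * Return: non-zero if the TX queue is stopped, 0 otherwise.
 */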
int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return 0;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);

	return netif_xmit_stopped(txq);
}

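/**
 * liquidio_link_ctrl_cmd_completion - Completion callback for NIC control commands
 * @nctrl_ptr: pointer to the completed octnic_ctrl_pkt
 *
 * Logs the outcome of the command (MAC change, LRO, VLAN filter, VxLAN port
 * config, etc.); does nothing if the command completed with a non-zero status.
 */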
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->sc_status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
	case OCTNET_CMD_SET_UC_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   " MACAddr changed to %pM\n",
				   mac);
		}
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");

		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);

		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}
EXPORT_SYMBOL_GPL(liquidio_link_ctrl_cmd_completion);

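/**
 * octeon_pf_changed_vf_macaddr - Handle a PF-initiated VF MAC address change
 * @oct: octeon device (VF)
 * @mac: new MAC address assigned by the PF
 *
 * Updates the netdev hardware address and link info under rtnl_lock and
 * notifies the stack; the firmware is not notified because the PF has
 * already done so.
 */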
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		eth_hw_addr_set(netdev, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}

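/**
 * octeon_schedule_rxq_oom_work - Schedule a deferred DROQ buffer refill
 * @oct: octeon device
 * @droq: output queue that failed to refill its buffers
 *
 * Queues the per-queue OOM poll work to run after LIO_OOM_POLL_INTERVAL_MS.
 */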
void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
				  struct octeon_droq *droq)
{
	struct net_device *netdev = oct->props[0].netdev;
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];

	queue_delayed_work(wq->wq, &wq->wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}

static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	int q_no = wk->ctxul;
	struct octeon_droq *droq = oct->droq[q_no];

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
		return;

	if (octeon_retry_droq_refill(droq))
		octeon_schedule_rxq_oom_work(oct, droq);
}

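/**
 * setup_rx_oom_poll_fn - Create the per-RX-queue OOM poll workqueues
 * @netdev: network device
 *
 * Return: 0 on success, -ENOMEM if a workqueue cannot be allocated.
 */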
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q, q_no;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		wq = &lio->rxq_status_wq[q_no];
		wq->wq = alloc_workqueue("rxq-oom-status",
					 WQ_MEM_RECLAIM, 0);
		if (!wq->wq) {
			dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&wq->wk.work,
				  octnet_poll_check_rxq_oom_status);
		wq->wk.ctxptr = lio;
		wq->wk.ctxul = q_no;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(setup_rx_oom_poll_fn);

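/**
 * cleanup_rx_oom_poll_fn - Cancel and destroy the RX OOM poll workqueues
 * @netdev: network device
 */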
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct cavium_wq *wq;
	int q_no;

	for (q_no = 0; q_no < oct->num_oqs; q_no++) {
		wq = &lio->rxq_status_wq[q_no];
		if (wq->wq) {
			cancel_delayed_work_sync(&wq->wk.work);
			destroy_workqueue(wq->wq);
			wq->wq = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(cleanup_rx_oom_poll_fn);

/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
	    lio->linfo.link.s.link_up &&
	    (!octnet_iq_is_full(oct, iq_num))) {
		netif_wake_subqueue(netdev, iq->q_index);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
					  tx_restart, 1);
	}
}

/**
 * octeon_setup_droq - Setup output queue
 * @oct: octeon device
 * @q_no: which queue
 * @num_descs: how many descriptors
 * @desc_size: size of each descriptor
 * @app_ctx: application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * liquidio_push_packet - Routine to push packets arriving on an Octeon interface up to the network layer.
 * @octeon_id: octeon device id.
 * @skbuff: skbuff struct to be passed to network layer.
 * @len: size of total data received.
 * @rh: Control header associated with the packet
 * @param: additional control data with the packet
 * @arg: farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 __maybe_unused octeon_id,
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
			CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		napi_gro_receive(napi, skb);

		droq->stats.rx_bytes_received += len -
			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
		droq->stats.rx_pkts_received++;
	} else {
		recv_buffer_free(skb);
	}
}

/**
 * napi_schedule_wrapper - wrapper for calling napi_schedule
 * @param: parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
 * @arg: pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
		smp_call_function_single_async(droq->cpu_id, &droq->csd);
	}
}

/**
 * liquidio_napi_poll - Entry point for NAPI polling
 * @napi: NAPI structure
 * @budget: maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT 2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		napi_complete_done(napi, work_done);

		octeon_enable_irq(droq->oct_dev, droq->q_no);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}

/**
 * liquidio_setup_io_queues - Setup input and output queues
 * @octeon_dev: octeon device
 * @ifidx: Interface index
 * @num_iqs: input io queue count
 * @num_oqs: output io queue count
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(liquidio_setup_io_queues);

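/**
 * liquidio_schedule_msix_droq_pkt_handler - Dispatch DROQ work from MSI-X context
 * @droq: output queue that raised the interrupt
 * @ret: interrupt status bits returned by the MSI-X handler
 *
 * Schedules NAPI when the queue is in poll mode, otherwise kicks the droq
 * tasklet for RX interrupts.
 */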
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not come here should not get rx when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}

	return 0;
}

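/**
 * liquidio_msix_intr_handler - MSI-X interrupt handler for an IO queue vector
 * @irq: unused
 * @dev: pointer to the octeon_ioq_vector for this interrupt
 */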
irqreturn_t
liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
 * @oct: octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * liquidio_legacy_intr_handler - Interrupt handler for octeon
 * @irq: unused
 * @dev: octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * octeon_setup_interrupt - Setup interrupt for octeon device
 * @oct: octeon device
 * @num_ioqs: number of queues
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non-ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption is that PF MSI-X vectors start at pf_srn, with
		 * the last (non-ioq) vector at trs, and not from 0. If not,
		 * change this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(octeon_setup_interrupt);

/**
 * liquidio_change_mtu - Net device change_mtu
 * @netdev: network device
 * @new_mtu: the new max transmit unit size
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int ret = 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
	ncmd->s.param1 = new_mtu;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
		return -EINVAL;
	}
	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	ret = wait_for_sc_completion_timeout(oct, sc, 0);
	if (ret)
		return ret;

	if (sc->sc_status) {
		WRITE_ONCE(sc->caller_is_done, true);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;
	lio->mtu = new_mtu;

	WRITE_ONCE(sc->caller_is_done, true);
	return 0;
}
EXPORT_SYMBOL_GPL(liquidio_change_mtu);

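/**
 * lio_wait_for_clean_oq - Wait for all active output queues to drain
 * @oct: octeon device
 *
 * Polls the pending-packet counts of the active output queues for up to
 * 100 iterations.
 *
 * Return: number of packets still pending when the wait ends.
 */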
int lio_wait_for_clean_oq(struct octeon_device *oct)
{
	int retry = 100, pending_pkts = 0;
	int idx;

	do {
		pending_pkts = 0;

		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
				continue;
			pending_pkts +=
				atomic_read(&oct->droq[idx]->pkts_pending);
		}

		if (pending_pkts > 0)
			schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pending_pkts;
}
EXPORT_SYMBOL_GPL(lio_wait_for_clean_oq);

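/**
 * octnet_nic_stats_callback - Copy firmware port statistics into the device
 * @oct_dev: octeon device
 * @status: soft command completion status
 * @ptr: soft command containing the firmware response
 *
 * On success the RX/TX link-level and firmware counters in the response are
 * byte-swapped and copied into oct_dev->link_stats; resp->status is set to 1
 * on success and -1 on failure.
 */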
static void
octnet_nic_stats_callback(struct octeon_device *oct_dev,
			  u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct oct_nic_stats_resp *resp =
	    (struct oct_nic_stats_resp *)sc->virtrptr;
	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;

	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->stats,
				    (sizeof(struct oct_link_stats)) >> 3);

		/* RX link-level stats */
		rstats->total_rcvd = rsp_rstats->total_rcvd;
		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
		rstats->total_bcst = rsp_rstats->total_bcst;
		rstats->total_mcst = rsp_rstats->total_mcst;
		rstats->runts = rsp_rstats->runts;
		rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
		/* Accounts for over/under-run of buffers */
		rstats->fifo_err = rsp_rstats->fifo_err;
		rstats->dmac_drop = rsp_rstats->dmac_drop;
		rstats->fcs_err = rsp_rstats->fcs_err;
		rstats->jabber_err = rsp_rstats->jabber_err;
		rstats->l2_err = rsp_rstats->l2_err;
		rstats->frame_err = rsp_rstats->frame_err;
		rstats->red_drops = rsp_rstats->red_drops;

		/* RX firmware stats */
		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
		rstats->fw_err_link = rsp_rstats->fw_err_link;
		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;

		/* Number of packets that are LROed */
		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
		/* Number of octets that are LROed */
		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
		/* Number of LRO packets formed */
		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
		/* Number of times LRO of a packet was aborted */
		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
		/* intrmod: packet forward rate */
		rstats->fwd_rate = rsp_rstats->fwd_rate;

		/* TX link-level stats */
		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
		tstats->ctl_sent = rsp_tstats->ctl_sent;
		/* Packets sent after one collision */
		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
		/* Packets sent after multiple collisions */
		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
		/* Packets not sent due to max collisions */
		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
		/* Packets not sent due to max deferrals */
		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
		/* Accounts for over/under-run of buffers */
		tstats->fifo_err = rsp_tstats->fifo_err;
		tstats->runts = rsp_tstats->runts;
		/* Total number of collisions detected */
		tstats->total_collisions = rsp_tstats->total_collisions;

		/* firmware stats */
		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
		tstats->fw_err_link = rsp_tstats->fw_err_link;
		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
		tstats->fw_tso = rsp_tstats->fw_tso;
		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;

		resp->status = 1;
	} else {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		resp->status = -1;
	}
}

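/**
 * lio_fetch_vf_stats - Query firmware for VF spoof-check statistics
 * @lio: per-network private data
 *
 * Sends OPCODE_NIC_VF_PORT_STATS and warns if any spoofed packets were
 * detected.
 */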
static int lio_fetch_vf_stats(struct lio *lio)
{
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_vf_stats_resp *resp;

	int retval;

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_vf_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		retval = -ENOMEM;
		goto lio_fetch_vf_stats_exit;
	}

	resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_vf_stats_exit;
	}

	retval =
		wait_for_sc_completion_timeout(oct_dev, sc,
					       (2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev,
			"sc OPCODE_NIC_VF_PORT_STATS command failed\n");
		goto lio_fetch_vf_stats_exit;
	}

	if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
		octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
				    (sizeof(u64)) >> 3);

		if (resp->spoofmac_cnt != 0) {
			dev_warn(&oct_dev->pci_dev->dev,
				 "%llu Spoofed packets detected\n",
				 resp->spoofmac_cnt);
		}
	}
	WRITE_ONCE(sc->caller_is_done, 1);

lio_fetch_vf_stats_exit:
	return retval;
}

void lio_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = wk->ctxptr;
	struct octeon_device *oct_dev = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct oct_nic_stats_resp *resp;
	unsigned long time_in_jiffies;
	int retval;

	if (OCTEON_CN23XX_PF(oct_dev)) {
		/* report spoofchk every 2 seconds */
		if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
		    (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
		    oct_dev->sriov_info.num_vfs_alloced) {
			lio_fetch_vf_stats(lio);
		}

		oct_dev->vfstats_poll++;
	}

	/* Alloc soft command */
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct_dev,
					  0,
					  sizeof(struct oct_nic_stats_resp),
					  0);

	if (!sc) {
		dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
		goto lio_fetch_stats_exit;
	}

	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_stats_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
				    OPCODE_NIC_PORT_STATS, 0, 0, 0);

	retval = octeon_send_soft_command(oct_dev, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct_dev, sc);
		goto lio_fetch_stats_exit;
	}

	retval = wait_for_sc_completion_timeout(oct_dev, sc,
						(2 * LIO_SC_MAX_TMO_MS));
	if (retval) {
		dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
		goto lio_fetch_stats_exit;
	}

	octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
	WRITE_ONCE(sc->caller_is_done, true);

lio_fetch_stats_exit:
	time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
	if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
		schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);

	return;
}
EXPORT_SYMBOL_GPL(lio_fetch_stats);

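/**
 * liquidio_set_speed - Set the link speed through the firmware SE API
 * @lio: per-network private data
 * @speed: requested link speed
 *
 * Only supported on CN23XX PF devices; a no-op if the requested speed is
 * already configured.
 */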
int liquidio_set_speed(struct lio *lio, int speed)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	if (oct->speed_setting == speed)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
			__func__);
		return -EOPNOTSUPP;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
	ncmd->s.param1 = speed;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EBUSY;
	} else {
		/* Wait for response or timeout */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;

		if (retval) {
			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
				__func__, retval);
			WRITE_ONCE(sc->caller_is_done, true);

			return -EIO;
		}

		var = be32_to_cpu((__force __be32)resp->speed);
		if (var != speed) {
			dev_err(&oct->pci_dev->dev,
				"%s: setting failed speed= %x, expect %x\n",
				__func__, var, speed);
		}

		oct->speed_setting = var;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

int liquidio_get_speed(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	int retval;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp),
				       0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		retval = -EIO;
	} else {
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		retval = resp->status;
		if (retval) {
			dev_err(&oct->pci_dev->dev,
				"%s failed retval=%d\n", __func__, retval);
			retval = -EIO;
		} else {
			u32 var;

			var = be32_to_cpu((__force __be32)resp->speed);
			oct->speed_setting = var;
			if (var == 0xffff) {
				/* unable to access boot variables;
				 * get the default value based on the NIC type
				 */
				if (oct->subsystem_id ==
				    OCTEON_CN2350_25GB_SUBSYS_ID ||
				    oct->subsystem_id ==
				    OCTEON_CN2360_25GB_SUBSYS_ID) {
					oct->no_speed_setting = 1;
					oct->speed_setting = 25;
				} else {
					oct->speed_setting = 10;
				}
			}

		}
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(liquidio_get_speed);

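/**
 * liquidio_set_fec - Enable or disable FEC through the firmware SE API
 * @lio: per-network private data
 * @on_off: SEAPI_CMD_FEC_DISABLE (0) or SEAPI_CMD_FEC_RS (1)
 *
 * Only supported on CN23XX PF devices when the boot-time link speed is 25G.
 */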
int liquidio_set_fec(struct lio *lio, int on_off)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	if (oct->props[lio->ifidx].fec == on_off)
		return 0;

	if (!OCTEON_CN23XX_PF(oct)) {
		dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
			__func__);
		return -1;
	}

	if (oct->speed_boot != 25) {
		dev_err(&oct->pci_dev->dev,
			"Set FEC only when link speed is 25G during insmod\n");
		return -1;
	}

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to allocate soft command\n");
		return -ENOMEM;
	}

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_SET;
	ncmd->s.param1 = on_off;
	/* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return (-EIO);

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (var != on_off) {
		dev_err(&oct->pci_dev->dev,
			"Setting failed fec= %x, expect %x\n",
			var, on_off);
		oct->props[lio->ifidx].fec = var;
		if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
			oct->props[lio->ifidx].fec = 1;
		else
			oct->props[lio->ifidx].fec = 0;
	}

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}

int liquidio_get_fec(struct lio *lio)
{
	struct oct_nic_seapi_resp *resp;
	struct octeon_soft_command *sc;
	struct octeon_device *oct;
	union octnet_cmd *ncmd;
	int retval;
	u32 var;

	oct = lio->oct_dev;

	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
				       sizeof(struct oct_nic_seapi_resp), 0);
	if (!sc)
		return -ENOMEM;

	ncmd = sc->virtdptr;
	resp = sc->virtrptr;
	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ncmd->u64 = 0;
	ncmd->s.cmd = SEAPI_CMD_FEC_GET;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		dev_info(&oct->pci_dev->dev,
			 "%s: Failed to send soft command\n", __func__);
		octeon_free_soft_command(oct, sc);
		return -EIO;
	}

	retval = wait_for_sc_completion_timeout(oct, sc, 0);
	if (retval)
		return retval;

	var = be32_to_cpu(resp->fec_setting);
	resp->fec_setting = var;
	if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
		oct->props[lio->ifidx].fec = 1;
	else
		oct->props[lio->ifidx].fec = 0;

	WRITE_ONCE(sc->caller_is_done, true);

	if (oct->props[lio->ifidx].fec !=
	    oct->props[lio->ifidx].fec_boot) {
		dev_dbg(&oct->pci_dev->dev,
			"Reload driver to change fec to %s\n",
			oct->props[lio->ifidx].fec ? "on" : "off");
	}

	return retval;
}
EXPORT_SYMBOL_GPL(liquidio_get_fec);