1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/pci.h>
19 #include <linux/if_vlan.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_nic.h"
26 #include "octeon_main.h"
27 #include "octeon_network.h"
28 
29 /* OOM task polling interval */
30 #define LIO_OOM_POLL_INTERVAL_MS 250
31 
32 #define OCTNIC_MAX_SG  MAX_SKB_FRAGS
33 
34 /**
35  * \brief Callback for getting interface configuration
36  * @param status status of request
37  * @param buf pointer to the soft command carrying the response
38  */
39 void lio_if_cfg_callback(struct octeon_device *oct,
40 			 u32 status __attribute__((unused)), void *buf)
41 {
42 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
43 	struct liquidio_if_cfg_context *ctx;
44 	struct liquidio_if_cfg_resp *resp;
45 
46 	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
47 	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
48 
49 	oct = lio_get_device(ctx->octeon_id);
50 	if (resp->status)
51 		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
52 			CVM_CAST64(resp->status));
53 	WRITE_ONCE(ctx->cond, 1);
54 
55 	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
56 		 resp->cfg_info.liquidio_firmware_version);
57 
58 	/* This barrier is required to be sure that the response has been
59 	 * written fully before waking up the handler
60 	 */
61 	wmb();
62 
63 	wake_up_interruptible(&ctx->wc);
64 }
65 
66 /**
67  * \brief Delete gather lists
68  * @param lio per-network private data
69  */
70 void lio_delete_glists(struct lio *lio)
71 {
72 	struct octnic_gather *g;
73 	int i;
74 
75 	kfree(lio->glist_lock);
76 	lio->glist_lock = NULL;
77 
78 	if (!lio->glist)
79 		return;
80 
81 	for (i = 0; i < lio->oct_dev->num_iqs; i++) {
82 		do {
83 			g = (struct octnic_gather *)
84 			    lio_list_delete_head(&lio->glist[i]);
85 			kfree(g);
86 		} while (g);
87 
88 		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
89 		    lio->glists_dma_base && lio->glists_dma_base[i]) {
90 			lio_dma_free(lio->oct_dev,
91 				     lio->glist_entry_size * lio->tx_qsize,
92 				     lio->glists_virt_base[i],
93 				     lio->glists_dma_base[i]);
94 		}
95 	}
96 
97 	kfree(lio->glists_virt_base);
98 	lio->glists_virt_base = NULL;
99 
100 	kfree(lio->glists_dma_base);
101 	lio->glists_dma_base = NULL;
102 
103 	kfree(lio->glist);
104 	lio->glist = NULL;
105 }
106 
107 /**
108  * \brief Setup gather lists
109  * @param lio per-network private data
110  */
111 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
112 {
113 	struct octnic_gather *g;
114 	int i, j;
115 
116 	lio->glist_lock =
117 	    kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
118 	if (!lio->glist_lock)
119 		return -ENOMEM;
120 
121 	lio->glist =
122 	    kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
123 	if (!lio->glist) {
124 		kfree(lio->glist_lock);
125 		lio->glist_lock = NULL;
126 		return -ENOMEM;
127 	}
128 
129 	lio->glist_entry_size =
130 		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
131 
132 	/* allocate memory to store the virtual and DMA base addresses of
133 	 * the per-glist consistent memory
134 	 */
135 	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
136 					GFP_KERNEL);
137 	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
138 				       GFP_KERNEL);
139 
140 	if (!lio->glists_virt_base || !lio->glists_dma_base) {
141 		lio_delete_glists(lio);
142 		return -ENOMEM;
143 	}
144 
145 	for (i = 0; i < num_iqs; i++) {
146 		int numa_node = dev_to_node(&oct->pci_dev->dev);
147 
148 		spin_lock_init(&lio->glist_lock[i]);
149 
150 		INIT_LIST_HEAD(&lio->glist[i]);
151 
152 		lio->glists_virt_base[i] =
153 			lio_dma_alloc(oct,
154 				      lio->glist_entry_size * lio->tx_qsize,
155 				      &lio->glists_dma_base[i]);
156 
157 		if (!lio->glists_virt_base[i]) {
158 			lio_delete_glists(lio);
159 			return -ENOMEM;
160 		}
161 
162 		for (j = 0; j < lio->tx_qsize; j++) {
163 			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
164 					 numa_node);
165 			if (!g)
166 				g = kzalloc(sizeof(*g), GFP_KERNEL);
167 			if (!g)
168 				break;
169 
170 			g->sg = lio->glists_virt_base[i] +
171 				(j * lio->glist_entry_size);
172 
173 			g->sg_dma_ptr = lio->glists_dma_base[i] +
174 					(j * lio->glist_entry_size);
175 
176 			list_add_tail(&g->list, &lio->glist[i]);
177 		}
178 
179 		if (j != lio->tx_qsize) {
180 			lio_delete_glists(lio);
181 			return -ENOMEM;
182 		}
183 	}
184 
185 	return 0;
186 }
187 
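/**
 * \brief Send a feature change command to the NIC firmware
 * @param netdev network device
 * @param cmd feature command, e.g. OCTNET_CMD_LRO_ENABLE
 * @param param1 command-specific parameter
 */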
188 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
189 {
190 	struct lio *lio = GET_LIO(netdev);
191 	struct octeon_device *oct = lio->oct_dev;
192 	struct octnic_ctrl_pkt nctrl;
193 	int ret = 0;
194 
195 	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
196 
197 	nctrl.ncmd.u64 = 0;
198 	nctrl.ncmd.s.cmd = cmd;
199 	nctrl.ncmd.s.param1 = param1;
200 	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
201 	nctrl.wait_time = 100;
202 	nctrl.netpndev = (u64)netdev;
203 	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
204 
205 	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
206 	if (ret < 0) {
207 		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
208 			ret);
209 	}
210 	return ret;
211 }
212 
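/**
 * \brief Report completed tx packets and bytes to BQL
 * @param txq netdev tx queue
 * @param pkts_compl number of packets completed
 * @param bytes_compl number of bytes completed
 */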
213 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
214 					unsigned int bytes_compl)
215 {
216 	struct netdev_queue *netdev_queue = txq;
217 
218 	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
219 }
220 
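/**
 * \brief Update tx completion counters for a freed request buffer
 * @param buf free info or soft command, depending on reqtype
 * @param reqtype request type
 * @param pkts_compl incremented by one for the completed skb
 * @param bytes_compl incremented by the completed skb's length
 */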
221 void octeon_update_tx_completion_counters(void *buf, int reqtype,
222 					  unsigned int *pkts_compl,
223 					  unsigned int *bytes_compl)
224 {
225 	struct octnet_buf_free_info *finfo;
226 	struct sk_buff *skb = NULL;
227 	struct octeon_soft_command *sc;
228 
229 	switch (reqtype) {
230 	case REQTYPE_NORESP_NET:
231 	case REQTYPE_NORESP_NET_SG:
232 		finfo = buf;
233 		skb = finfo->skb;
234 		break;
235 
236 	case REQTYPE_RESP_NET_SG:
237 	case REQTYPE_RESP_NET:
238 		sc = buf;
239 		skb = sc->callback_arg;
240 		break;
241 
242 	default:
243 		return;
244 	}
245 
246 	(*pkts_compl)++;
247 	*bytes_compl += skb->len;
248 }
249 
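/**
 * \brief Report a transmitted skb's bytes to BQL
 * @param buf free info or soft command, depending on reqtype
 * @param reqtype request type
 *
 * Returns non-zero if the tx queue is stopped.
 */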
250 int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
251 {
252 	struct octnet_buf_free_info *finfo;
253 	struct sk_buff *skb;
254 	struct octeon_soft_command *sc;
255 	struct netdev_queue *txq;
256 
257 	switch (reqtype) {
258 	case REQTYPE_NORESP_NET:
259 	case REQTYPE_NORESP_NET_SG:
260 		finfo = buf;
261 		skb = finfo->skb;
262 		break;
263 
264 	case REQTYPE_RESP_NET_SG:
265 	case REQTYPE_RESP_NET:
266 		sc = buf;
267 		skb = sc->callback_arg;
268 		break;
269 
270 	default:
271 		return 0;
272 	}
273 
274 	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
275 	netdev_tx_sent_queue(txq, skb->len);
276 
277 	return netif_xmit_stopped(txq);
278 }
279 
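/**
 * \brief Completion callback for link control commands
 * @param nctrl_ptr pointer to the control packet that completed
 *
 * Signals any waiter and logs the outcome of the command.
 */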
280 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
281 {
282 	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
283 	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
284 	struct lio *lio = GET_LIO(netdev);
285 	struct octeon_device *oct = lio->oct_dev;
286 	u8 *mac;
287 
288 	if (nctrl->completion && nctrl->response_code) {
289 		/* Signal whoever is interested that the response code from the
290 		 * firmware has arrived.
291 		 */
292 		WRITE_ONCE(*nctrl->response_code, nctrl->status);
293 		complete(nctrl->completion);
294 	}
295 
296 	if (nctrl->status)
297 		return;
298 
299 	switch (nctrl->ncmd.s.cmd) {
300 	case OCTNET_CMD_CHANGE_DEVFLAGS:
301 	case OCTNET_CMD_SET_MULTI_LIST:
302 	case OCTNET_CMD_SET_UC_LIST:
303 		break;
304 
305 	case OCTNET_CMD_CHANGE_MACADDR:
306 		mac = ((u8 *)&nctrl->udd[0]) + 2;
307 		if (nctrl->ncmd.s.param1) {
308 			/* vfidx is 0 based, but vf_num (param1) is 1 based */
309 			int vfidx = nctrl->ncmd.s.param1 - 1;
310 			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
311 
312 			if (mac_is_admin_assigned)
313 				netif_info(lio, probe, lio->netdev,
314 					   "MAC Address %pM is configured for VF %d\n",
315 					   mac, vfidx);
316 		} else {
317 			netif_info(lio, probe, lio->netdev,
318 				   " MACAddr changed to %pM\n",
319 				   mac);
320 		}
321 		break;
322 
323 	case OCTNET_CMD_GPIO_ACCESS:
324 		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
325 
326 		break;
327 
328 	case OCTNET_CMD_ID_ACTIVE:
329 		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
330 
331 		break;
332 
333 	case OCTNET_CMD_LRO_ENABLE:
334 		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
335 		break;
336 
337 	case OCTNET_CMD_LRO_DISABLE:
338 		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
339 			 netdev->name);
340 		break;
341 
342 	case OCTNET_CMD_VERBOSE_ENABLE:
343 		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
344 			 netdev->name);
345 		break;
346 
347 	case OCTNET_CMD_VERBOSE_DISABLE:
348 		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
349 			 netdev->name);
350 		break;
351 
352 	case OCTNET_CMD_VLAN_FILTER_CTL:
353 		if (nctrl->ncmd.s.param1)
354 			dev_info(&oct->pci_dev->dev,
355 				 "%s VLAN filter enabled\n", netdev->name);
356 		else
357 			dev_info(&oct->pci_dev->dev,
358 				 "%s VLAN filter disabled\n", netdev->name);
359 		break;
360 
361 	case OCTNET_CMD_ADD_VLAN_FILTER:
362 		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
363 			 netdev->name, nctrl->ncmd.s.param1);
364 		break;
365 
366 	case OCTNET_CMD_DEL_VLAN_FILTER:
367 		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
368 			 netdev->name, nctrl->ncmd.s.param1);
369 		break;
370 
371 	case OCTNET_CMD_SET_SETTINGS:
372 		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
373 			 netdev->name);
374 
375 		break;
376 
377 	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
378 	 * Command passed by NIC driver
379 	 */
380 	case OCTNET_CMD_TNL_RX_CSUM_CTL:
381 		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
382 			netif_info(lio, probe, lio->netdev,
383 				   "RX Checksum Offload Enabled\n");
384 		} else if (nctrl->ncmd.s.param1 ==
385 			   OCTNET_CMD_RXCSUM_DISABLE) {
386 			netif_info(lio, probe, lio->netdev,
387 				   "RX Checksum Offload Disabled\n");
388 		}
389 		break;
390 
391 		/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
392 		 * Command passed by NIC driver
393 		 */
394 	case OCTNET_CMD_TNL_TX_CSUM_CTL:
395 		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
396 			netif_info(lio, probe, lio->netdev,
397 				   "TX Checksum Offload Enabled\n");
398 		} else if (nctrl->ncmd.s.param1 ==
399 			   OCTNET_CMD_TXCSUM_DISABLE) {
400 			netif_info(lio, probe, lio->netdev,
401 				   "TX Checksum Offload Disabled\n");
402 		}
403 		break;
404 
405 		/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
406 		 * Command passed by NIC driver
407 		 */
408 	case OCTNET_CMD_VXLAN_PORT_CONFIG:
409 		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
410 			netif_info(lio, probe, lio->netdev,
411 				   "VxLAN Destination UDP PORT:%d ADDED\n",
412 				   nctrl->ncmd.s.param1);
413 		} else if (nctrl->ncmd.s.more ==
414 			   OCTNET_CMD_VXLAN_PORT_DEL) {
415 			netif_info(lio, probe, lio->netdev,
416 				   "VxLAN Destination UDP PORT:%d DELETED\n",
417 				   nctrl->ncmd.s.param1);
418 		}
419 		break;
420 
421 	case OCTNET_CMD_SET_FLOW_CTL:
422 		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
423 		break;
424 
425 	case OCTNET_CMD_QUEUE_COUNT_CTL:
426 		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
427 			   nctrl->ncmd.s.param1);
428 		break;
429 
430 	default:
431 		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
432 			nctrl->ncmd.s.cmd);
433 	}
434 }
435 
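/**
 * \brief Handle a VF MAC address change made by the PF
 * @param oct octeon device
 * @param mac MAC address assigned by the PF
 */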
436 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
437 {
438 	bool macaddr_changed = false;
439 	struct net_device *netdev;
440 	struct lio *lio;
441 
442 	rtnl_lock();
443 
444 	netdev = oct->props[0].netdev;
445 	lio = GET_LIO(netdev);
446 
447 	lio->linfo.macaddr_is_admin_asgnd = true;
448 
449 	if (!ether_addr_equal(netdev->dev_addr, mac)) {
450 		macaddr_changed = true;
451 		ether_addr_copy(netdev->dev_addr, mac);
452 		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
453 		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
454 	}
455 
456 	rtnl_unlock();
457 
458 	if (macaddr_changed)
459 		dev_info(&oct->pci_dev->dev,
460 			 "PF changed VF's MAC address to %pM\n", mac);
461 
462 	/* no need to notify the firmware of the macaddr change because
463 	 * the PF did that already
464 	 */
465 }
466 
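/**
 * \brief Delayed work that checks rx queues for out-of-memory
 * @param work work_struct embedded in the lio rxq_status_wq
 */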
467 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
468 {
469 	struct cavium_wk *wk = (struct cavium_wk *)work;
470 	struct lio *lio = (struct lio *)wk->ctxptr;
471 	struct octeon_device *oct = lio->oct_dev;
472 	struct octeon_droq *droq;
473 	int q, q_no = 0;
474 
475 	if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
476 		for (q = 0; q < lio->linfo.num_rxpciq; q++) {
477 			q_no = lio->linfo.rxpciq[q].s.q_no;
478 			droq = oct->droq[q_no];
479 			if (!droq)
480 				continue;
481 			octeon_droq_check_oom(droq);
482 		}
483 	}
484 	queue_delayed_work(lio->rxq_status_wq.wq,
485 			   &lio->rxq_status_wq.wk.work,
486 			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
487 }
488 
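/**
 * \brief Create the workqueue that polls rx queues for OOM
 * @param netdev network device
 */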
489 int setup_rx_oom_poll_fn(struct net_device *netdev)
490 {
491 	struct lio *lio = GET_LIO(netdev);
492 	struct octeon_device *oct = lio->oct_dev;
493 
494 	lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
495 						WQ_MEM_RECLAIM, 0);
496 	if (!lio->rxq_status_wq.wq) {
497 		dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
498 		return -ENOMEM;
499 	}
500 	INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
501 			  octnet_poll_check_rxq_oom_status);
502 	lio->rxq_status_wq.wk.ctxptr = lio;
503 	queue_delayed_work(lio->rxq_status_wq.wq,
504 			   &lio->rxq_status_wq.wk.work,
505 			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
506 	return 0;
507 }
508 
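/**
 * \brief Stop and destroy the rx OOM polling workqueue
 * @param netdev network device
 */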
509 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
510 {
511 	struct lio *lio = GET_LIO(netdev);
512 
513 	if (lio->rxq_status_wq.wq) {
514 		cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
515 		flush_workqueue(lio->rxq_status_wq.wq);
516 		destroy_workqueue(lio->rxq_status_wq.wq);
517 	}
518 }
519 
520 /* Runs in interrupt context. */
521 static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
522 {
523 	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
524 	struct net_device *netdev;
525 	struct lio *lio;
526 
527 	netdev = oct->props[iq->ifidx].netdev;
528 
529 	/* This is needed because the first IQ does not have
530 	 * a netdev associated with it.
531 	 */
532 	if (!netdev)
533 		return;
534 
535 	lio = GET_LIO(netdev);
536 	if (__netif_subqueue_stopped(netdev, iq->q_index) &&
537 	    lio->linfo.link.s.link_up &&
538 	    (!octnet_iq_is_full(oct, iq_num))) {
539 		netif_wake_subqueue(netdev, iq->q_index);
540 		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
541 					  tx_restart, 1);
542 	}
543 }
544 
545 /**
546  * \brief Setup output queue
547  * @param oct octeon device
548  * @param q_no which queue
549  * @param num_descs how many descriptors
550  * @param desc_size size of each descriptor
551  * @param app_ctx application context
552  */
553 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
554 			     int desc_size, void *app_ctx)
555 {
556 	int ret_val;
557 
558 	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
559 	/* droq creation and local register settings. */
560 	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
561 	if (ret_val < 0)
562 		return ret_val;
563 
564 	if (ret_val == 1) {
565 		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
566 		return 0;
567 	}
568 
569 	/* Enable the droq queues */
570 	octeon_set_droq_pkt_op(oct, q_no, 1);
571 
572 	/* Send Credit for Octeon Output queues. Credits are always
573 	 * sent after the output queue is enabled.
574 	 */
575 	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
576 
577 	return ret_val;
578 }
579 
580 /** Routine to push packets arriving on Octeon interface up to the network layer.
581  * @param octeon_id - octeon device id.
582  * @param skbuff   - skbuff struct to be passed to network layer.
583  * @param len      - size of total data received.
584  * @param rh       - Control header associated with the packet
585  * @param param    - additional control data with the packet
586  * @param arg      - farg registered in droq_ops
587  */
588 static void
589 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
590 		     void *skbuff,
591 		     u32 len,
592 		     union octeon_rh *rh,
593 		     void *param,
594 		     void *arg)
595 {
596 	struct net_device *netdev = (struct net_device *)arg;
597 	struct octeon_droq *droq =
598 	    container_of(param, struct octeon_droq, napi);
599 	struct sk_buff *skb = (struct sk_buff *)skbuff;
600 	struct skb_shared_hwtstamps *shhwtstamps;
601 	struct napi_struct *napi = param;
602 	u16 vtag = 0;
603 	u32 r_dh_off;
604 	u64 ns;
605 
606 	if (netdev) {
607 		struct lio *lio = GET_LIO(netdev);
608 		struct octeon_device *oct = lio->oct_dev;
609 
610 		/* Do not proceed if the interface is not in RUNNING state. */
611 		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
612 			recv_buffer_free(skb);
613 			droq->stats.rx_dropped++;
614 			return;
615 		}
616 
617 		skb->dev = netdev;
618 
619 		skb_record_rx_queue(skb, droq->q_no);
620 		if (likely(len > MIN_SKB_SIZE)) {
621 			struct octeon_skb_page_info *pg_info;
622 			unsigned char *va;
623 
624 			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
625 			if (pg_info->page) {
626 				/* For Paged allocation use the frags */
627 				va = page_address(pg_info->page) +
628 					pg_info->page_offset;
629 				memcpy(skb->data, va, MIN_SKB_SIZE);
630 				skb_put(skb, MIN_SKB_SIZE);
631 				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
632 						pg_info->page,
633 						pg_info->page_offset +
634 						MIN_SKB_SIZE,
635 						len - MIN_SKB_SIZE,
636 						LIO_RXBUFFER_SZ);
637 			}
638 		} else {
639 			struct octeon_skb_page_info *pg_info =
640 				((struct octeon_skb_page_info *)(skb->cb));
641 			skb_copy_to_linear_data(skb, page_address(pg_info->page)
642 						+ pg_info->page_offset, len);
643 			skb_put(skb, len);
644 			put_page(pg_info->page);
645 		}
646 
647 		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
648 
649 		if (oct->ptp_enable) {
650 			if (rh->r_dh.has_hwtstamp) {
651 				/* timestamp is included from the hardware at
652 				 * the beginning of the packet.
653 				 */
654 				if (ifstate_check
655 					(lio,
656 					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
657 					/* Nanoseconds are in the first 64-bits
658 					 * of the packet.
659 					 */
660 					memcpy(&ns, (skb->data + r_dh_off),
661 					       sizeof(ns));
662 					r_dh_off -= BYTES_PER_DHLEN_UNIT;
663 					shhwtstamps = skb_hwtstamps(skb);
664 					shhwtstamps->hwtstamp =
665 						ns_to_ktime(ns +
666 							    lio->ptp_adjust);
667 				}
668 			}
669 		}
670 
671 		if (rh->r_dh.has_hash) {
672 			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
673 			u32 hash = be32_to_cpu(*hash_be);
674 
675 			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
676 			r_dh_off -= BYTES_PER_DHLEN_UNIT;
677 		}
678 
679 		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
680 		skb->protocol = eth_type_trans(skb, skb->dev);
681 
682 		if ((netdev->features & NETIF_F_RXCSUM) &&
683 		    (((rh->r_dh.encap_on) &&
684 		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
685 		     (!(rh->r_dh.encap_on) &&
686 		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
687 			/* checksum has already been verified */
688 			skb->ip_summed = CHECKSUM_UNNECESSARY;
689 		else
690 			skb->ip_summed = CHECKSUM_NONE;
691 
692 		/* Setting Encapsulation field on basis of status received
693 		 * from the firmware
694 		 */
695 		if (rh->r_dh.encap_on) {
696 			skb->encapsulation = 1;
697 			skb->csum_level = 1;
698 			droq->stats.rx_vxlan++;
699 		}
700 
701 		/* inbound VLAN tag */
702 		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
703 		    rh->r_dh.vlan) {
704 			u16 priority = rh->r_dh.priority;
705 			u16 vid = rh->r_dh.vlan;
706 
707 			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
708 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
709 		}
710 
711 		napi_gro_receive(napi, skb);
712 
713 		droq->stats.rx_bytes_received += len -
714 			rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
715 		droq->stats.rx_pkts_received++;
716 	} else {
717 		recv_buffer_free(skb);
718 	}
719 }
720 
721 /**
722  * \brief wrapper for calling napi_schedule
723  * @param param parameters to pass to napi_schedule
724  *
725  * Used when scheduling on different CPUs
726  */
727 static void napi_schedule_wrapper(void *param)
728 {
729 	struct napi_struct *napi = param;
730 
731 	napi_schedule(napi);
732 }
733 
734 /**
735  * \brief callback when receive interrupt occurs and we are in NAPI mode
736  * @param arg pointer to octeon output queue
737  */
738 static void liquidio_napi_drv_callback(void *arg)
739 {
740 	struct octeon_device *oct;
741 	struct octeon_droq *droq = arg;
742 	int this_cpu = smp_processor_id();
743 
744 	oct = droq->oct_dev;
745 
746 	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
747 	    droq->cpu_id == this_cpu) {
748 		napi_schedule_irqoff(&droq->napi);
749 	} else {
750 		call_single_data_t *csd = &droq->csd;
751 
752 		csd->func = napi_schedule_wrapper;
753 		csd->info = &droq->napi;
754 		csd->flags = 0;
755 
756 		smp_call_function_single_async(droq->cpu_id, csd);
757 	}
758 }
759 
760 /**
761  * \brief Entry point for NAPI polling
762  * @param napi NAPI structure
763  * @param budget maximum number of items to process
764  */
765 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
766 {
767 	struct octeon_instr_queue *iq;
768 	struct octeon_device *oct;
769 	struct octeon_droq *droq;
770 	int tx_done = 0, iq_no;
771 	int work_done;
772 
773 	droq = container_of(napi, struct octeon_droq, napi);
774 	oct = droq->oct_dev;
775 	iq_no = droq->q_no;
776 
777 	/* Handle Droq descriptors */
778 	work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
779 
780 	/* Flush the instruction queue */
781 	iq = oct->instr_queue[iq_no];
782 	if (iq) {
783 		/* TODO: move this check to inside octeon_flush_iq,
784 		 * once check_db_timeout is removed
785 		 */
786 		if (atomic_read(&iq->instr_pending))
787 			/* Process iq buffers within the budget limits */
788 			tx_done = octeon_flush_iq(oct, iq, budget);
789 		else
790 			tx_done = 1;
791 		/* Update iq read-index rather than waiting for next interrupt.
792 		 * Return if tx_done is false.
793 		 */
794 		/* sub-queue status update */
795 		lio_update_txq_status(oct, iq_no);
796 	} else {
797 		dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
798 			__func__, iq_no);
799 	}
800 
801 #define MAX_REG_CNT  2000000U
802 	/* force enable interrupt if reg cnts are high to avoid wraparound */
803 	if ((work_done < budget && tx_done) ||
804 	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
805 	    (droq->pkt_count >= MAX_REG_CNT)) {
806 		tx_done = 1;
807 		napi_complete_done(napi, work_done);
808 
809 		octeon_enable_irq(droq->oct_dev, droq->q_no);
810 		return 0;
811 	}
812 
813 	return (!tx_done) ? (budget) : (work_done);
814 }
815 
816 /**
817  * \brief Setup input and output queues
818  * @param octeon_dev octeon device
819  * @param ifidx Interface index
820  *
821  * Note: Queues are with respect to the octeon device. Thus
822  * an input queue is for egress packets, and output queues
823  * are for ingress packets.
824  */
825 int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
826 			     u32 num_iqs, u32 num_oqs)
827 {
828 	struct octeon_droq_ops droq_ops;
829 	struct net_device *netdev;
830 	struct octeon_droq *droq;
831 	struct napi_struct *napi;
832 	int cpu_id_modulus;
833 	int num_tx_descs;
834 	struct lio *lio;
835 	int retval = 0;
836 	int q, q_no;
837 	int cpu_id;
838 
839 	netdev = octeon_dev->props[ifidx].netdev;
840 
841 	lio = GET_LIO(netdev);
842 
843 	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
844 
845 	droq_ops.fptr = liquidio_push_packet;
846 	droq_ops.farg = netdev;
847 
848 	droq_ops.poll_mode = 1;
849 	droq_ops.napi_fn = liquidio_napi_drv_callback;
850 	cpu_id = 0;
851 	cpu_id_modulus = num_present_cpus();
852 
853 	/* set up DROQs. */
854 	for (q = 0; q < num_oqs; q++) {
855 		q_no = lio->linfo.rxpciq[q].s.q_no;
856 		dev_dbg(&octeon_dev->pci_dev->dev,
857 			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
858 			__func__, q, q_no);
859 		retval = octeon_setup_droq(
860 		    octeon_dev, q_no,
861 		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
862 						lio->ifidx),
863 		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
864 						   lio->ifidx),
865 		    NULL);
866 		if (retval) {
867 			dev_err(&octeon_dev->pci_dev->dev,
868 				"%s : Runtime DROQ(RxQ) creation failed.\n",
869 				__func__);
870 			return 1;
871 		}
872 
873 		droq = octeon_dev->droq[q_no];
874 		napi = &droq->napi;
875 		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
876 			(u64)netdev, (u64)octeon_dev);
877 		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
878 
879 		/* designate a CPU for this droq */
880 		droq->cpu_id = cpu_id;
881 		cpu_id++;
882 		if (cpu_id >= cpu_id_modulus)
883 			cpu_id = 0;
884 
885 		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
886 	}
887 
888 	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
889 		/* 23XX PF/VF can send/recv control messages (via the first
890 		 * PF/VF-owned droq) from the firmware even if the ethX
891 		 * interface is down, so that's why poll_mode must be off
892 		 * for the first droq.
893 		 */
894 		octeon_dev->droq[0]->ops.poll_mode = 0;
895 	}
896 
897 	/* set up IQs. */
898 	for (q = 0; q < num_iqs; q++) {
899 		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
900 		    octeon_get_conf(octeon_dev), lio->ifidx);
901 		retval = octeon_setup_iq(octeon_dev, ifidx, q,
902 					 lio->linfo.txpciq[q], num_tx_descs,
903 					 netdev_get_tx_queue(netdev, q));
904 		if (retval) {
905 			dev_err(&octeon_dev->pci_dev->dev,
906 				" %s : Runtime IQ(TxQ) creation failed.\n",
907 				__func__);
908 			return 1;
909 		}
910 
911 		/* XPS */
912 		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
913 		    octeon_dev->ioq_vector) {
914 			struct octeon_ioq_vector    *ioq_vector;
915 
916 			ioq_vector = &octeon_dev->ioq_vector[q];
917 			netif_set_xps_queue(netdev,
918 					    &ioq_vector->affinity_mask,
919 					    ioq_vector->iq_index);
920 		}
921 	}
922 
923 	return 0;
924 }
925 
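/**
 * \brief Schedule droq packet processing for an MSI-X interrupt
 * @param droq octeon output queue
 * @param ret interrupt status from the MSI-X interrupt handler
 */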
926 static
927 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
928 {
929 	struct octeon_device *oct = droq->oct_dev;
930 	struct octeon_device_priv *oct_priv =
931 	    (struct octeon_device_priv *)oct->priv;
932 
933 	if (droq->ops.poll_mode) {
934 		droq->ops.napi_fn(droq);
935 	} else {
936 		if (ret & MSIX_PO_INT) {
937 			if (OCTEON_CN23XX_VF(oct))
938 				dev_err(&oct->pci_dev->dev,
939 					"should not come here should not get rx when poll mode = 0 for vf\n");
940 			tasklet_schedule(&oct_priv->droq_tasklet);
941 			return 1;
942 		}
943 		/* this will be flushed periodically by check iq db */
944 		if (ret & MSIX_PI_INT)
945 			return 0;
946 	}
947 
948 	return 0;
949 }
950 
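/**
 * \brief MSI-X interrupt handler
 * @param irq unused
 * @param dev pointer to the octeon_ioq_vector for this interrupt
 */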
951 irqreturn_t
952 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
953 {
954 	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
955 	struct octeon_device *oct = ioq_vector->oct_dev;
956 	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
957 	u64 ret;
958 
959 	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
960 
961 	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
962 		liquidio_schedule_msix_droq_pkt_handler(droq, ret);
963 
964 	return IRQ_HANDLED;
965 }
966 
967 /**
968  * \brief Droq packet processor scheduler
969  * @param oct octeon device
970  */
971 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
972 {
973 	struct octeon_device_priv *oct_priv =
974 		(struct octeon_device_priv *)oct->priv;
975 	struct octeon_droq *droq;
976 	u64 oq_no;
977 
978 	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
979 		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
980 		     oq_no++) {
981 			if (!(oct->droq_intr & BIT_ULL(oq_no)))
982 				continue;
983 
984 			droq = oct->droq[oq_no];
985 
986 			if (droq->ops.poll_mode) {
987 				droq->ops.napi_fn(droq);
988 				oct_priv->napi_mask |= (1 << oq_no);
989 			} else {
990 				tasklet_schedule(&oct_priv->droq_tasklet);
991 			}
992 		}
993 	}
994 }
995 
996 /**
997  * \brief Interrupt handler for octeon
998  * @param irq unused
999  * @param dev octeon device
1000  */
1001 static
1002 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
1003 					 void *dev)
1004 {
1005 	struct octeon_device *oct = (struct octeon_device *)dev;
1006 	irqreturn_t ret;
1007 
1008 	/* Disable our interrupts for the duration of ISR */
1009 	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1010 
1011 	ret = oct->fn_list.process_interrupt_regs(oct);
1012 
1013 	if (ret == IRQ_HANDLED)
1014 		liquidio_schedule_droq_pkt_handlers(oct);
1015 
1016 	/* Re-enable our interrupts  */
1017 	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
1018 		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
1019 
1020 	return ret;
1021 }
1022 
1023 /**
1024  * \brief Setup interrupt for octeon device
1025  * @param oct octeon device
1026  *
1027  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
1028  */
1029 int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
1030 {
1031 	struct msix_entry *msix_entries;
1032 	char *queue_irq_names = NULL;
1033 	int i, num_interrupts = 0;
1034 	int num_alloc_ioq_vectors;
1035 	char *aux_irq_name = NULL;
1036 	int num_ioq_vectors;
1037 	int irqret, err;
1038 
1039 	if (oct->msix_on) {
1040 		oct->num_msix_irqs = num_ioqs;
1041 		if (OCTEON_CN23XX_PF(oct)) {
1042 			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
1043 
1044 			/* one non-ioq interrupt for handling
1045 			 * sli_mac_pf_int_sum
1046 			 */
1047 			oct->num_msix_irqs += 1;
1048 		} else if (OCTEON_CN23XX_VF(oct)) {
1049 			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
1050 		}
1051 
1052 		/* allocate storage for the names assigned to each irq */
1053 		oct->irq_name_storage =
1054 			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
1055 		if (!oct->irq_name_storage) {
1056 			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
1057 			return -ENOMEM;
1058 		}
1059 
1060 		queue_irq_names = oct->irq_name_storage;
1061 
1062 		if (OCTEON_CN23XX_PF(oct))
1063 			aux_irq_name = &queue_irq_names
1064 				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
1065 
1066 		oct->msix_entries = kcalloc(oct->num_msix_irqs,
1067 					    sizeof(struct msix_entry),
1068 					    GFP_KERNEL);
1069 		if (!oct->msix_entries) {
1070 			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
1071 			kfree(oct->irq_name_storage);
1072 			oct->irq_name_storage = NULL;
1073 			return -ENOMEM;
1074 		}
1075 
1076 		msix_entries = (struct msix_entry *)oct->msix_entries;
1077 
1078 		/* Assumption is that PF MSI-X vector numbers run from pf_srn
1079 		 * up to trs and do not start from 0; change this code if not.
1080 		 */
1081 		if (OCTEON_CN23XX_PF(oct)) {
1082 			for (i = 0; i < oct->num_msix_irqs - 1; i++)
1083 				msix_entries[i].entry =
1084 					oct->sriov_info.pf_srn + i;
1085 
1086 			msix_entries[oct->num_msix_irqs - 1].entry =
1087 				oct->sriov_info.trs;
1088 		} else if (OCTEON_CN23XX_VF(oct)) {
1089 			for (i = 0; i < oct->num_msix_irqs; i++)
1090 				msix_entries[i].entry = i;
1091 		}
1092 		num_alloc_ioq_vectors = pci_enable_msix_range(
1093 						oct->pci_dev, msix_entries,
1094 						oct->num_msix_irqs,
1095 						oct->num_msix_irqs);
1096 		if (num_alloc_ioq_vectors < 0) {
1097 			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
1098 			kfree(oct->msix_entries);
1099 			oct->msix_entries = NULL;
1100 			kfree(oct->irq_name_storage);
1101 			oct->irq_name_storage = NULL;
1102 			return num_alloc_ioq_vectors;
1103 		}
1104 
1105 		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
1106 
1107 		num_ioq_vectors = oct->num_msix_irqs;
1108 		/** For PF, there is one non-ioq interrupt handler */
1109 		if (OCTEON_CN23XX_PF(oct)) {
1110 			num_ioq_vectors -= 1;
1111 
1112 			snprintf(aux_irq_name, INTRNAMSIZ,
1113 				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
1114 				 oct->pf_num);
1115 			irqret = request_irq(
1116 					msix_entries[num_ioq_vectors].vector,
1117 					liquidio_legacy_intr_handler, 0,
1118 					aux_irq_name, oct);
1119 			if (irqret) {
1120 				dev_err(&oct->pci_dev->dev,
1121 					"Request_irq failed for MSIX interrupt Error: %d\n",
1122 					irqret);
1123 				pci_disable_msix(oct->pci_dev);
1124 				kfree(oct->msix_entries);
1125 				kfree(oct->irq_name_storage);
1126 				oct->irq_name_storage = NULL;
1127 				oct->msix_entries = NULL;
1128 				return irqret;
1129 			}
1130 		}
1131 		for (i = 0 ; i < num_ioq_vectors ; i++) {
1132 			if (OCTEON_CN23XX_PF(oct))
1133 				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1134 					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1135 					 oct->octeon_id, oct->pf_num, i);
1136 
1137 			if (OCTEON_CN23XX_VF(oct))
1138 				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1139 					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1140 					 oct->octeon_id, oct->vf_num, i);
1141 
1142 			irqret = request_irq(msix_entries[i].vector,
1143 					     liquidio_msix_intr_handler, 0,
1144 					     &queue_irq_names[IRQ_NAME_OFF(i)],
1145 					     &oct->ioq_vector[i]);
1146 
1147 			if (irqret) {
1148 				dev_err(&oct->pci_dev->dev,
1149 					"Request_irq failed for MSIX interrupt Error: %d\n",
1150 					irqret);
1151 				/* Only the PF requested a non-ioq irq vector;
				 * free it only for the PF.
				 */
				if (OCTEON_CN23XX_PF(oct))
					free_irq(
					    msix_entries[num_ioq_vectors].vector,
					    oct);
1154 
1155 				while (i) {
1156 					i--;
1157 					/* clear the affinity mask. */
1158 					irq_set_affinity_hint(
1159 						      msix_entries[i].vector,
1160 						      NULL);
1161 					free_irq(msix_entries[i].vector,
1162 						 &oct->ioq_vector[i]);
1163 				}
1164 				pci_disable_msix(oct->pci_dev);
1165 				kfree(oct->msix_entries);
1166 				kfree(oct->irq_name_storage);
1167 				oct->irq_name_storage = NULL;
1168 				oct->msix_entries = NULL;
1169 				return irqret;
1170 			}
1171 			oct->ioq_vector[i].vector = msix_entries[i].vector;
1172 			/* assign the cpu mask for this msix interrupt vector */
1173 			irq_set_affinity_hint(msix_entries[i].vector,
1174 					      &oct->ioq_vector[i].affinity_mask
1175 					      );
1176 		}
1177 		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1178 			oct->octeon_id);
1179 	} else {
1180 		err = pci_enable_msi(oct->pci_dev);
1181 		if (err)
1182 			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1183 				 err);
1184 		else
1185 			oct->flags |= LIO_FLAG_MSI_ENABLED;
1186 
1187 		/* allocate storage for the names assigned to the irq */
1188 		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1189 		if (!oct->irq_name_storage)
1190 			return -ENOMEM;
1191 
1192 		queue_irq_names = oct->irq_name_storage;
1193 
1194 		if (OCTEON_CN23XX_PF(oct))
1195 			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1196 				 "LiquidIO%u-pf%u-rxtx-%u",
1197 				 oct->octeon_id, oct->pf_num, 0);
1198 
1199 		if (OCTEON_CN23XX_VF(oct))
1200 			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1201 				 "LiquidIO%u-vf%u-rxtx-%u",
1202 				 oct->octeon_id, oct->vf_num, 0);
1203 
1204 		irqret = request_irq(oct->pci_dev->irq,
1205 				     liquidio_legacy_intr_handler,
1206 				     IRQF_SHARED,
1207 				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1208 		if (irqret) {
1209 			if (oct->flags & LIO_FLAG_MSI_ENABLED)
1210 				pci_disable_msi(oct->pci_dev);
1211 			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1212 				irqret);
1213 			kfree(oct->irq_name_storage);
1214 			oct->irq_name_storage = NULL;
1215 			return irqret;
1216 		}
1217 	}
1218 	return 0;
1219 }
1220 
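/**
 * \brief Completion callback for the change-MTU soft command
 * @param oct octeon device
 * @param status status of the request
 * @param buf pointer to the soft command
 */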
1221 static void liquidio_change_mtu_completion(struct octeon_device *oct,
1222 					   u32 status, void *buf)
1223 {
1224 	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1225 	struct liquidio_if_cfg_context *ctx;
1226 
1227 	ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1228 
1229 	if (status) {
1230 		dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n",
1231 			CVM_CAST64(status));
1232 		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL);
1233 	} else {
1234 		WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS);
1235 	}
1236 
1237 	/* This barrier is required to be sure that the response has been
1238 	 * written fully before waking up the handler
1239 	 */
1240 	wmb();
1241 
1242 	wake_up_interruptible(&ctx->wc);
1243 }
1244 
1245 /**
1246  * \brief Net device change_mtu
1247  * @param netdev network device
1248  */
1249 int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
1250 {
1251 	struct lio *lio = GET_LIO(netdev);
1252 	struct octeon_device *oct = lio->oct_dev;
1253 	struct liquidio_if_cfg_context *ctx;
1254 	struct octeon_soft_command *sc;
1255 	union octnet_cmd *ncmd;
1256 	int ctx_size;
1257 	int ret = 0;
1258 
1259 	ctx_size = sizeof(struct liquidio_if_cfg_context);
1260 	sc = (struct octeon_soft_command *)
1261 		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size);
	if (!sc)
		return -ENOMEM;

1263 	ncmd = (union octnet_cmd *)sc->virtdptr;
1264 	ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
1265 
1266 	WRITE_ONCE(ctx->cond, 0);
1267 	ctx->octeon_id = lio_get_device_id(oct);
1268 	init_waitqueue_head(&ctx->wc);
1269 
1270 	ncmd->u64 = 0;
1271 	ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
1272 	ncmd->s.param1 = new_mtu;
1273 
1274 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1275 
1276 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1277 
1278 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1279 				    OPCODE_NIC_CMD, 0, 0, 0);
1280 
1281 	sc->callback = liquidio_change_mtu_completion;
1282 	sc->callback_arg = sc;
1283 	sc->wait_time = 100;
1284 
1285 	ret = octeon_send_soft_command(oct, sc);
1286 	if (ret == IQ_SEND_FAILED) {
1287 		netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
		octeon_free_soft_command(oct, sc);
1288 		return -EINVAL;
1289 	}
1290 	/* Sleep on a wait queue till the cond flag indicates that the
1291 	 * response arrived or timed-out.
1292 	 */
1293 	if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR ||
1294 	    ctx->cond == LIO_CHANGE_MTU_FAIL) {
1295 		octeon_free_soft_command(oct, sc);
1296 		return -EINVAL;
1297 	}
1298 
1299 	netdev->mtu = new_mtu;
1300 	lio->mtu = new_mtu;
1301 
1302 	octeon_free_soft_command(oct, sc);
1303 	return 0;
1304 }
1305 
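/**
 * \brief Wait for the output queues to drain
 * @param oct octeon device
 *
 * Returns the number of packets still pending when the wait gives up.
 */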
1306 int lio_wait_for_clean_oq(struct octeon_device *oct)
1307 {
1308 	int retry = 100, pending_pkts = 0;
1309 	int idx;
1310 
1311 	do {
1312 		pending_pkts = 0;
1313 
1314 		for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
1315 			if (!(oct->io_qmask.oq & BIT_ULL(idx)))
1316 				continue;
1317 			pending_pkts +=
1318 				atomic_read(&oct->droq[idx]->pkts_pending);
1319 		}
1320 
1321 		if (pending_pkts > 0)
1322 			schedule_timeout_uninterruptible(1);
1323 
1324 	} while (retry-- && pending_pkts);
1325 
1326 	return pending_pkts;
1327 }
1328 
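/**
 * \brief Completion callback for the port stats soft command
 * @param oct_dev octeon device
 * @param status status of the request
 * @param ptr pointer to the soft command
 *
 * Copies the firmware's link statistics into oct_dev->link_stats.
 */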
1329 static void
1330 octnet_nic_stats_callback(struct octeon_device *oct_dev,
1331 			  u32 status, void *ptr)
1332 {
1333 	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
1334 	struct oct_nic_stats_resp *resp =
1335 	    (struct oct_nic_stats_resp *)sc->virtrptr;
1336 	struct oct_nic_stats_ctrl *ctrl =
1337 	    (struct oct_nic_stats_ctrl *)sc->ctxptr;
1338 	struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
1339 	struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
1340 	struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
1341 	struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
1342 
1343 	if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
1344 		octeon_swap_8B_data((u64 *)&resp->stats,
1345 				    (sizeof(struct oct_link_stats)) >> 3);
1346 
1347 		/* RX link-level stats */
1348 		rstats->total_rcvd = rsp_rstats->total_rcvd;
1349 		rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
1350 		rstats->total_bcst = rsp_rstats->total_bcst;
1351 		rstats->total_mcst = rsp_rstats->total_mcst;
1352 		rstats->runts      = rsp_rstats->runts;
1353 		rstats->ctl_rcvd   = rsp_rstats->ctl_rcvd;
1354 		/* Accounts for over/under-run of buffers */
1355 		rstats->fifo_err  = rsp_rstats->fifo_err;
1356 		rstats->dmac_drop = rsp_rstats->dmac_drop;
1357 		rstats->fcs_err   = rsp_rstats->fcs_err;
1358 		rstats->jabber_err = rsp_rstats->jabber_err;
1359 		rstats->l2_err    = rsp_rstats->l2_err;
1360 		rstats->frame_err = rsp_rstats->frame_err;
1361 		rstats->red_drops = rsp_rstats->red_drops;
1362 
1363 		/* RX firmware stats */
1364 		rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
1365 		rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
1366 		rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
1367 		rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
1368 		rstats->fw_err_pko = rsp_rstats->fw_err_pko;
1369 		rstats->fw_err_link = rsp_rstats->fw_err_link;
1370 		rstats->fw_err_drop = rsp_rstats->fw_err_drop;
1371 		rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
1372 		rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
1373 
1374 		/* Number of packets that are LROed      */
1375 		rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
1376 		/* Number of octets that are LROed       */
1377 		rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
1378 		/* Number of LRO packets formed          */
1379 		rstats->fw_total_lro = rsp_rstats->fw_total_lro;
1380 		/* Number of times LRO of packet aborted */
1381 		rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
1382 		rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
1383 		rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
1384 		rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
1385 		rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
1386 		/* intrmod: packet forward rate */
1387 		rstats->fwd_rate = rsp_rstats->fwd_rate;
1388 
1389 		/* TX link-level stats */
1390 		tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
1391 		tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
1392 		tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
1393 		tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
1394 		tstats->ctl_sent = rsp_tstats->ctl_sent;
1395 		/* Packets sent after one collision*/
1396 		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
1397 		/* Packets sent after multiple collision*/
1398 		tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
1399 		/* Packets not sent due to max collisions */
1400 		tstats->max_collision_fail = rsp_tstats->max_collision_fail;
1401 		/* Packets not sent due to max deferrals */
1402 		tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
1403 		/* Accounts for over/under-run of buffers */
1404 		tstats->fifo_err = rsp_tstats->fifo_err;
1405 		tstats->runts = rsp_tstats->runts;
1406 		/* Total number of collisions detected */
1407 		tstats->total_collisions = rsp_tstats->total_collisions;
1408 
1409 		/* firmware stats */
1410 		tstats->fw_total_sent = rsp_tstats->fw_total_sent;
1411 		tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
1412 		tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
1413 		tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
1414 		tstats->fw_err_pko = rsp_tstats->fw_err_pko;
1415 		tstats->fw_err_pki = rsp_tstats->fw_err_pki;
1416 		tstats->fw_err_link = rsp_tstats->fw_err_link;
1417 		tstats->fw_err_drop = rsp_tstats->fw_err_drop;
1418 		tstats->fw_tso = rsp_tstats->fw_tso;
1419 		tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
1420 		tstats->fw_err_tso = rsp_tstats->fw_err_tso;
1421 		tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
1422 
1423 		resp->status = 1;
1424 	} else {
1425 		resp->status = -1;
1426 	}
1427 	complete(&ctrl->complete);
1428 }
1429 
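/**
 * \brief Fetch link statistics from the firmware
 * @param netdev network device
 */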
1430 int octnet_get_link_stats(struct net_device *netdev)
1431 {
1432 	struct lio *lio = GET_LIO(netdev);
1433 	struct octeon_device *oct_dev = lio->oct_dev;
1434 	struct octeon_soft_command *sc;
1435 	struct oct_nic_stats_ctrl *ctrl;
1436 	struct oct_nic_stats_resp *resp;
1437 	int retval;
1438 
1439 	/* Alloc soft command */
1440 	sc = (struct octeon_soft_command *)
1441 		octeon_alloc_soft_command(oct_dev,
1442 					  0,
1443 					  sizeof(struct oct_nic_stats_resp),
1444 					  sizeof(struct octnic_ctrl_pkt));
1445 
1446 	if (!sc)
1447 		return -ENOMEM;
1448 
1449 	resp = (struct oct_nic_stats_resp *)sc->virtrptr;
1450 	memset(resp, 0, sizeof(struct oct_nic_stats_resp));
1451 
1452 	ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
1453 	memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
1454 	ctrl->netdev = netdev;
1455 	init_completion(&ctrl->complete);
1456 
1457 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1458 
1459 	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
1460 				    OPCODE_NIC_PORT_STATS, 0, 0, 0);
1461 
1462 	sc->callback = octnet_nic_stats_callback;
1463 	sc->callback_arg = sc;
1464 	sc->wait_time = 500;	/* in milliseconds */
1465 
1466 	retval = octeon_send_soft_command(oct_dev, sc);
1467 	if (retval == IQ_SEND_FAILED) {
1468 		octeon_free_soft_command(oct_dev, sc);
1469 		return -EINVAL;
1470 	}
1471 
1472 	wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
1473 
1474 	if (resp->status != 1) {
1475 		octeon_free_soft_command(oct_dev, sc);
1476 
1477 		return -EINVAL;
1478 	}
1479 
1480 	octeon_free_soft_command(oct_dev, sc);
1481 
1482 	return 0;
1483 }
1484 
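/**
 * \brief Completion callback for seapi (u-boot) control commands
 * @param oct octeon device
 * @param status status of the request
 * @param buf pointer to the soft command
 */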
1485 static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct,
1486 					    u32 status,
1487 					    void *buf)
1488 {
1489 	struct liquidio_nic_seapi_ctl_context *ctx;
1490 	struct octeon_soft_command *sc = buf;
1491 
1492 	ctx = sc->ctxptr;
1493 
1494 	oct = lio_get_device(ctx->octeon_id);
1495 	if (status) {
1496 		dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n",
1497 			__func__,
1498 			CVM_CAST64(status));
1499 	}
1500 	ctx->status = status;
1501 	complete(&ctx->complete);
1502 }
1503 
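/**
 * \brief Set the link speed setting (CN23XX PF only)
 * @param lio per-network private data
 * @param speed requested speed
 */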
1504 int liquidio_set_speed(struct lio *lio, int speed)
1505 {
1506 	struct liquidio_nic_seapi_ctl_context *ctx;
1507 	struct octeon_device *oct = lio->oct_dev;
1508 	struct oct_nic_seapi_resp *resp;
1509 	struct octeon_soft_command *sc;
1510 	union octnet_cmd *ncmd;
1511 	u32 ctx_size;
1512 	int retval;
1513 	u32 var;
1514 
1515 	if (oct->speed_setting == speed)
1516 		return 0;
1517 
1518 	if (!OCTEON_CN23XX_PF(oct)) {
1519 		dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
1520 			__func__);
1521 		return -EOPNOTSUPP;
1522 	}
1523 
1524 	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
1525 	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1526 				       sizeof(struct oct_nic_seapi_resp),
1527 				       ctx_size);
1528 	if (!sc)
1529 		return -ENOMEM;
1530 
1531 	ncmd = sc->virtdptr;
1532 	ctx  = sc->ctxptr;
1533 	resp = sc->virtrptr;
1534 	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1535 
1536 	ctx->octeon_id = lio_get_device_id(oct);
1537 	ctx->status = 0;
1538 	init_completion(&ctx->complete);
1539 
1540 	ncmd->u64 = 0;
1541 	ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
1542 	ncmd->s.param1 = speed;
1543 
1544 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1545 
1546 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1547 
1548 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1549 				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1550 
1551 	sc->callback = liquidio_nic_seapi_ctl_callback;
1552 	sc->callback_arg = sc;
1553 	sc->wait_time = 5000;
1554 
1555 	retval = octeon_send_soft_command(oct, sc);
1556 	if (retval == IQ_SEND_FAILED) {
1557 		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1558 		retval = -EBUSY;
1559 	} else {
1560 		/* Wait for response or timeout */
1561 		if (wait_for_completion_timeout(&ctx->complete,
1562 						msecs_to_jiffies(10000)) == 0) {
1563 			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
1564 				__func__);
1565 			octeon_free_soft_command(oct, sc);
1566 			return -EINTR;
1567 		}
1568 
1569 		retval = resp->status;
1570 
1571 		if (retval) {
1572 			dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
1573 				__func__, retval);
1574 			octeon_free_soft_command(oct, sc);
1575 			return -EIO;
1576 		}
1577 
1578 		var = be32_to_cpu((__force __be32)resp->speed);
1579 		if (var != speed) {
1580 			dev_err(&oct->pci_dev->dev,
1581 				"%s: setting failed speed= %x, expect %x\n",
1582 				__func__, var, speed);
1583 		}
1584 
1585 		oct->speed_setting = var;
1586 	}
1587 
1588 	octeon_free_soft_command(oct, sc);
1589 
1590 	return retval;
1591 }
1592 
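/**
 * \brief Query the link speed setting from the firmware
 * @param lio per-network private data
 */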
1593 int liquidio_get_speed(struct lio *lio)
1594 {
1595 	struct liquidio_nic_seapi_ctl_context *ctx;
1596 	struct octeon_device *oct = lio->oct_dev;
1597 	struct oct_nic_seapi_resp *resp;
1598 	struct octeon_soft_command *sc;
1599 	union octnet_cmd *ncmd;
1600 	u32 ctx_size;
1601 	int retval;
1602 
1603 	ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context);
1604 	sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1605 				       sizeof(struct oct_nic_seapi_resp),
1606 				       ctx_size);
1607 	if (!sc)
1608 		return -ENOMEM;
1609 
1610 	ncmd = sc->virtdptr;
1611 	ctx  = sc->ctxptr;
1612 	resp = sc->virtrptr;
1613 	memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
1614 
1615 	ctx->octeon_id = lio_get_device_id(oct);
1616 	ctx->status = 0;
1617 	init_completion(&ctx->complete);
1618 
1619 	ncmd->u64 = 0;
1620 	ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
1621 
1622 	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1623 
1624 	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1625 
1626 	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1627 				    OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
1628 
1629 	sc->callback = liquidio_nic_seapi_ctl_callback;
1630 	sc->callback_arg = sc;
1631 	sc->wait_time = 5000;
1632 
1633 	retval = octeon_send_soft_command(oct, sc);
1634 	if (retval == IQ_SEND_FAILED) {
1635 		dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
1636 		oct->no_speed_setting = 1;
1637 		oct->speed_setting = 25;
1638 
1639 		retval = -EBUSY;
1640 	} else {
1641 		if (wait_for_completion_timeout(&ctx->complete,
1642 						msecs_to_jiffies(10000)) == 0) {
1643 			dev_err(&oct->pci_dev->dev, "%s: sc timeout\n",
1644 				__func__);
1645 
1646 			oct->speed_setting = 25;
1647 			oct->no_speed_setting = 1;
1648 
1649 			octeon_free_soft_command(oct, sc);
1650 
1651 			return -EINTR;
1652 		}
1653 		retval = resp->status;
1654 		if (retval) {
1655 			dev_err(&oct->pci_dev->dev,
1656 				"%s failed retval=%d\n", __func__, retval);
1657 			oct->no_speed_setting = 1;
1658 			oct->speed_setting = 25;
1659 			octeon_free_soft_command(oct, sc);
1660 			retval = -EIO;
1661 		} else {
1662 			u32 var;
1663 
1664 			var = be32_to_cpu((__force __be32)resp->speed);
1665 			oct->speed_setting = var;
1666 			if (var == 0xffff) {
1667 				oct->no_speed_setting = 1;
1668 				/* unable to access boot variables;
1669 				 * use the default value based on the NIC type
1670 				 */
1671 				oct->speed_setting = 25;
1672 			}
1673 		}
1674 	}
1675 
1676 	octeon_free_soft_command(oct, sc);
1677 
1678 	return retval;
1679 }
1680