// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);

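/* Run one entry from the lif's deferred-work list, then re-schedule
 * the work so that any remaining entries drain one per invocation.
 */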
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	spin_lock_bh(&def->lock);
	if (!list_empty(&def->list)) {
		w = list_first_entry(&def->list,
				     struct ionic_deferred_work, list);
		list_del(&w->list);
	}
	spin_unlock_bh(&def->lock);

	if (w) {
		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		default:
			break;
		}
		kfree(w);
		schedule_work(&def->work);
	}
}

static void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				       struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

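/* Compare the port's reported link state against the netdev carrier
 * state, and update the carrier and tx queues only on a change.
 */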
static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	/* filter out the no-change cases */
	if (link_up == netif_carrier_ok(netdev))
		goto link_out;

	if (link_up) {
		netdev_info(netdev, "Link up - %d Gbps\n",
			    le32_to_cpu(lif->info->status.link_speed) / 1000);

		if (test_bit(IONIC_LIF_UP, lif->state)) {
			netif_tx_wake_all_queues(lif->netdev);
			netif_carrier_on(netdev);
		}
	} else {
		netdev_info(netdev, "Link down\n");

		/* carrier off first to avoid watchdog timeout */
		netif_carrier_off(netdev);
		if (test_bit(IONIC_LIF_UP, lif->state))
			netif_tx_stop_all_queues(netdev);
	}

link_out:
	clear_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state);
}

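/* Ask for a link check; only one request is kept outstanding, and the
 * check is deferred to process context when in interrupt context.
 */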
static void ionic_link_status_check_request(struct ionic_lif *lif)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return;

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic_lif *lif, int index)
{
	if (index != INTR_INDEX_NOT_ASSIGNED && index < lif->ionic->nintrs)
		clear_bit(index, lif->ionic->intrs);
}

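/* Unmask the queue's interrupt and enable NAPI (if it has its own
 * interrupt), then tell the firmware to start servicing the queue.
 */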
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_qcq_disable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_DISABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
	qcq->base = NULL;
	qcq->base_pa = 0;

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_free(lif, qcq->intr.index);

	devm_kfree(dev, qcq->cq.info);
	qcq->cq.info = NULL;
	devm_kfree(dev, qcq->q.info);
	qcq->q.info = NULL;
	devm_kfree(dev, qcq);
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int i;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);

	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;

	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);

	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

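/* Allocate a queue/completion-queue pair: the qcq struct, its q and cq
 * info arrays, an optional interrupt, and one contiguous DMA region
 * carved into page-aligned q, cq, and (optionally) sg rings.
 */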
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	u32 q_size, cq_size, sg_size, total_size;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	q_size  = num_descs * desc_size;
	cq_size = num_descs * cq_desc_size;
	sg_size = num_descs * sg_desc_size;

	total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
	/* Note: aligning q_size/cq_size is not enough, because the cq base
	 * address must be page aligned while q_base may not start on a
	 * page boundary; add a PAGE_SIZE of slack.
	 */
	total_size += PAGE_SIZE;
	if (flags & IONIC_QCQ_F_SG) {
		total_size += ALIGN(sg_size, PAGE_SIZE);
		total_size += PAGE_SIZE;
	}

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out;
	}

	if (flags & IONIC_QCQ_F_INTR) {
		err = ionic_intr_alloc(lif, &new->intr);
		if (err) {
			netdev_warn(lif->netdev, "no intr for %s: %d\n",
				    name, err);
			goto err_out;
		}

		err = ionic_bus_get_irq(lif->ionic, new->intr.index);
		if (err < 0) {
			netdev_warn(lif->netdev, "no vector for %s: %d\n",
				    name, err);
			goto err_out_free_intr;
		}
		new->intr.vector = err;
		ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
				       IONIC_INTR_MASK_SET);

		new->intr.cpu = new->intr.index % num_online_cpus();
		if (cpu_online(new->intr.cpu))
			cpumask_set_cpu(new->intr.cpu,
					&new->intr.affinity_mask);
	} else {
		new->intr.index = INTR_INDEX_NOT_ASSIGNED;
	}

	new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs,
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_intr;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_intr;
	}

	new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
				       GFP_KERNEL);
	if (!new->base) {
		netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
		err = -ENOMEM;
		goto err_out_free_intr;
	}

	new->total_size = total_size;

	q_base = new->base;
	q_base_pa = new->base_pa;

	cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
	cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);

	if (flags & IONIC_QCQ_F_SG) {
		sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
					PAGE_SIZE);
		sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	ionic_q_map(&new->q, q_base, q_base_pa);
	ionic_cq_map(&new->cq, cq_base, cq_base_pa);
	ionic_cq_bind(&new->cq, &new->q);

	*qcq = new;

	return 0;

err_out_free_intr:
	ionic_intr_free(lif, new->intr.index);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

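/* Allocate the adminq, the optional notifyq (which shares the adminq
 * interrupt), and the per-queue stats blocks for the tx and rx lists.
 */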
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int q_list_size;
	unsigned int flags;
	int err;
	int i;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out_free_adminqcq;

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
	err = -ENOMEM;
	lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out_free_notifyqcq;
	for (i = 0; i < lif->nxqs; i++) {
		lif->txqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->txqcqs[i].stats)
			goto err_out_free_tx_stats;
	}

	lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out_free_tx_stats;
	for (i = 0; i < lif->nxqs; i++) {
		lif->rxqcqs[i].stats = devm_kzalloc(dev,
						    sizeof(struct ionic_q_stats),
						    GFP_KERNEL);
		if (!lif->rxqcqs[i].stats)
			goto err_out_free_rx_stats;
	}

	return 0;

err_out_free_rx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->rxqcqs[i].stats)
			devm_kfree(dev, lif->rxqcqs[i].stats);
	devm_kfree(dev, lif->rxqcqs);
	lif->rxqcqs = NULL;
err_out_free_tx_stats:
	for (i = 0; i < lif->nxqs; i++)
		if (lif->txqcqs[i].stats)
			devm_kfree(dev, lif->txqcqs[i].stats);
	devm_kfree(dev, lif->txqcqs);
	lif->txqcqs = NULL;
err_out_free_notifyqcq:
	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}
err_out_free_adminqcq:
	ionic_qcq_free(lif, lif->adminqcq);
	lif->adminqcq = NULL;

	return err;
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
		       NAPI_POLL_WEIGHT);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netif_napi_del(&qcq->napi);
		return err;
	}

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

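/* Service one notifyq completion.  Event IDs increase monotonically,
 * so an eid at or below the last one seen means nothing new to do.
 */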
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if (eid <= lif->last_eid)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif);
		break;
	case IONIC_EVENT_RESET:
		netdev_info(netdev, "Notifyq IONIC_EVENT_RESET eid=%lld\n",
			    eid);
		netdev_info(netdev, "  reset_code=%d state=%d\n",
			    comp->reset.reset_code,
			    comp->reset.state);
		break;
	default:
		netdev_warn(netdev, "Notifyq unknown event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct ionic_cq *cq = &lif->notifyqcq->cq;
	u32 work_done;

	work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
				     NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);

	return work_done;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	int n_work = 0;
	int a_work = 0;

	if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
		n_work = ionic_notifyq_clean(lif, budget);
	a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);

	return max(n_work, a_work);
}

static void ionic_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

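/* Add a MAC filter through the adminq, unless one for this address is
 * already tracked, and save the new filter in the local filter list.
 */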
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
		   ctx.comp.rx_filter_add.filter_id);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
		   ctx.cmd.rx_filter_del.filter_id);

	return 0;
}

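/* Count the filter against the device's limits, then either apply the
 * add/delete directly or defer it when in interrupt context.
 */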
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
		nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);

		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = snprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		     lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += snprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}

static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_deferred_work *work;

	if (in_interrupt()) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		work->rx_mode = rx_mode;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

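/* Build the rx_mode bits from the netdev flags, sync the unicast and
 * multicast filter lists, and fall back to PROMISC or ALLMULTI when a
 * list overflows the device's filter limits.
 */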
static void ionic_set_rx_mode(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_identity *ident;
	unsigned int nfilters;
	unsigned int rx_mode;

	ident = &lif->ionic->ident;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the unicast addresses, then check for an overflow state:
	 *    if we're in overflow, track that and enable NIC PROMISC;
	 *    else if overflow was set but is no longer needed, clear our
	 *       overflow flag and check the netdev flags to see whether
	 *       we can disable NIC PROMISC
	 */
	__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

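/* Push the requested feature set to the device and record in
 * lif->hw_features the subset the firmware actually accepted.
 */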
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC does not support vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	err = ionic_reset_queues(lif);

	return err;
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	rtnl_lock();
	ionic_reset_queues(lif);
	rtnl_unlock();
}

static void ionic_tx_timeout(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
		   ctx.comp.rx_filter_add.filter_id);

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
		   le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

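/* Update the lif's RSS state (types, hash key, indirection table) and
 * push it to the device; the indirection table is passed by its DMA
 * address rather than copied into the command.
 */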
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.types = cpu_to_le16(types),
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	lif->rss_types = types;

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	u8 rss_key[IONIC_RSS_HASH_KEY_SIZE];
	unsigned int tbl_sz;
	unsigned int i;

	netdev_rss_key_fill(rss_key, IONIC_RSS_HASH_KEY_SIZE);

	lif->rss_types = IONIC_RSS_TYPE_IPV4     |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6     |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, rss_key, NULL);
}

static int ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	return ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_disable(lif->txqcqs[i].qcq);
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_tx_flush(&lif->txqcqs[i].qcq->cq);

		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
		ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
		ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
	}
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	for (i = 0; i < lif->nxqs; i++) {
		ionic_qcq_free(lif, lif->txqcqs[i].qcq);
		lif->txqcqs[i].qcq = NULL;

		ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
		lif->rxqcqs[i].qcq = NULL;
	}
}

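/* Allocate the tx and rx qcq pairs.  Only the rx queues get their own
 * interrupts; each tx queue shares its partner rx queue's vector.
 */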
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int flags;
	unsigned int i;
	int err = 0;
	u32 coal;

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sizeof(struct ionic_txq_sg_desc),
				      lif->kern_pid, &lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_INTR;
	coal = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs);
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      0, lif->kern_pid, &lif->rxqcqs[i].qcq);
		if (err)
			goto err_out;

		lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i].qcq->intr.index, coal);
		ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
					  lif->txqcqs[i].qcq);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
	}

	return err;
}

static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_enable(lif->txqcqs[i].qcq);
		if (err)
			goto err_out;

		ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
		err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
		if (err) {
			ionic_qcq_disable(lif->txqcqs[i].qcq);
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		ionic_qcq_disable(lif->rxqcqs[i].qcq);
		ionic_qcq_disable(lif->txqcqs[i].qcq);
	}

	return err;
}

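/* ndo_open: allocate, init, and enable the tx/rx queues, then request
 * a link check to set the carrier state.
 */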
int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netif_carrier_off(netdev);

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = ionic_txrx_enable(lif);
	if (err)
		goto err_txrx_deinit;

	netif_set_real_num_tx_queues(netdev, lif->nxqs);
	netif_set_real_num_rx_queues(netdev, lif->nxqs);

	set_bit(IONIC_LIF_UP, lif->state);

	ionic_link_status_check_request(lif);
	if (netif_carrier_ok(netdev))
		netif_tx_wake_all_queues(netdev);

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}

int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err = 0;

	if (!test_bit(IONIC_LIF_UP, lif->state)) {
		dev_dbg(lif->ionic->dev, "%s: %s state=DOWN\n",
			__func__, lif->name);
		return 0;
	}
	dev_dbg(lif->ionic->dev, "%s: %s state=UP\n", __func__, lif->name);
	clear_bit(IONIC_LIF_UP, lif->state);

	/* carrier off before disabling queues to avoid watchdog timeout */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);
	netif_tx_disable(netdev);

	ionic_txrx_disable(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return err;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
};

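/* Stop and restart the queues, serialized against other resets by the
 * IONIC_LIF_QUEUE_RESET state bit.
 */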
int ionic_reset_queues(struct ionic_lif *lif)
{
	bool running;
	int err = 0;

	/* Put off the next watchdog timeout */
	netif_trans_update(lif->netdev);

	if (!ionic_wait_for_bit(lif, IONIC_LIF_QUEUE_RESET))
		return -EBUSY;

	running = netif_running(lif->netdev);
	if (running)
		err = ionic_stop(lif->netdev);
	if (!err && running)
		ionic_open(lif->netdev);

	clear_bit(IONIC_LIF_QUEUE_RESET, lif->state);

	return err;
}

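/* Allocate the netdev and lif, set the lif defaults, and allocate the
 * lif info region, the queues, and the RSS indirection table.
 */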
static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
{
	struct device *dev = ionic->dev;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	u32 coal;
	int err;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->nrxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->master_lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netdev->min_mtu = IONIC_MIN_MTU;
	netdev->max_mtu = IONIC_MAX_MTU;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = index;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	coal = ionic_coal_usec_to_hw(lif->ionic, IONIC_ITR_COAL_USEC_DEFAULT);
	lif->rx_coalesce_usecs = ionic_coal_hw_to_usec(lif->ionic, coal);

	snprintf(lif->name, sizeof(lif->name), "lif%u", index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	/* allocate queues */
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);
	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}

	list_add_tail(&lif->list, &ionic->lifs);

	return lif;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;

	return ERR_PTR(err);
}

int ionic_lifs_alloc(struct ionic *ionic)
{
	struct ionic_lif *lif;

	INIT_LIST_HEAD(&ionic->lifs);

	/* only build the first lif, others are for later features */
	set_bit(0, ionic->lifbits);
	lif = ionic_lif_alloc(ionic, 0);

	return PTR_ERR_OR_ZERO(lif);
}

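/* Ask the firmware to reset the LIF; dev_cmd_lock serializes access
 * to the single device command register window.
 */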
static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

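/* Release everything ionic_lif_alloc() set up, with a LIF reset
 * before the info block and doorbell mapping go away so the device
 * is no longer using them.
 */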
static void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	ionic_lif_reset(lif);

	/* free lif info */
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	list_del(&lif->list);
	free_netdev(lif->netdev);
}

void ionic_lifs_free(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);

		ionic_lif_free(lif);
	}
}

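/* Undo ionic_lif_init(): quiesce the adminq napi, deinit the control
 * queues, and reset the LIF in hardware.  The IONIC_LIF_INITED bit
 * makes this safe to call on a lif that was never initialized.
 */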
static void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_bit(IONIC_LIF_INITED, lif->state))
		return;

	clear_bit(IONIC_LIF_INITED, lif->state);

	ionic_rx_filters_deinit(lif);
	ionic_lif_rss_deinit(lif);

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	ionic_lif_reset(lif);
}

void ionic_lifs_deinit(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		ionic_lif_deinit(lif);
	}
}

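/* The adminq must be brought up with a raw device command, since no
 * adminq exists yet to post through; once it is running, the rest of
 * the lif configuration can go through adminq commands.
 */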
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "adminq irq request failed %d\n", err);
		netif_napi_del(&qcq->napi);
		return err;
	}

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

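/* The notifyq is initialized through the freshly-started adminq and
 * carries asynchronous event notifications from the device.  It has
 * no interrupt of its own: intr_index points at the adminq interrupt,
 * so both queues are serviced from the same context.
 */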
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	ionic_debugfs_add_qcq(lif, qcq);

	return 0;
}

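/* Read the device's station MAC address with LIF_GETATTR and install
 * it on the netdev, dropping the filter for any address that was set
 * before.
 */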
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
	addr.sa_family = AF_INET;
	err = eth_prepare_mac_addr_change(netdev, &addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_dbg(lif->netdev, "deleting station MAC addr %pM\n",
			   netdev->dev_addr);
		ionic_lif_addr(lif, netdev->dev_addr, false);
	}

	eth_commit_mac_addr_change(netdev, &addr);
	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true);

	return 0;
}

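/* Bring-up order: LIF_INIT device command, kernel doorbell page,
 * adminq, then (optionally) notifyq, nic features, rx filters, and
 * the station MAC.  The unwind labels lean on ionic_lif_qcq_deinit()
 * (elsewhere in this file) quietly skipping queues that never got
 * marked IONIC_QCQ_F_INITED.
 */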
static int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	ionic_debugfs_add_lif(lif);

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_rx_filters_init(lif);
	if (err)
		goto err_out_notifyq_deinit;

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

int ionic_lifs_init(struct ionic *ionic)
{
	struct list_head *cur, *tmp;
	struct ionic_lif *lif;
	int err;

	list_for_each_safe(cur, tmp, &ionic->lifs) {
		lif = list_entry(cur, struct ionic_lif, list);
		err = ionic_lif_init(lif);
		if (err)
			return err;
	}

	return 0;
}

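/* Placeholder: netdev notifier events are currently handled
 * synchronously in ionic_lif_notify(), so there is no deferred work
 * to do yet.
 */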
static void ionic_lif_notify_work(struct work_struct *ws)
{
}

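/* Tell the device the netdev's current name with LIF_SETATTR, so the
 * device-side view of the interface stays in sync after renames.
 */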
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

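/* The netdevice notifier sees every netdev in the system; recognize
 * one of ours by its ndo_start_xmit before trusting netdev_priv().
 */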
static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

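/* Hook up the netdevice notifier (used to catch renames), then
 * register the netdev itself.  A notifier registration failure is
 * tolerated; only a failed register_netdev() is fatal.
 */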
int ionic_lifs_register(struct ionic *ionic)
{
	int err;

	INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);

	ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&ionic->nb);
	if (err)
		ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(ionic->master_lif->netdev);
	if (err) {
		dev_err(ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}

	ionic_link_status_check_request(ionic->master_lif);
	ionic->master_lif->registered = true;

	return 0;
}

void ionic_lifs_unregister(struct ionic *ionic)
{
	if (ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&ionic->nb);
		cancel_work_sync(&ionic->nb_work);
		ionic->nb.notifier_call = NULL;
	}

	/* There is only one lif ever registered in the
	 * current model, so don't bother searching the
	 * ionic->lifs list for candidates to unregister
	 */
	cancel_work_sync(&ionic->master_lif->deferred.work);
	cancel_work_sync(&ionic->master_lif->tx_timeout_work);
	if (ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(ionic->master_lif->netdev);
}

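/* Ask the firmware for the identity data of this LIF type and copy it
 * out of the dev_cmd register window, clamped to the window's size.
 */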
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

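/* Decide how many queues and interrupts to use: start from the
 * device's identity limits clamped to the online CPU count, then
 * halve the notifyq, event queue, and TxRx pair counts in turn until
 * the OS grants enough interrupt vectors.
 */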
int ionic_lifs_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 *    1 for master lif adminq/notifyq
	 *    1 for each CPU for master lif TxRx queue pairs
	 *    whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;  /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}