// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/cpumask.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
#include "ionic_debugfs.h"

/* queuetype support level */
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
	[IONIC_QTYPE_ADMINQ]  = 0,	/* 0 = Base version with CQ support */
	[IONIC_QTYPE_NOTIFYQ] = 0,	/* 0 = Base version */
	[IONIC_QTYPE_RXQ]     = 0,	/* 0 = Base version with CQ+SG support */
	[IONIC_QTYPE_TXQ]     = 1,	/* 0 = Base version with CQ+SG support
					 * 1 = ... with Tx SG version 1
					 */
};

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
static void ionic_link_status_check(struct ionic_lif *lif);
static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);

static void ionic_txrx_deinit(struct ionic_lif *lif);
static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);

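/* Dynamic interrupt moderation (DIM) worker: convert the moderation
 * profile suggested by net_dim into a hardware coalesce value, which
 * is later programmed into the interrupt coalescing hardware.
 */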
static void ionic_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder cur_moder;
	struct ionic_qcq *qcq;
	u32 new_coal;

	cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	qcq = container_of(dim, struct ionic_qcq, dim);
	new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
	qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
	dim->state = DIM_START_MEASURE;
}

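/* Worker that drains the lif's deferred-work list, handling each entry
 * in process context where it is safe to sleep, e.g. to wait on AdminQ
 * completions.
 */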
static void ionic_lif_deferred_work(struct work_struct *work)
{
	struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
	struct ionic_deferred *def = &lif->deferred;
	struct ionic_deferred_work *w = NULL;

	do {
		spin_lock_bh(&def->lock);
		if (!list_empty(&def->list)) {
			w = list_first_entry(&def->list,
					     struct ionic_deferred_work, list);
			list_del(&w->list);
		}
		spin_unlock_bh(&def->lock);

		if (!w)
			break;

		switch (w->type) {
		case IONIC_DW_TYPE_RX_MODE:
			ionic_lif_rx_mode(lif, w->rx_mode);
			break;
		case IONIC_DW_TYPE_RX_ADDR_ADD:
			ionic_lif_addr_add(lif, w->addr);
			break;
		case IONIC_DW_TYPE_RX_ADDR_DEL:
			ionic_lif_addr_del(lif, w->addr);
			break;
		case IONIC_DW_TYPE_LINK_STATUS:
			ionic_link_status_check(lif);
			break;
		case IONIC_DW_TYPE_LIF_RESET:
			if (w->fw_status)
				ionic_lif_handle_fw_up(lif);
			else
				ionic_lif_handle_fw_down(lif);
			break;
		default:
			break;
		}
		kfree(w);
		w = NULL;
	} while (true);
}

void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
				struct ionic_deferred_work *work)
{
	spin_lock_bh(&def->lock);
	list_add_tail(&work->list, &def->list);
	spin_unlock_bh(&def->lock);
	schedule_work(&def->work);
}

static void ionic_link_status_check(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	u16 link_status;
	bool link_up;

	if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	link_status = le16_to_cpu(lif->info->status.link_status);
	link_up = link_status == IONIC_PORT_OPER_STATUS_UP;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			u32 link_speed;

			ionic_port_identify(lif->ionic);
			link_speed = le32_to_cpu(lif->info->status.link_speed);
			netdev_info(netdev, "Link up - %d Gbps\n",
				    link_speed / 1000);
			netif_carrier_on(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_start_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			netdev_info(netdev, "Link down\n");
			netif_carrier_off(netdev);
		}

		if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
			mutex_lock(&lif->queue_lock);
			ionic_stop_queues(lif);
			mutex_unlock(&lif->queue_lock);
		}
	}

	clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}

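/* Queue a link check: run it directly when we may sleep, otherwise
 * push it onto the deferred-work list to run in process context.
 */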
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
	struct ionic_deferred_work *work;

	/* we only need one request outstanding at a time */
	if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
		return;

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
			return;
		}

		work->type = IONIC_DW_TYPE_LINK_STATUS;
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_link_status_check(lif);
	}
}

static irqreturn_t ionic_isr(int irq, void *data)
{
	struct napi_struct *napi = data;

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}

static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_intr_info *intr = &qcq->intr;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	const char *name;

	if (lif->registered)
		name = lif->netdev->name;
	else
		name = dev_name(dev);

	snprintf(intr->name, sizeof(intr->name),
		 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);

	return devm_request_irq(dev, intr->vector, ionic_isr,
				0, intr->name, &qcq->napi);
}

static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
{
	struct ionic *ionic = lif->ionic;
	int index;

	index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
	if (index == ionic->nintrs) {
		netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
			    __func__, index, ionic->nintrs);
		return -ENOSPC;
	}

	set_bit(index, ionic->intrs);
	ionic_intr_init(&ionic->idev, intr, index);

	return 0;
}

static void ionic_intr_free(struct ionic *ionic, int index)
{
	if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
		clear_bit(index, ionic->intrs);
}

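/* Enable a queue/completion-queue pair: set the interrupt affinity,
 * enable NAPI and unmask the interrupt if the qcq owns one, then ask
 * the firmware to start servicing the queue via a Q_CONTROL command.
 */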
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_lif *lif = q->lif;
	struct ionic_dev *idev;
	struct device *dev;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.index = cpu_to_le32(q->index),
			.oper = IONIC_Q_ENABLE,
		},
	};

	idev = &lif->ionic->idev;
	dev = lif->ionic->dev;

	dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
		ctx.cmd.q_control.index, ctx.cmd.q_control.type);

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		irq_set_affinity_hint(qcq->intr.vector,
				      &qcq->intr.affinity_mask);
		napi_enable(&qcq->napi);
		ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
	}

	return ionic_adminq_post_wait(lif, &ctx);
}

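/* Disable a qcq: quiesce NAPI and mask the interrupt first, then tell
 * the firmware to stop the queue.  send_to_hw is false when the
 * firmware is known to be unresponsive, so no command is posted.
 */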
static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
	struct ionic_queue *q;
	struct ionic_lif *lif;
	int err = 0;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_control = {
			.opcode = IONIC_CMD_Q_CONTROL,
			.oper = IONIC_Q_DISABLE,
		},
	};

	if (!qcq)
		return -ENXIO;

	q = &qcq->q;
	lif = q->lif;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		struct ionic_dev *idev = &lif->ionic->idev;

		cancel_work_sync(&qcq->dim.work);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		synchronize_irq(qcq->intr.vector);
		irq_set_affinity_hint(qcq->intr.vector, NULL);
		napi_disable(&qcq->napi);
	}

	if (send_to_hw) {
		ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
		ctx.cmd.q_control.type = q->type;
		ctx.cmd.q_control.index = cpu_to_le32(q->index);
		dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
			ctx.cmd.q_control.index, ctx.cmd.q_control.type);

		err = ionic_adminq_post_wait(lif, &ctx);
	}

	return err;
}

static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	if (!qcq)
		return;

	if (!(qcq->flags & IONIC_QCQ_F_INITED))
		return;

	if (qcq->flags & IONIC_QCQ_F_INTR) {
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_SET);
		netif_napi_del(&qcq->napi);
	}

	qcq->flags &= ~IONIC_QCQ_F_INITED;
}

static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
		return;

	irq_set_affinity_hint(qcq->intr.vector, NULL);
	devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
	qcq->intr.vector = 0;
	ionic_intr_free(lif->ionic, qcq->intr.index);
	qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
}

static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;

	if (!qcq)
		return;

	ionic_debugfs_del_qcq(qcq);

	if (qcq->q_base) {
		dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
		qcq->q_base = NULL;
		qcq->q_base_pa = 0;
	}

	if (qcq->cq_base) {
		dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
		qcq->cq_base = NULL;
		qcq->cq_base_pa = 0;
	}

	if (qcq->sg_base) {
		dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
		qcq->sg_base = NULL;
		qcq->sg_base_pa = 0;
	}

	ionic_qcq_intr_free(lif, qcq);

	if (qcq->cq.info) {
		devm_kfree(dev, qcq->cq.info);
		qcq->cq.info = NULL;
	}
	if (qcq->q.info) {
		devm_kfree(dev, qcq->q.info);
		qcq->q.info = NULL;
	}
}

static void ionic_qcqs_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	if (lif->notifyqcq) {
		ionic_qcq_free(lif, lif->notifyqcq);
		devm_kfree(dev, lif->notifyqcq);
		lif->notifyqcq = NULL;
	}

	if (lif->adminqcq) {
		ionic_qcq_free(lif, lif->adminqcq);
		devm_kfree(dev, lif->adminqcq);
		lif->adminqcq = NULL;
	}

	if (lif->rxqcqs) {
		devm_kfree(dev, lif->rxqstats);
		lif->rxqstats = NULL;
		devm_kfree(dev, lif->rxqcqs);
		lif->rxqcqs = NULL;
	}

	if (lif->txqcqs) {
		devm_kfree(dev, lif->txqstats);
		lif->txqstats = NULL;
		devm_kfree(dev, lif->txqcqs);
		lif->txqcqs = NULL;
	}
}

static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
				      struct ionic_qcq *n_qcq)
{
	if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
		ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
		n_qcq->flags &= ~IONIC_QCQ_F_INTR;
	}

	n_qcq->intr.vector = src_qcq->intr.vector;
	n_qcq->intr.index = src_qcq->intr.index;
}

static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	int err;

	if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
		qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
		return 0;
	}

	err = ionic_intr_alloc(lif, &qcq->intr);
	if (err) {
		netdev_warn(lif->netdev, "no intr for %s: %d\n",
			    qcq->q.name, err);
		goto err_out;
	}

	err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
	if (err < 0) {
		netdev_warn(lif->netdev, "no vector for %s: %d\n",
			    qcq->q.name, err);
		goto err_out_free_intr;
	}
	qcq->intr.vector = err;
	ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
			       IONIC_INTR_MASK_SET);

	err = ionic_request_irq(lif, qcq);
	if (err) {
		netdev_warn(lif->netdev, "irq request failed %d\n", err);
		goto err_out_free_intr;
	}

	/* try to get the irq on the local numa node first */
	qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
					     dev_to_node(lif->ionic->dev));
	if (qcq->intr.cpu != -1)
		cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);

	netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
	return 0;

err_out_free_intr:
	ionic_intr_free(lif->ionic, qcq->intr.index);
err_out:
	return err;
}

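/* Allocate a queue/completion-queue pair and its DMA rings.  For a
 * notifyq the descriptor ring and completion ring must share one
 * contiguous DMA allocation; otherwise each ring (and the optional
 * scatter-gather ring) gets its own page-aligned allocation.
 */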
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
			   unsigned int index,
			   const char *name, unsigned int flags,
			   unsigned int num_descs, unsigned int desc_size,
			   unsigned int cq_desc_size,
			   unsigned int sg_desc_size,
			   unsigned int pid, struct ionic_qcq **qcq)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	void *q_base, *cq_base, *sg_base;
	dma_addr_t cq_base_pa = 0;
	dma_addr_t sg_base_pa = 0;
	dma_addr_t q_base_pa = 0;
	struct ionic_qcq *new;
	int err;

	*qcq = NULL;

	new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
	if (!new) {
		netdev_err(lif->netdev, "Cannot allocate queue structure\n");
		err = -ENOMEM;
		goto err_out;
	}

	new->flags = flags;

	new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
				   GFP_KERNEL);
	if (!new->q.info) {
		netdev_err(lif->netdev, "Cannot allocate queue info\n");
		err = -ENOMEM;
		goto err_out_free_qcq;
	}

	new->q.type = type;

	err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
			   desc_size, sg_desc_size, pid);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize queue\n");
		goto err_out_free_q_info;
	}

	err = ionic_alloc_qcq_interrupt(lif, new);
	if (err)
		goto err_out_free_q_info;

	new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
				    GFP_KERNEL);
	if (!new->cq.info) {
		netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
		err = -ENOMEM;
		goto err_out_free_irq;
	}

	err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
	if (err) {
		netdev_err(lif->netdev, "Cannot initialize completion queue\n");
		goto err_out_free_cq_info;
	}

	if (flags & IONIC_QCQ_F_NOTIFYQ) {
		int q_size, cq_size;

		/* q & cq need to be contiguous in case of notifyq */
		q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
		cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);

		new->q_size = PAGE_SIZE + q_size + cq_size;
		new->q_base = dma_alloc_coherent(dev, new->q_size,
						 &new->q_base_pa, GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
		cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	} else {
		new->q_size = PAGE_SIZE + (num_descs * desc_size);
		new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
						 GFP_KERNEL);
		if (!new->q_base) {
			netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq_info;
		}
		q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
		q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
		ionic_q_map(&new->q, q_base, q_base_pa);

		new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
		new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
						  GFP_KERNEL);
		if (!new->cq_base) {
			netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_q;
		}
		cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
		cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
		ionic_cq_map(&new->cq, cq_base, cq_base_pa);
		ionic_cq_bind(&new->cq, &new->q);
	}

	if (flags & IONIC_QCQ_F_SG) {
		new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
		new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
						  GFP_KERNEL);
		if (!new->sg_base) {
			netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
			err = -ENOMEM;
			goto err_out_free_cq;
		}
		sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
		sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
		ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
	}

	INIT_WORK(&new->dim.work, ionic_dim_work);
	new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;

	*qcq = new;

	return 0;

err_out_free_cq:
	dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
	dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
	devm_kfree(dev, new->cq.info);
err_out_free_irq:
	if (flags & IONIC_QCQ_F_INTR) {
		devm_free_irq(dev, new->intr.vector, &new->napi);
		ionic_intr_free(lif->ionic, new->intr.index);
	}
err_out_free_q_info:
	devm_kfree(dev, new->q.info);
err_out_free_qcq:
	devm_kfree(dev, new);
err_out:
	dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
	return err;
}

static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	unsigned int flags;
	int err;

	flags = IONIC_QCQ_F_INTR;
	err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
			      IONIC_ADMINQ_LENGTH,
			      sizeof(struct ionic_admin_cmd),
			      sizeof(struct ionic_admin_comp),
			      0, lif->kern_pid, &lif->adminqcq);
	if (err)
		return err;
	ionic_debugfs_add_qcq(lif, lif->adminqcq);

	if (lif->ionic->nnqs_per_lif) {
		flags = IONIC_QCQ_F_NOTIFYQ;
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
				      flags, IONIC_NOTIFYQ_LENGTH,
				      sizeof(struct ionic_notifyq_cmd),
				      sizeof(union ionic_notifyq_comp),
				      0, lif->kern_pid, &lif->notifyqcq);
		if (err)
			goto err_out;
		ionic_debugfs_add_qcq(lif, lif->notifyqcq);

		/* Let the notifyq ride on the adminq interrupt */
		ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
	}

	err = -ENOMEM;
	lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->txqcqs)
		goto err_out;
	lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				   sizeof(struct ionic_qcq *), GFP_KERNEL);
	if (!lif->rxqcqs)
		goto err_out;

	lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
				     sizeof(struct ionic_tx_stats), GFP_KERNEL);
	if (!lif->txqstats)
		goto err_out;
	lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
				     sizeof(struct ionic_rx_stats), GFP_KERNEL);
	if (!lif->rxqstats)
		goto err_out;

	return 0;

err_out:
	ionic_qcqs_free(lif);
	return err;
}

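/* Reset a qcq's producer/consumer state and zero its rings so it can
 * be (re)posted to the firmware with a clean slate.
 */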
static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
{
	qcq->q.tail_idx = 0;
	qcq->q.head_idx = 0;
	qcq->cq.tail_idx = 0;
	qcq->cq.done_color = 1;
	memset(qcq->q_base, 0, qcq->q_size);
	memset(qcq->cq_base, 0, qcq->cq_size);
	memset(qcq->sg_base, 0, qcq->sg_size);
}

static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	unsigned int intr_index;
	int err;

	if (qcq->flags & IONIC_QCQ_F_INTR)
		intr_index = qcq->intr.index;
	else
		intr_index = lif->rxqcqs[q->index]->intr.index;
	ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);

	dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_SG),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
			.cq_ring_base = cpu_to_le64(cq->base_pa),
			.sg_ring_base = cpu_to_le64(q->sg_base_pa),
		},
	};
	int err;

	dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
	dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
	dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
	dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);

	ionic_qcq_sanitize(qcq);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);

	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
			       NAPI_POLL_WEIGHT);
	else
		netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
			       NAPI_POLL_WEIGHT);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

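/* Handle one notifyq completion.  Events carry a monotonically
 * increasing event id (eid); anything at or below the last eid we
 * processed is stale and signals the end of new work.
 */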
static bool ionic_notifyq_service(struct ionic_cq *cq,
				  struct ionic_cq_info *cq_info)
{
	union ionic_notifyq_comp *comp = cq_info->cq_desc;
	struct ionic_deferred_work *work;
	struct net_device *netdev;
	struct ionic_queue *q;
	struct ionic_lif *lif;
	u64 eid;

	q = cq->bound_q;
	lif = q->info[0].cb_arg;
	netdev = lif->netdev;
	eid = le64_to_cpu(comp->event.eid);

	/* Have we run out of new completions to process? */
	if ((s64)(eid - lif->last_eid) <= 0)
		return false;

	lif->last_eid = eid;

	dev_dbg(lif->ionic->dev, "notifyq event:\n");
	dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
			 comp, sizeof(*comp), true);

	switch (le16_to_cpu(comp->event.ecode)) {
	case IONIC_EVENT_LINK_CHANGE:
		ionic_link_status_check_request(lif, false);
		break;
	case IONIC_EVENT_RESET:
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
		} else {
			work->type = IONIC_DW_TYPE_LIF_RESET;
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		}
		break;
	default:
		netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
			    comp->event.ecode, eid);
		break;
	}

	return true;
}

static bool ionic_adminq_service(struct ionic_cq *cq,
				 struct ionic_cq_info *cq_info)
{
	struct ionic_admin_comp *comp = cq_info->cq_desc;

	if (!color_match(comp->color, cq->done_color))
		return false;

	ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));

	return true;
}

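/* NAPI poll shared by the adminq and the notifyq, which ride the same
 * interrupt.  Credits are returned for both queues, and the interrupt
 * is unmasked once a poll completes under budget.
 */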
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
	struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
	struct ionic_lif *lif = napi_to_cq(napi)->lif;
	struct ionic_dev *idev = &lif->ionic->idev;
	unsigned int flags = 0;
	int n_work = 0;
	int a_work = 0;
	int work_done;

	if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
		n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
					  ionic_notifyq_service, NULL, NULL);

	if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
		a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
					  ionic_adminq_service, NULL, NULL);

	work_done = max(n_work, a_work);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		flags |= IONIC_INTR_CRED_UNMASK;
		lif->adminqcq->cq.bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   intr->index,
				   n_work + a_work, flags);
	}

	return work_done;
}

void ionic_get_stats64(struct net_device *netdev,
		       struct rtnl_link_stats64 *ns)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_lif_stats *ls;

	memset(ns, 0, sizeof(*ns));
	ls = &lif->info->stats;

	ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
			 le64_to_cpu(ls->rx_mcast_packets) +
			 le64_to_cpu(ls->rx_bcast_packets);

	ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
			 le64_to_cpu(ls->tx_mcast_packets) +
			 le64_to_cpu(ls->tx_bcast_packets);

	ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
		       le64_to_cpu(ls->rx_mcast_bytes) +
		       le64_to_cpu(ls->rx_bcast_bytes);

	ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
		       le64_to_cpu(ls->tx_mcast_bytes) +
		       le64_to_cpu(ls->tx_bcast_bytes);

	ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
			 le64_to_cpu(ls->rx_mcast_drop_packets) +
			 le64_to_cpu(ls->rx_bcast_drop_packets);

	ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
			 le64_to_cpu(ls->tx_mcast_drop_packets) +
			 le64_to_cpu(ls->tx_bcast_drop_packets);

	ns->multicast = le64_to_cpu(ls->rx_mcast_packets);

	ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);

	ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
			       le64_to_cpu(ls->rx_queue_disabled) +
			       le64_to_cpu(ls->rx_desc_fetch_error) +
			       le64_to_cpu(ls->rx_desc_data_error);

	ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
				le64_to_cpu(ls->tx_queue_disabled) +
				le64_to_cpu(ls->tx_desc_fetch_error) +
				le64_to_cpu(ls->tx_desc_data_error);

	ns->rx_errors = ns->rx_over_errors +
			ns->rx_missed_errors;

	ns->tx_errors = ns->tx_aborted_errors;
}

static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err && err != -EEXIST)
		return err;

	return 0;
}

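/* Add or remove a MAC filter, accounting against the firmware's
 * advertised unicast/multicast filter limits.  In atomic context
 * (can_sleep is false) the AdminQ call is deferred to the work queue.
 */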
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
			  bool can_sleep)
{
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if (is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (!can_sleep) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return -ENOMEM;
		}
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}

static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
}

static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
}

static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
}

static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
{
	return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
}

static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_mode_set = {
			.opcode = IONIC_CMD_RX_MODE_SET,
			.lif_index = cpu_to_le16(lif->index),
			.rx_mode = cpu_to_le16(rx_mode),
		},
	};
	char buf[128];
	int err;
	int i;
#define REMAIN(__x) (sizeof(buf) - (__x))

	i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
		      lif->rx_mode, rx_mode);
	if (rx_mode & IONIC_RX_MODE_F_UNICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
	if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
	if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
	if (rx_mode & IONIC_RX_MODE_F_PROMISC)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
	if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
		i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
	netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
			    rx_mode, err);
	else
		lif->rx_mode = rx_mode;
}

static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
			       bool from_ndo)
{
	struct ionic_deferred_work *work;

	if (from_ndo) {
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work) {
			netdev_err(lif->netdev, "%s OOM\n", __func__);
			return;
		}
		work->type = IONIC_DW_TYPE_RX_MODE;
		work->rx_mode = rx_mode;
		netdev_dbg(lif->netdev, "deferred: rx_mode\n");
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		ionic_lif_rx_mode(lif, rx_mode);
	}
}

static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
{
	if (from_ndo)
		__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	else
		__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
}

static void ionic_dev_mc_sync(struct net_device *netdev, bool from_ndo)
{
	if (from_ndo)
		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	else
		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
}

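/* Compute the wanted rx_mode from the netdev flags, sync the unicast
 * and multicast address lists, and fall back to PROMISC/ALLMULTI when
 * the address lists overflow the hardware filter table.
 */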
static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	unsigned int nfilters;
	unsigned int rx_mode;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync the unicast addresses, then check for an overflow state:
	 * if we overflowed, track that we did and enable NIC PROMISC;
	 * else if the overflow flag is set but no longer needed,
	 * clear our overflow flag and check the netdev flags
	 * to see if we can disable NIC PROMISC
	 */
	ionic_dev_uc_sync(netdev, from_ndo);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	ionic_dev_mc_sync(netdev, from_ndo);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode)
		_ionic_lif_rx_mode(lif, rx_mode, from_ndo);
}

static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, true);
}

static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
{
	u64 wanted = 0;

	if (features & NETIF_F_HW_VLAN_CTAG_TX)
		wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
	if (features & NETIF_F_RXHASH)
		wanted |= IONIC_ETH_HW_RX_HASH;
	if (features & NETIF_F_RXCSUM)
		wanted |= IONIC_ETH_HW_RX_CSUM;
	if (features & NETIF_F_SG)
		wanted |= IONIC_ETH_HW_TX_SG;
	if (features & NETIF_F_HW_CSUM)
		wanted |= IONIC_ETH_HW_TX_CSUM;
	if (features & NETIF_F_TSO)
		wanted |= IONIC_ETH_HW_TSO;
	if (features & NETIF_F_TSO6)
		wanted |= IONIC_ETH_HW_TSO_IPV6;
	if (features & NETIF_F_TSO_ECN)
		wanted |= IONIC_ETH_HW_TSO_ECN;
	if (features & NETIF_F_GSO_GRE)
		wanted |= IONIC_ETH_HW_TSO_GRE;
	if (features & NETIF_F_GSO_GRE_CSUM)
		wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
	if (features & NETIF_F_GSO_IPXIP4)
		wanted |= IONIC_ETH_HW_TSO_IPXIP4;
	if (features & NETIF_F_GSO_IPXIP6)
		wanted |= IONIC_ETH_HW_TSO_IPXIP6;
	if (features & NETIF_F_GSO_UDP_TUNNEL)
		wanted |= IONIC_ETH_HW_TSO_UDP;
	if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
		wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;

	return cpu_to_le64(wanted);
}

static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}

static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}

static int ionic_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
		   __func__, (u64)lif->netdev->features, (u64)features);

	err = ionic_set_nic_features(lif, features);

	return err;
}

static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}

static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}

static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);

	return err;
}

static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	netdev->mtu = new_mtu;
	/* if we're not running, nothing more to do */
	if (!netif_running(netdev))
		return 0;

	ionic_stop_queues_reconfig(lif);
	return ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}

static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}

static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}

static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}

int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}

static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}

static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}

static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}
}

static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;
}

static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}
}

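/* Allocate the Tx and Rx qcqs.  Tx queues get their own interrupt only
 * in split-interrupt mode; otherwise each Tx queue shares the
 * interrupt of its Rx partner.
 */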
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}

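/* Push the allocated Tx and Rx queue configurations to the device,
 * then set up RSS (when the netdev has RXHASH enabled) and the Rx
 * filter mode.  On failure, queue pairs already initialized are
 * unwound in reverse order.
 */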
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, false);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}

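/* Fill and enable each Rx queue, then enable its partner Tx queue.
 * On failure, the pairs already enabled are disabled again, with the
 * same -ETIMEDOUT short-circuit used in ionic_txrx_disable().
 */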
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}

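/* Enable traffic on an already-initialized LIF.  The IONIC_LIF_F_UP
 * bit makes this idempotent: a second call while already up is a no-op.
 */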
static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}

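/* ndo_open: allocate and initialize the queues and size the stack's
 * view of the queue counts, but start traffic only if link is already
 * up; otherwise the link status check starts the queues when carrier
 * arrives.
 */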
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_out;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_out:
	ionic_txrx_free(lif);
	return err;
}

static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}

static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}

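/* SR-IOV VF attribute handlers.
 *
 * Getters take vf_op_lock for read around the cached VF config; setters
 * take it for write around the device command and the cache update.
 * All of them return -EBUSY if the device has been detached, and
 * -EINVAL if the VF index is out of range or the VF table is gone.
 *
 * These are typically reached through the PF netdev, for example:
 *	ip link set eth0 vf 0 mac 02:00:00:00:00:01
 */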
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf = vf;
		ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos = 0;
		ivf->spoofchk = ionic->vfs[vf].spoofchk;
		ivf->linkstate = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
		if (!ret)
			ether_addr_copy(ionic->vfs[vf].macaddr, mac);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
			     u8 qos, __be16 proto)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* until someday when we support qos */
	if (qos)
		return -EINVAL;

	if (vlan > 4095)
		return -EINVAL;

	if (proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
		if (!ret)
			ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_rate(struct net_device *netdev, int vf,
			     int tx_min, int tx_max)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret;

	/* setting the min just seems silly */
	if (tx_min)
		return -EINVAL;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
		if (!ret)
			lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_SPOOFCHK, &data);
		if (!ret)
			ionic->vfs[vf].spoofchk = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data = set;	/* convert to u8 for config */
	int ret;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_TRUST, &data);
		if (!ret)
			ionic->vfs[vf].trusted = data;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	u8 data;
	int ret;

	switch (set) {
	case IFLA_VF_LINK_STATE_ENABLE:
		data = IONIC_VF_LINK_STATUS_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		data = IONIC_VF_LINK_STATUS_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
		data = IONIC_VF_LINK_STATUS_AUTO;
		break;
	default:
		return -EINVAL;
	}

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_write(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ret = ionic_set_vf_config(ionic, vf,
					  IONIC_VF_ATTR_LINKSTATE, &data);
		if (!ret)
			ionic->vfs[vf].linkstate = set;
	}

	up_write(&ionic->vf_op_lock);
	return ret;
}

static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit         = ionic_start_xmit,
	.ndo_get_stats64        = ionic_get_stats64,
	.ndo_set_rx_mode        = ionic_ndo_set_rx_mode,
	.ndo_set_features       = ionic_set_features,
	.ndo_set_mac_address    = ionic_set_mac_address,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan        = ionic_set_vf_vlan,
	.ndo_set_vf_trust       = ionic_set_vf_trust,
	.ndo_set_vf_mac         = ionic_set_vf_mac,
	.ndo_set_vf_rate        = ionic_set_vf_rate,
	.ndo_set_vf_spoofchk    = ionic_set_vf_spoofchk,
	.ndo_get_vf_config      = ionic_get_vf_config,
	.ndo_set_vf_link_state  = ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};

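/* Exchange the descriptor rings and their DMA backing between two qcqs,
 * leaving napi, flags, and interrupt setup alone.  Queue reconfig uses
 * this to build a complete new set of rings off to the side and then
 * swap them into the live queues while traffic is stopped.
 */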
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);
}

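/* Reconfigure the queue count and/or ring sizes with minimal downtime:
 * allocate the new rings first, stop traffic, swap the new rings into
 * the live qcqs (keeping the existing interrupt setup where possible),
 * rework the interrupt layout if the split setting or queue count
 * changed, and restart.  The replaced rings are freed on the way out
 * whether or not the reconfig succeeded.
 *
 * A sketch of a typical caller, in the style of the ethtool ring-size
 * handler (names are illustrative):
 *
 *	struct ionic_queue_params qparam;
 *
 *	ionic_init_queue_params(lif, &qparam);
 *	qparam.ntxq_descs = new_tx_descs;
 *	qparam.nrxq_descs = new_rx_descs;
 *	err = ionic_reconfigure_queues(lif, &qparam);
 */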
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int sg_desc_sz;
	unsigned int flags;
	int err = -ENOMEM;
	unsigned int i;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs)
			goto err_out;
	}
	if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs)
			goto err_out;
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      qparam->ntxq_descs,
					      sizeof(struct ionic_txq_desc),
					      sizeof(struct ionic_txq_comp),
					      sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      qparam->nrxq_descs,
					      sizeof(struct ionic_rxq_desc),
					      sizeof(struct ionic_rxq_comp),
					      sizeof(struct ionic_rxq_sg_desc),
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	return err;
}

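/* Allocate the LIF and its netdev, identify the LIF to get its MTU
 * limits and capabilities, and allocate the control queues, the lif
 * info region, and the RSS indirection table.
 */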
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
	lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}

static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}

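/* Handle a firmware-down event: detach the netdev, stop any running
 * queues, and tear the LIF back down to where a later FW Up event can
 * rebuild it.  The IONIC_LIF_F_FW_RESET bit guards against handling
 * the same transition twice.
 */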
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}

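/* Handle a firmware-up event: re-identify the device and port, rebuild
 * the control and data queues, replay the Rx filters, and re-attach the
 * netdev.  This is the inverse of ionic_lif_handle_fw_down().
 */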
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}

void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}

void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}

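/* Bring up the AdminQ using a direct dev_cmd, since the AdminQ itself
 * is not yet available to carry the request, then attach and enable
 * its napi handler.
 */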
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}

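/* Fetch the device's station MAC address and reconcile it with the
 * netdev: if the netdev already has a different non-zero MAC (e.g. one
 * set before a fw-upgrade reset), make sure that address is in the
 * filter list; otherwise adopt the device's MAC as the netdev address.
 */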
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, true, true);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, true, true);

	return 0;
}

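/* Initialize the LIF with the device: issue the lif_init dev_cmd, map
 * the kernel doorbell page, bring up the adminq and (if configured)
 * the notifyq, set up the NIC features and Rx filters, and program the
 * station MAC address.
 */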
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}

static void ionic_lif_notify_work(struct work_struct *ws)
{
}

static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}

static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}

static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}

int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		return err;
	}
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}

void ionic_lif_unregister(struct ionic_lif *lif)
{
	if (lif->ionic->nb.notifier_call) {
		unregister_netdevice_notifier(&lif->ionic->nb);
		cancel_work_sync(&lif->ionic->nb_work);
		lif->ionic->nb.notifier_call = NULL;
	}

	if (lif->netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(lif->netdev);
	lif->registered = false;
}

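/* Query the device for the identity of each queue type we know about,
 * reading the answer back from the dev_cmd data region.  -EINVAL means
 * the device doesn't support that qtype; -EIO means firmware that
 * predates queue identify altogether, in which case the queue type
 * info is simply left zeroed.
 */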
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}

int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
		       union ionic_lif_identity *lid)
{
	struct ionic_dev *idev = &ionic->idev;
	size_t sz;
	int err;

	sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
	mutex_unlock(&ionic->dev_cmd_lock);
	if (err)
		return err;

	dev_dbg(ionic->dev, "capabilities 0x%llx\n",
		le64_to_cpu(lid->capabilities));

	dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
		le32_to_cpu(lid->eth.max_ucast_filters));
	dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
		le32_to_cpu(lid->eth.max_mcast_filters));
	dev_dbg(ionic->dev, "eth.features 0x%llx\n",
		le64_to_cpu(lid->eth.config.features));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
	dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
		le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
	dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
	dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
	dev_dbg(ionic->dev, "eth.config.mtu %d\n",
		le32_to_cpu(lid->eth.config.mtu));

	return 0;
}

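/* Size the LIF's queue and interrupt resources: start from the device
 * limits capped at the number of online CPUs, then keep halving the
 * notifyq, event queue, and Tx/Rx pair counts until the OS grants
 * enough interrupt vectors (one for the adminq/notifyq plus one per
 * Tx/Rx pair plus the event queues).
 */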
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2;	/* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	if (err == -ENOSPC)
		goto try_fewer;

	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}