1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2021 Google, Inc.
5 */
6
7 #include <linux/cpumask.h>
8 #include <linux/etherdevice.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/timer.h>
14 #include <linux/workqueue.h>
15 #include <net/sch_generic.h>
16 #include "gve.h"
17 #include "gve_dqo.h"
18 #include "gve_adminq.h"
19 #include "gve_register.h"
20
21 #define GVE_DEFAULT_RX_COPYBREAK (256)
22
23 #define DEFAULT_MSG_LEVEL (NETIF_MSG_DRV | NETIF_MSG_LINK)
24 #define GVE_VERSION "1.0.0"
25 #define GVE_VERSION_PREFIX "GVE-"
26
27 // Minimum amount of time between queue kicks in msec (10 seconds)
28 #define MIN_TX_TIMEOUT_GAP (1000 * 10)
29
30 const char gve_version_str[] = GVE_VERSION;
31 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
32
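/* ndo_start_xmit: dispatch to the GQI or DQO transmit path depending on the
 * queue format negotiated with the device.
 */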
33 static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
34 {
35 struct gve_priv *priv = netdev_priv(dev);
36
37 if (gve_is_gqi(priv))
38 return gve_tx(skb, dev);
39 else
40 return gve_tx_dqo(skb, dev);
41 }
42
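/* ndo_get_stats64: aggregate per-ring packet and byte counters into the
 * netdev stats, using the u64_stats begin/retry sequence to read consistent
 * values without locking out the datapath.
 */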
43 static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
44 {
45 struct gve_priv *priv = netdev_priv(dev);
46 unsigned int start;
47 u64 packets, bytes;
48 int ring;
49
50 if (priv->rx) {
51 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
52 do {
53 start =
54 u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
55 packets = priv->rx[ring].rpackets;
56 bytes = priv->rx[ring].rbytes;
57 } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
58 start));
59 s->rx_packets += packets;
60 s->rx_bytes += bytes;
61 }
62 }
63 if (priv->tx) {
64 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
65 do {
66 start =
67 u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
68 packets = priv->tx[ring].pkt_done;
69 bytes = priv->tx[ring].bytes_done;
70 } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
71 start));
72 s->tx_packets += packets;
73 s->tx_bytes += bytes;
74 }
75 }
76 }
77
78 static int gve_alloc_counter_array(struct gve_priv *priv)
79 {
80 priv->counter_array =
81 dma_alloc_coherent(&priv->pdev->dev,
82 priv->num_event_counters *
83 sizeof(*priv->counter_array),
84 &priv->counter_array_bus, GFP_KERNEL);
85 if (!priv->counter_array)
86 return -ENOMEM;
87
88 return 0;
89 }
90
91 static void gve_free_counter_array(struct gve_priv *priv)
92 {
93 if (!priv->counter_array)
94 return;
95
96 dma_free_coherent(&priv->pdev->dev,
97 priv->num_event_counters *
98 sizeof(*priv->counter_array),
99 priv->counter_array, priv->counter_array_bus);
100 priv->counter_array = NULL;
101 }
102
103 /* NIC requests to report stats */
104 static void gve_stats_report_task(struct work_struct *work)
105 {
106 struct gve_priv *priv = container_of(work, struct gve_priv,
107 stats_report_task);
108 if (gve_get_do_report_stats(priv)) {
109 gve_handle_report_stats(priv);
110 gve_clear_do_report_stats(priv);
111 }
112 }
113
114 static void gve_stats_report_schedule(struct gve_priv *priv)
115 {
116 if (!gve_get_probe_in_progress(priv) &&
117 !gve_get_reset_in_progress(priv)) {
118 gve_set_do_report_stats(priv);
119 queue_work(priv->gve_wq, &priv->stats_report_task);
120 }
121 }
122
123 static void gve_stats_report_timer(struct timer_list *t)
124 {
125 struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
126
127 mod_timer(&priv->stats_report_timer,
128 round_jiffies(jiffies +
129 msecs_to_jiffies(priv->stats_report_timer_period)));
130 gve_stats_report_schedule(priv);
131 }
132
133 static int gve_alloc_stats_report(struct gve_priv *priv)
134 {
135 int tx_stats_num, rx_stats_num;
136
137 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
138 priv->tx_cfg.num_queues;
139 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
140 priv->rx_cfg.num_queues;
141 priv->stats_report_len = struct_size(priv->stats_report, stats,
142 tx_stats_num + rx_stats_num);
143 priv->stats_report =
144 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
145 &priv->stats_report_bus, GFP_KERNEL);
146 if (!priv->stats_report)
147 return -ENOMEM;
148 /* Set up timer for the report-stats task */
149 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
150 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
151 return 0;
152 }
153
154 static void gve_free_stats_report(struct gve_priv *priv)
155 {
156 if (!priv->stats_report)
157 return;
158
159 del_timer_sync(&priv->stats_report_timer);
160 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
161 priv->stats_report, priv->stats_report_bus);
162 priv->stats_report = NULL;
163 }
164
165 static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
166 {
167 struct gve_priv *priv = arg;
168
169 queue_work(priv->gve_wq, &priv->service_task);
170 return IRQ_HANDLED;
171 }
172
173 static irqreturn_t gve_intr(int irq, void *arg)
174 {
175 struct gve_notify_block *block = arg;
176 struct gve_priv *priv = block->priv;
177
178 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
179 napi_schedule_irqoff(&block->napi);
180 return IRQ_HANDLED;
181 }
182
183 static irqreturn_t gve_intr_dqo(int irq, void *arg)
184 {
185 struct gve_notify_block *block = arg;
186
187 /* Interrupts are automatically masked */
188 napi_schedule_irqoff(&block->napi);
189 return IRQ_HANDLED;
190 }
191
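/* GQI NAPI poll: service the TX and RX rings attached to this notify block.
 * On completion the IRQ is acked and re-armed; if new work is detected after
 * the ack, the IRQ is masked again and the poll is rescheduled.
 */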
192 static int gve_napi_poll(struct napi_struct *napi, int budget)
193 {
194 struct gve_notify_block *block;
195 __be32 __iomem *irq_doorbell;
196 bool reschedule = false;
197 struct gve_priv *priv;
198 int work_done = 0;
199
200 block = container_of(napi, struct gve_notify_block, napi);
201 priv = block->priv;
202
203 if (block->tx)
204 reschedule |= gve_tx_poll(block, budget);
205 if (block->rx) {
206 work_done = gve_rx_poll(block, budget);
207 reschedule |= work_done == budget;
208 }
209
210 if (reschedule)
211 return budget;
212
213 /* Complete processing - don't unmask irq if busy polling is enabled */
214 if (likely(napi_complete_done(napi, work_done))) {
215 irq_doorbell = gve_irq_doorbell(priv, block);
216 iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
217
218 /* Ensure IRQ ACK is visible before we check pending work.
219 * If queue had issued updates, it would be truly visible.
220 */
221 mb();
222
223 if (block->tx)
224 reschedule |= gve_tx_clean_pending(priv, block->tx);
225 if (block->rx)
226 reschedule |= gve_rx_work_pending(block->rx);
227
228 if (reschedule && napi_reschedule(napi))
229 iowrite32be(GVE_IRQ_MASK, irq_doorbell);
230 }
231 return work_done;
232 }
233
234 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
235 {
236 struct gve_notify_block *block =
237 container_of(napi, struct gve_notify_block, napi);
238 struct gve_priv *priv = block->priv;
239 bool reschedule = false;
240 int work_done = 0;
241
242 /* Clear PCI MSI-X Pending Bit Array (PBA)
243 *
244 * This bit is set if an interrupt event occurs while the vector is
245 * masked. If this bit is set and we reenable the interrupt, it will
246 * fire again. Since we're just about to poll the queue state, we don't
247 * need it to fire again.
248 *
249 * Under high softirq load, it's possible that the interrupt condition
250 * is triggered twice before we got the chance to process it.
251 */
252 gve_write_irq_doorbell_dqo(priv, block,
253 GVE_ITR_NO_UPDATE_DQO | GVE_ITR_CLEAR_PBA_BIT_DQO);
254
255 if (block->tx)
256 reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
257
258 if (block->rx) {
259 work_done = gve_rx_poll_dqo(block, budget);
260 reschedule |= work_done == budget;
261 }
262
263 if (reschedule)
264 return budget;
265
266 if (likely(napi_complete_done(napi, work_done))) {
267 /* Enable interrupts again.
268 *
269 * We don't need to repoll afterwards because HW supports the
270 * PCI MSI-X PBA feature.
271 *
272 * Another interrupt would be triggered if a new event came in
273 * since the last one.
274 */
275 gve_write_irq_doorbell_dqo(priv, block,
276 GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
277 }
278
279 return work_done;
280 }
281
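/* Allocate MSI-X vectors: one per notification block plus one management
 * vector (the last one). If fewer vectors are granted, the notify block count
 * is rounded down to an even number and split between TX and RX; e.g. with
 * only 5 vectors enabled, 4 notify blocks remain, capping both TX and RX at
 * 2 queues.
 */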
282 static int gve_alloc_notify_blocks(struct gve_priv *priv)
283 {
284 int num_vecs_requested = priv->num_ntfy_blks + 1;
285 char *name = priv->dev->name;
286 unsigned int active_cpus;
287 int vecs_enabled;
288 int i, j;
289 int err;
290
291 priv->msix_vectors = kvcalloc(num_vecs_requested,
292 sizeof(*priv->msix_vectors), GFP_KERNEL);
293 if (!priv->msix_vectors)
294 return -ENOMEM;
295 for (i = 0; i < num_vecs_requested; i++)
296 priv->msix_vectors[i].entry = i;
297 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
298 GVE_MIN_MSIX, num_vecs_requested);
299 if (vecs_enabled < 0) {
300 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n",
301 GVE_MIN_MSIX, vecs_enabled);
302 err = vecs_enabled;
303 goto abort_with_msix_vectors;
304 }
305 if (vecs_enabled != num_vecs_requested) {
306 int new_num_ntfy_blks = (vecs_enabled - 1) & ~0x1;
307 int vecs_per_type = new_num_ntfy_blks / 2;
308 int vecs_left = new_num_ntfy_blks % 2;
309
310 priv->num_ntfy_blks = new_num_ntfy_blks;
311 priv->mgmt_msix_idx = priv->num_ntfy_blks;
312 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
313 vecs_per_type);
314 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
315 vecs_per_type + vecs_left);
316 dev_err(&priv->pdev->dev,
317 "Could not enable desired msix, only enabled %d, adjusting tx max queues to %d, and rx max queues to %d\n",
318 vecs_enabled, priv->tx_cfg.max_queues,
319 priv->rx_cfg.max_queues);
320 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)
321 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
322 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues)
323 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
324 }
325 /* Half the notification blocks go to TX and half to RX */
326 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
327
328 /* Setup Management Vector - the last vector */
329 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
330 name);
331 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
332 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
333 if (err) {
334 dev_err(&priv->pdev->dev, "Did not receive management vector.\n");
335 goto abort_with_msix_enabled;
336 }
337 priv->irq_db_indices =
338 dma_alloc_coherent(&priv->pdev->dev,
339 priv->num_ntfy_blks *
340 sizeof(*priv->irq_db_indices),
341 &priv->irq_db_indices_bus, GFP_KERNEL);
342 if (!priv->irq_db_indices) {
343 err = -ENOMEM;
344 goto abort_with_mgmt_vector;
345 }
346
347 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks *
348 sizeof(*priv->ntfy_blocks), GFP_KERNEL);
349 if (!priv->ntfy_blocks) {
350 err = -ENOMEM;
351 goto abort_with_irq_db_indices;
352 }
353
354 /* Setup the other blocks - the first n-1 vectors */
355 for (i = 0; i < priv->num_ntfy_blks; i++) {
356 struct gve_notify_block *block = &priv->ntfy_blocks[i];
357 int msix_idx = i;
358
359 snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
360 name, i);
361 block->priv = priv;
362 err = request_irq(priv->msix_vectors[msix_idx].vector,
363 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
364 0, block->name, block);
365 if (err) {
366 dev_err(&priv->pdev->dev,
367 "Failed to receive msix vector %d\n", i);
368 goto abort_with_some_ntfy_blocks;
369 }
370 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
371 get_cpu_mask(i % active_cpus));
372 block->irq_db_index = &priv->irq_db_indices[i].index;
373 }
374 return 0;
375 abort_with_some_ntfy_blocks:
376 for (j = 0; j < i; j++) {
377 struct gve_notify_block *block = &priv->ntfy_blocks[j];
378 int msix_idx = j;
379
380 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
381 NULL);
382 free_irq(priv->msix_vectors[msix_idx].vector, block);
383 }
384 kvfree(priv->ntfy_blocks);
385 priv->ntfy_blocks = NULL;
386 abort_with_irq_db_indices:
387 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
388 sizeof(*priv->irq_db_indices),
389 priv->irq_db_indices, priv->irq_db_indices_bus);
390 priv->irq_db_indices = NULL;
391 abort_with_mgmt_vector:
392 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
393 abort_with_msix_enabled:
394 pci_disable_msix(priv->pdev);
395 abort_with_msix_vectors:
396 kvfree(priv->msix_vectors);
397 priv->msix_vectors = NULL;
398 return err;
399 }
400
401 static void gve_free_notify_blocks(struct gve_priv *priv)
402 {
403 int i;
404
405 if (!priv->msix_vectors)
406 return;
407
408 /* Free the irqs */
409 for (i = 0; i < priv->num_ntfy_blks; i++) {
410 struct gve_notify_block *block = &priv->ntfy_blocks[i];
411 int msix_idx = i;
412
413 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
414 NULL);
415 free_irq(priv->msix_vectors[msix_idx].vector, block);
416 }
417 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
418 kvfree(priv->ntfy_blocks);
419 priv->ntfy_blocks = NULL;
420 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks *
421 sizeof(*priv->irq_db_indices),
422 priv->irq_db_indices, priv->irq_db_indices_bus);
423 priv->irq_db_indices = NULL;
424 pci_disable_msix(priv->pdev);
425 kvfree(priv->msix_vectors);
426 priv->msix_vectors = NULL;
427 }
428
429 static int gve_setup_device_resources(struct gve_priv *priv)
430 {
431 int err;
432
433 err = gve_alloc_counter_array(priv);
434 if (err)
435 return err;
436 err = gve_alloc_notify_blocks(priv);
437 if (err)
438 goto abort_with_counter;
439 err = gve_alloc_stats_report(priv);
440 if (err)
441 goto abort_with_ntfy_blocks;
442 err = gve_adminq_configure_device_resources(priv,
443 priv->counter_array_bus,
444 priv->num_event_counters,
445 priv->irq_db_indices_bus,
446 priv->num_ntfy_blks);
447 if (unlikely(err)) {
448 dev_err(&priv->pdev->dev,
449 "could not setup device_resources: err=%d\n", err);
450 err = -ENXIO;
451 goto abort_with_stats_report;
452 }
453
454 if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
455 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo),
456 GFP_KERNEL);
457 if (!priv->ptype_lut_dqo) {
458 err = -ENOMEM;
459 goto abort_with_stats_report;
460 }
461 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
462 if (err) {
463 dev_err(&priv->pdev->dev,
464 "Failed to get ptype map: err=%d\n", err);
465 goto abort_with_ptype_lut;
466 }
467 }
468
469 err = gve_adminq_report_stats(priv, priv->stats_report_len,
470 priv->stats_report_bus,
471 GVE_STATS_REPORT_TIMER_PERIOD);
472 if (err)
473 dev_err(&priv->pdev->dev,
474 "Failed to report stats: err=%d\n", err);
475 gve_set_device_resources_ok(priv);
476 return 0;
477
478 abort_with_ptype_lut:
479 kvfree(priv->ptype_lut_dqo);
480 priv->ptype_lut_dqo = NULL;
481 abort_with_stats_report:
482 gve_free_stats_report(priv);
483 abort_with_ntfy_blocks:
484 gve_free_notify_blocks(priv);
485 abort_with_counter:
486 gve_free_counter_array(priv);
487
488 return err;
489 }
490
491 static void gve_trigger_reset(struct gve_priv *priv);
492
493 static void gve_teardown_device_resources(struct gve_priv *priv)
494 {
495 int err;
496
497 /* Tell device its resources are being freed */
498 if (gve_get_device_resources_ok(priv)) {
499 /* detach the stats report */
500 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
501 if (err) {
502 dev_err(&priv->pdev->dev,
503 "Failed to detach stats report: err=%d\n", err);
504 gve_trigger_reset(priv);
505 }
506 err = gve_adminq_deconfigure_device_resources(priv);
507 if (err) {
508 dev_err(&priv->pdev->dev,
509 "Could not deconfigure device resources: err=%d\n",
510 err);
511 gve_trigger_reset(priv);
512 }
513 }
514
515 kvfree(priv->ptype_lut_dqo);
516 priv->ptype_lut_dqo = NULL;
517
518 gve_free_counter_array(priv);
519 gve_free_notify_blocks(priv);
520 gve_free_stats_report(priv);
521 gve_clear_device_resources_ok(priv);
522 }
523
524 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
525 int (*gve_poll)(struct napi_struct *, int))
526 {
527 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
528
529 netif_napi_add(priv->dev, &block->napi, gve_poll);
530 }
531
532 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
533 {
534 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
535
536 netif_napi_del(&block->napi);
537 }
538
539 static int gve_register_qpls(struct gve_priv *priv)
540 {
541 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
542 int err;
543 int i;
544
545 for (i = 0; i < num_qpls; i++) {
546 err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
547 if (err) {
548 netif_err(priv, drv, priv->dev,
549 "failed to register queue page list %d\n",
550 priv->qpls[i].id);
551 /* This failure will trigger a reset - no need to clean
552 * up
553 */
554 return err;
555 }
556 }
557 return 0;
558 }
559
560 static int gve_unregister_qpls(struct gve_priv *priv)
561 {
562 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
563 int err;
564 int i;
565
566 for (i = 0; i < num_qpls; i++) {
567 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
568 /* This failure will trigger a reset - no need to clean up */
569 if (err) {
570 netif_err(priv, drv, priv->dev,
571 "Failed to unregister queue page list %d\n",
572 priv->qpls[i].id);
573 return err;
574 }
575 }
576 return 0;
577 }
578
579 static int gve_create_rings(struct gve_priv *priv)
580 {
581 int err;
582 int i;
583
584 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
585 if (err) {
586 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
587 priv->tx_cfg.num_queues);
588 /* This failure will trigger a reset - no need to clean
589 * up
590 */
591 return err;
592 }
593 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
594 priv->tx_cfg.num_queues);
595
596 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
597 if (err) {
598 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
599 priv->rx_cfg.num_queues);
600 /* This failure will trigger a reset - no need to clean
601 * up
602 */
603 return err;
604 }
605 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
606 priv->rx_cfg.num_queues);
607
608 if (gve_is_gqi(priv)) {
609 /* Rx data ring has been prefilled with packet buffers at queue
610 * allocation time.
611 *
612 * Write the doorbell to provide descriptor slots and packet
613 * buffers to the NIC.
614 */
615 for (i = 0; i < priv->rx_cfg.num_queues; i++)
616 gve_rx_write_doorbell(priv, &priv->rx[i]);
617 } else {
618 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
619 /* Post buffers and ring doorbell. */
620 gve_rx_post_buffers_dqo(&priv->rx[i]);
621 }
622 }
623
624 return 0;
625 }
626
627 static void add_napi_init_sync_stats(struct gve_priv *priv,
628 int (*napi_poll)(struct napi_struct *napi,
629 int budget))
630 {
631 int i;
632
633 /* Add tx napi & init sync stats */
634 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
635 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
636
637 u64_stats_init(&priv->tx[i].statss);
638 priv->tx[i].ntfy_id = ntfy_idx;
639 gve_add_napi(priv, ntfy_idx, napi_poll);
640 }
641 /* Add rx napi & init sync stats */
642 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
643 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
644
645 u64_stats_init(&priv->rx[i].statss);
646 priv->rx[i].ntfy_id = ntfy_idx;
647 gve_add_napi(priv, ntfy_idx, napi_poll);
648 }
649 }
650
651 static void gve_tx_free_rings(struct gve_priv *priv)
652 {
653 if (gve_is_gqi(priv)) {
654 gve_tx_free_rings_gqi(priv);
655 } else {
656 gve_tx_free_rings_dqo(priv);
657 }
658 }
659
660 static int gve_alloc_rings(struct gve_priv *priv)
661 {
662 int err;
663
664 /* Setup tx rings */
665 priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
666 GFP_KERNEL);
667 if (!priv->tx)
668 return -ENOMEM;
669
670 if (gve_is_gqi(priv))
671 err = gve_tx_alloc_rings(priv);
672 else
673 err = gve_tx_alloc_rings_dqo(priv);
674 if (err)
675 goto free_tx;
676
677 /* Setup rx rings */
678 priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
679 GFP_KERNEL);
680 if (!priv->rx) {
681 err = -ENOMEM;
682 goto free_tx_queue;
683 }
684
685 if (gve_is_gqi(priv))
686 err = gve_rx_alloc_rings(priv);
687 else
688 err = gve_rx_alloc_rings_dqo(priv);
689 if (err)
690 goto free_rx;
691
692 if (gve_is_gqi(priv))
693 add_napi_init_sync_stats(priv, gve_napi_poll);
694 else
695 add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
696
697 return 0;
698
699 free_rx:
700 kvfree(priv->rx);
701 priv->rx = NULL;
702 free_tx_queue:
703 gve_tx_free_rings(priv);
704 free_tx:
705 kvfree(priv->tx);
706 priv->tx = NULL;
707 return err;
708 }
709
710 static int gve_destroy_rings(struct gve_priv *priv)
711 {
712 int err;
713
714 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
715 if (err) {
716 netif_err(priv, drv, priv->dev,
717 "failed to destroy tx queues\n");
718 /* This failure will trigger a reset - no need to clean up */
719 return err;
720 }
721 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
722 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
723 if (err) {
724 netif_err(priv, drv, priv->dev,
725 "failed to destroy rx queues\n");
726 /* This failure will trigger a reset - no need to clean up */
727 return err;
728 }
729 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
730 return 0;
731 }
732
733 static void gve_rx_free_rings(struct gve_priv *priv)
734 {
735 if (gve_is_gqi(priv))
736 gve_rx_free_rings_gqi(priv);
737 else
738 gve_rx_free_rings_dqo(priv);
739 }
740
741 static void gve_free_rings(struct gve_priv *priv)
742 {
743 int ntfy_idx;
744 int i;
745
746 if (priv->tx) {
747 for (i = 0; i < priv->tx_cfg.num_queues; i++) {
748 ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
749 gve_remove_napi(priv, ntfy_idx);
750 }
751 gve_tx_free_rings(priv);
752 kvfree(priv->tx);
753 priv->tx = NULL;
754 }
755 if (priv->rx) {
756 for (i = 0; i < priv->rx_cfg.num_queues; i++) {
757 ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
758 gve_remove_napi(priv, ntfy_idx);
759 }
760 gve_rx_free_rings(priv);
761 kvfree(priv->rx);
762 priv->rx = NULL;
763 }
764 }
765
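/* Allocate a page and DMA-map it for device access, counting allocation and
 * mapping failures in the driver stats.
 */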
766 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
767 struct page **page, dma_addr_t *dma,
768 enum dma_data_direction dir, gfp_t gfp_flags)
769 {
770 *page = alloc_page(gfp_flags);
771 if (!*page) {
772 priv->page_alloc_fail++;
773 return -ENOMEM;
774 }
775 *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
776 if (dma_mapping_error(dev, *dma)) {
777 priv->dma_mapping_error++;
778 put_page(*page);
779 return -ENOMEM;
780 }
781 return 0;
782 }
783
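/* Allocate and DMA-map the pages backing one queue page list (QPL); the list
 * is registered with the device later via gve_register_qpls(). The caller
 * handles cleanup if any step fails.
 */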
784 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
785 int pages)
786 {
787 struct gve_queue_page_list *qpl = &priv->qpls[id];
788 int err;
789 int i;
790
791 if (pages + priv->num_registered_pages > priv->max_registered_pages) {
792 netif_err(priv, drv, priv->dev,
793 "Reached max number of registered pages %llu > %llu\n",
794 pages + priv->num_registered_pages,
795 priv->max_registered_pages);
796 return -EINVAL;
797 }
798
799 qpl->id = id;
800 qpl->num_entries = 0;
801 qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
802 /* caller handles clean up */
803 if (!qpl->pages)
804 return -ENOMEM;
805 qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
806 /* caller handles clean up */
807 if (!qpl->page_buses)
808 return -ENOMEM;
809
810 for (i = 0; i < pages; i++) {
811 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
812 &qpl->page_buses[i],
813 gve_qpl_dma_dir(priv, id), GFP_KERNEL);
814 /* caller handles clean up */
815 if (err)
816 return -ENOMEM;
817 qpl->num_entries++;
818 }
819 priv->num_registered_pages += pages;
820
821 return 0;
822 }
823
824 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
825 enum dma_data_direction dir)
826 {
827 if (!dma_mapping_error(dev, dma))
828 dma_unmap_page(dev, dma, PAGE_SIZE, dir);
829 if (page)
830 put_page(page);
831 }
832
833 static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
834 {
835 struct gve_queue_page_list *qpl = &priv->qpls[id];
836 int i;
837
838 if (!qpl->pages)
839 return;
840 if (!qpl->page_buses)
841 goto free_pages;
842
843 for (i = 0; i < qpl->num_entries; i++)
844 gve_free_page(&priv->pdev->dev, qpl->pages[i],
845 qpl->page_buses[i], gve_qpl_dma_dir(priv, id));
846
847 kvfree(qpl->page_buses);
848 free_pages:
849 kvfree(qpl->pages);
850 priv->num_registered_pages -= qpl->num_entries;
851 }
852
853 static int gve_alloc_qpls(struct gve_priv *priv)
854 {
855 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
856 int i, j;
857 int err;
858
859 if (num_qpls == 0)
860 return 0;
861
862 priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
863 if (!priv->qpls)
864 return -ENOMEM;
865
866 for (i = 0; i < gve_num_tx_qpls(priv); i++) {
867 err = gve_alloc_queue_page_list(priv, i,
868 priv->tx_pages_per_qpl);
869 if (err)
870 goto free_qpls;
871 }
872 for (; i < num_qpls; i++) {
873 err = gve_alloc_queue_page_list(priv, i,
874 priv->rx_data_slot_cnt);
875 if (err)
876 goto free_qpls;
877 }
878
879 priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
880 sizeof(unsigned long) * BITS_PER_BYTE;
881 priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
882 sizeof(unsigned long), GFP_KERNEL);
883 if (!priv->qpl_cfg.qpl_id_map) {
884 err = -ENOMEM;
885 goto free_qpls;
886 }
887
888 return 0;
889
890 free_qpls:
891 for (j = 0; j <= i; j++)
892 gve_free_queue_page_list(priv, j);
893 kvfree(priv->qpls);
894 return err;
895 }
896
897 static void gve_free_qpls(struct gve_priv *priv)
898 {
899 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
900 int i;
901
902 if (num_qpls == 0)
903 return;
904
905 kvfree(priv->qpl_cfg.qpl_id_map);
906
907 for (i = 0; i < num_qpls; i++)
908 gve_free_queue_page_list(priv, i);
909
910 kvfree(priv->qpls);
911 }
912
913 /* Use this to schedule a reset when the device is capable of continuing
914 * to handle other requests in its current state. If it is not, do a reset
915 * in thread instead.
916 */
917 void gve_schedule_reset(struct gve_priv *priv)
918 {
919 gve_set_do_reset(priv);
920 queue_work(priv->gve_wq, &priv->service_task);
921 }
922
923 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
924 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
925 static void gve_turndown(struct gve_priv *priv);
926 static void gve_turnup(struct gve_priv *priv);
927
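/* ndo_open: bring the device up - allocate QPLs and rings, register the QPLs
 * and create the queues on the device, then enable NAPI and interrupts via
 * gve_turnup(). Failures after the device has been touched trigger a reset.
 */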
928 static int gve_open(struct net_device *dev)
929 {
930 struct gve_priv *priv = netdev_priv(dev);
931 int err;
932
933 err = gve_alloc_qpls(priv);
934 if (err)
935 return err;
936
937 err = gve_alloc_rings(priv);
938 if (err)
939 goto free_qpls;
940
941 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
942 if (err)
943 goto free_rings;
944 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues);
945 if (err)
946 goto free_rings;
947
948 err = gve_register_qpls(priv);
949 if (err)
950 goto reset;
951
952 if (!gve_is_gqi(priv)) {
953 /* Hard code this for now. This may be tuned in the future for
954 * performance.
955 */
956 priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
957 }
958 err = gve_create_rings(priv);
959 if (err)
960 goto reset;
961
962 gve_set_device_rings_ok(priv);
963
964 if (gve_get_report_stats(priv))
965 mod_timer(&priv->stats_report_timer,
966 round_jiffies(jiffies +
967 msecs_to_jiffies(priv->stats_report_timer_period)));
968
969 gve_turnup(priv);
970 queue_work(priv->gve_wq, &priv->service_task);
971 priv->interface_up_cnt++;
972 return 0;
973
974 free_rings:
975 gve_free_rings(priv);
976 free_qpls:
977 gve_free_qpls(priv);
978 return err;
979
980 reset:
981 /* This must have been called from a reset due to the rtnl lock
982 * so just return at this point.
983 */
984 if (gve_get_reset_in_progress(priv))
985 return err;
986 /* Otherwise reset before returning */
987 gve_reset_and_teardown(priv, true);
988 /* if this fails there is nothing we can do so just ignore the return */
989 gve_reset_recovery(priv, false);
990 /* return the original error */
991 return err;
992 }
993
994 static int gve_close(struct net_device *dev)
995 {
996 struct gve_priv *priv = netdev_priv(dev);
997 int err;
998
999 netif_carrier_off(dev);
1000 if (gve_get_device_rings_ok(priv)) {
1001 gve_turndown(priv);
1002 err = gve_destroy_rings(priv);
1003 if (err)
1004 goto err;
1005 err = gve_unregister_qpls(priv);
1006 if (err)
1007 goto err;
1008 gve_clear_device_rings_ok(priv);
1009 }
1010 del_timer_sync(&priv->stats_report_timer);
1011
1012 gve_free_rings(priv);
1013 gve_free_qpls(priv);
1014 priv->interface_down_cnt++;
1015 return 0;
1016
1017 err:
1018 /* This must have been called from a reset due to the rtnl lock
1019 * so just return at this point.
1020 */
1021 if (gve_get_reset_in_progress(priv))
1022 return err;
1023 /* Otherwise reset before returning */
1024 gve_reset_and_teardown(priv, true);
1025 return gve_reset_recovery(priv, false);
1026 }
1027
1028 int gve_adjust_queues(struct gve_priv *priv,
1029 struct gve_queue_config new_rx_config,
1030 struct gve_queue_config new_tx_config)
1031 {
1032 int err;
1033
1034 if (netif_carrier_ok(priv->dev)) {
1035 /* To make this process as simple as possible we teardown the
1036 * device, set the new configuration, and then bring the device
1037 * up again.
1038 */
1039 err = gve_close(priv->dev);
1040 /* we have already tried to reset in close,
1041 * just fail at this point
1042 */
1043 if (err)
1044 return err;
1045 priv->tx_cfg = new_tx_config;
1046 priv->rx_cfg = new_rx_config;
1047
1048 err = gve_open(priv->dev);
1049 if (err)
1050 goto err;
1051
1052 return 0;
1053 }
1054 /* Set the config for the next up. */
1055 priv->tx_cfg = new_tx_config;
1056 priv->rx_cfg = new_rx_config;
1057
1058 return 0;
1059 err:
1060 netif_err(priv, drv, priv->dev,
1061 "Adjust queues failed! !!! DISABLING ALL QUEUES !!!\n");
1062 gve_turndown(priv);
1063 return err;
1064 }
1065
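/* Quiesce the datapath: drop the carrier, disable NAPI on every notify block
 * and stop the TX queues so no new work is generated.
 */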
1066 static void gve_turndown(struct gve_priv *priv)
1067 {
1068 int idx;
1069
1070 if (netif_carrier_ok(priv->dev))
1071 netif_carrier_off(priv->dev);
1072
1073 if (!gve_get_napi_enabled(priv))
1074 return;
1075
1076 /* Disable napi to prevent more work from coming in */
1077 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1078 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1079 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1080
1081 napi_disable(&block->napi);
1082 }
1083 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1084 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1085 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1086
1087 napi_disable(&block->napi);
1088 }
1089
1090 /* Stop tx queues */
1091 netif_tx_disable(priv->dev);
1092
1093 gve_clear_napi_enabled(priv);
1094 gve_clear_report_stats(priv);
1095 }
1096
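/* Re-enable the datapath: start the TX queues, enable NAPI, and either unmask
 * each queue's interrupt (GQI) or re-arm it with the configured coalescing
 * interval (DQO).
 */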
1097 static void gve_turnup(struct gve_priv *priv)
1098 {
1099 int idx;
1100
1101 /* Start the tx queues */
1102 netif_tx_start_all_queues(priv->dev);
1103
1104 /* Enable napi and unmask interrupts for all queues */
1105 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1106 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
1107 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1108
1109 napi_enable(&block->napi);
1110 if (gve_is_gqi(priv)) {
1111 iowrite32be(0, gve_irq_doorbell(priv, block));
1112 } else {
1113 gve_set_itr_coalesce_usecs_dqo(priv, block,
1114 priv->tx_coalesce_usecs);
1115 }
1116 }
1117 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1118 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
1119 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
1120
1121 napi_enable(&block->napi);
1122 if (gve_is_gqi(priv)) {
1123 iowrite32be(0, gve_irq_doorbell(priv, block));
1124 } else {
1125 gve_set_itr_coalesce_usecs_dqo(priv, block,
1126 priv->rx_coalesce_usecs);
1127 }
1128 }
1129
1130 gve_set_napi_enabled(priv);
1131 }
1132
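/* ndo_tx_timeout: if the last kick was long enough ago and the NIC reports
 * completions the driver has not processed yet, kick the queue by masking its
 * IRQ and scheduling NAPI; otherwise schedule a full device reset.
 */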
1133 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
1134 {
1135 struct gve_notify_block *block;
1136 struct gve_tx_ring *tx = NULL;
1137 struct gve_priv *priv;
1138 u32 last_nic_done;
1139 u32 current_time;
1140 u32 ntfy_idx;
1141
1142 netdev_info(dev, "Timeout on tx queue, %d", txqueue);
1143 priv = netdev_priv(dev);
1144 if (txqueue > priv->tx_cfg.num_queues)
1145 goto reset;
1146
1147 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
1148 if (ntfy_idx >= priv->num_ntfy_blks)
1149 goto reset;
1150
1151 block = &priv->ntfy_blocks[ntfy_idx];
1152 tx = block->tx;
1153
1154 current_time = jiffies_to_msecs(jiffies);
1155 if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
1156 goto reset;
1157
1158 /* Check to see if there are missed completions, which will allow us to
1159 * kick the queue.
1160 */
1161 last_nic_done = gve_tx_load_event_counter(priv, tx);
1162 if (last_nic_done - tx->done) {
1163 netdev_info(dev, "Kicking queue %d", txqueue);
1164 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
1165 napi_schedule(&block->napi);
1166 tx->last_kick_msec = current_time;
1167 goto out;
1168 } // Else reset.
1169
1170 reset:
1171 gve_schedule_reset(priv);
1172
1173 out:
1174 if (tx)
1175 tx->queue_timeout++;
1176 priv->tx_timeo_cnt++;
1177 }
1178
1179 static int gve_set_features(struct net_device *netdev,
1180 netdev_features_t features)
1181 {
1182 const netdev_features_t orig_features = netdev->features;
1183 struct gve_priv *priv = netdev_priv(netdev);
1184 int err;
1185
1186 if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
1187 netdev->features ^= NETIF_F_LRO;
1188 if (netif_carrier_ok(netdev)) {
1189 /* To make this process as simple as possible we
1190 * teardown the device, set the new configuration,
1191 * and then bring the device up again.
1192 */
1193 err = gve_close(netdev);
1194 /* We have already tried to reset in close, just fail
1195 * at this point.
1196 */
1197 if (err)
1198 goto err;
1199
1200 err = gve_open(netdev);
1201 if (err)
1202 goto err;
1203 }
1204 }
1205
1206 return 0;
1207 err:
1208 /* Reverts the change on error. */
1209 netdev->features = orig_features;
1210 netif_err(priv, drv, netdev,
1211 "Set features failed! !!! DISABLING ALL QUEUES !!!\n");
1212 return err;
1213 }
1214
1215 static const struct net_device_ops gve_netdev_ops = {
1216 .ndo_start_xmit = gve_start_xmit,
1217 .ndo_open = gve_open,
1218 .ndo_stop = gve_close,
1219 .ndo_get_stats64 = gve_get_stats,
1220 .ndo_tx_timeout = gve_tx_timeout,
1221 .ndo_set_features = gve_set_features,
1222 };
1223
1224 static void gve_handle_status(struct gve_priv *priv, u32 status)
1225 {
1226 if (GVE_DEVICE_STATUS_RESET_MASK & status) {
1227 dev_info(&priv->pdev->dev, "Device requested reset.\n");
1228 gve_set_do_reset(priv);
1229 }
1230 if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
1231 priv->stats_report_trigger_cnt++;
1232 gve_set_do_report_stats(priv);
1233 }
1234 }
1235
1236 static void gve_handle_reset(struct gve_priv *priv)
1237 {
1238 /* A service task will be scheduled at the end of probe to catch any
1239 * resets that need to happen, and we don't want to reset until
1240 * probe is done.
1241 */
1242 if (gve_get_probe_in_progress(priv))
1243 return;
1244
1245 if (gve_get_do_reset(priv)) {
1246 rtnl_lock();
1247 gve_reset(priv, false);
1248 rtnl_unlock();
1249 }
1250 }
1251
1252 void gve_handle_report_stats(struct gve_priv *priv)
1253 {
1254 struct stats *stats = priv->stats_report->stats;
1255 int idx, stats_idx = 0;
1256 unsigned int start = 0;
1257 u64 tx_bytes;
1258
1259 if (!gve_get_report_stats(priv))
1260 return;
1261
1262 be64_add_cpu(&priv->stats_report->written_count, 1);
1263 /* tx stats */
1264 if (priv->tx) {
1265 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
1266 u32 last_completion = 0;
1267 u32 tx_frames = 0;
1268
1269 /* DQO doesn't currently support these metrics. */
1270 if (gve_is_gqi(priv)) {
1271 last_completion = priv->tx[idx].done;
1272 tx_frames = priv->tx[idx].req;
1273 }
1274
1275 do {
1276 start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
1277 tx_bytes = priv->tx[idx].bytes_done;
1278 } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
1279 stats[stats_idx++] = (struct stats) {
1280 .stat_name = cpu_to_be32(TX_WAKE_CNT),
1281 .value = cpu_to_be64(priv->tx[idx].wake_queue),
1282 .queue_id = cpu_to_be32(idx),
1283 };
1284 stats[stats_idx++] = (struct stats) {
1285 .stat_name = cpu_to_be32(TX_STOP_CNT),
1286 .value = cpu_to_be64(priv->tx[idx].stop_queue),
1287 .queue_id = cpu_to_be32(idx),
1288 };
1289 stats[stats_idx++] = (struct stats) {
1290 .stat_name = cpu_to_be32(TX_FRAMES_SENT),
1291 .value = cpu_to_be64(tx_frames),
1292 .queue_id = cpu_to_be32(idx),
1293 };
1294 stats[stats_idx++] = (struct stats) {
1295 .stat_name = cpu_to_be32(TX_BYTES_SENT),
1296 .value = cpu_to_be64(tx_bytes),
1297 .queue_id = cpu_to_be32(idx),
1298 };
1299 stats[stats_idx++] = (struct stats) {
1300 .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
1301 .value = cpu_to_be64(last_completion),
1302 .queue_id = cpu_to_be32(idx),
1303 };
1304 stats[stats_idx++] = (struct stats) {
1305 .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
1306 .value = cpu_to_be64(priv->tx[idx].queue_timeout),
1307 .queue_id = cpu_to_be32(idx),
1308 };
1309 }
1310 }
1311 /* rx stats */
1312 if (priv->rx) {
1313 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
1314 stats[stats_idx++] = (struct stats) {
1315 .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
1316 .value = cpu_to_be64(priv->rx[idx].desc.seqno),
1317 .queue_id = cpu_to_be32(idx),
1318 };
1319 stats[stats_idx++] = (struct stats) {
1320 .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
1321 .value = cpu_to_be64(priv->rx[idx].fill_cnt),
1322 .queue_id = cpu_to_be32(idx),
1323 };
1324 }
1325 }
1326 }
1327
1328 static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
1329 {
1330 if (!gve_get_napi_enabled(priv))
1331 return;
1332
1333 if (link_status == netif_carrier_ok(priv->dev))
1334 return;
1335
1336 if (link_status) {
1337 netdev_info(priv->dev, "Device link is up.\n");
1338 netif_carrier_on(priv->dev);
1339 } else {
1340 netdev_info(priv->dev, "Device link is down.\n");
1341 netif_carrier_off(priv->dev);
1342 }
1343 }
1344
1345 /* Handle NIC status register changes, reset requests and report stats */
1346 static void gve_service_task(struct work_struct *work)
1347 {
1348 struct gve_priv *priv = container_of(work, struct gve_priv,
1349 service_task);
1350 u32 status = ioread32be(&priv->reg_bar0->device_status);
1351
1352 gve_handle_status(priv, status);
1353
1354 gve_handle_reset(priv);
1355 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
1356 }
1357
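/* Set up the admin queue and, unless this is a post-reset recovery, query the
 * device description to size the MTU, MSI-X vectors and default queue counts
 * before allocating device resources.
 */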
1358 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
1359 {
1360 int num_ntfy;
1361 int err;
1362
1363 /* Set up the adminq */
1364 err = gve_adminq_alloc(&priv->pdev->dev, priv);
1365 if (err) {
1366 dev_err(&priv->pdev->dev,
1367 "Failed to alloc admin queue: err=%d\n", err);
1368 return err;
1369 }
1370
1371 if (skip_describe_device)
1372 goto setup_device;
1373
1374 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED;
1375 /* Get the initial information we need from the device */
1376 err = gve_adminq_describe_device(priv);
1377 if (err) {
1378 dev_err(&priv->pdev->dev,
1379 "Could not get device information: err=%d\n", err);
1380 goto err;
1381 }
1382 priv->dev->mtu = priv->dev->max_mtu;
1383 num_ntfy = pci_msix_vec_count(priv->pdev);
1384 if (num_ntfy <= 0) {
1385 dev_err(&priv->pdev->dev,
1386 "could not count MSI-x vectors: err=%d\n", num_ntfy);
1387 err = num_ntfy;
1388 goto err;
1389 } else if (num_ntfy < GVE_MIN_MSIX) {
1390 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n",
1391 GVE_MIN_MSIX, num_ntfy);
1392 err = -EINVAL;
1393 goto err;
1394 }
1395
1396 priv->num_registered_pages = 0;
1397 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
1398 /* gvnic has one Notification Block per MSI-x vector, except for the
1399 * management vector
1400 */
1401 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1;
1402 priv->mgmt_msix_idx = priv->num_ntfy_blks;
1403
1404 priv->tx_cfg.max_queues =
1405 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2);
1406 priv->rx_cfg.max_queues =
1407 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2);
1408
1409 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues;
1410 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues;
1411 if (priv->default_num_queues > 0) {
1412 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues,
1413 priv->tx_cfg.num_queues);
1414 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues,
1415 priv->rx_cfg.num_queues);
1416 }
1417
1418 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
1419 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
1420 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
1421 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
1422
1423 if (!gve_is_gqi(priv)) {
1424 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO;
1425 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO;
1426 }
1427
1428 setup_device:
1429 err = gve_setup_device_resources(priv);
1430 if (!err)
1431 return 0;
1432 err:
1433 gve_adminq_free(&priv->pdev->dev, priv);
1434 return err;
1435 }
1436
1437 static void gve_teardown_priv_resources(struct gve_priv *priv)
1438 {
1439 gve_teardown_device_resources(priv);
1440 gve_adminq_free(&priv->pdev->dev, priv);
1441 }
1442
1443 static void gve_trigger_reset(struct gve_priv *priv)
1444 {
1445 /* Reset the device by releasing the AQ */
1446 gve_adminq_release(priv);
1447 }
1448
1449 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up)
1450 {
1451 gve_trigger_reset(priv);
1452 /* With the reset having already happened, close cannot fail */
1453 if (was_up)
1454 gve_close(priv->dev);
1455 gve_teardown_priv_resources(priv);
1456 }
1457
1458 static int gve_reset_recovery(struct gve_priv *priv, bool was_up)
1459 {
1460 int err;
1461
1462 err = gve_init_priv(priv, true);
1463 if (err)
1464 goto err;
1465 if (was_up) {
1466 err = gve_open(priv->dev);
1467 if (err)
1468 goto err;
1469 }
1470 return 0;
1471 err:
1472 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n");
1473 gve_turndown(priv);
1474 return err;
1475 }
1476
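/* Full device reset: tear down (cleanly if attempt_teardown, otherwise by
 * releasing the admin queue first), then rebuild the priv resources and
 * reopen the interface if it was up. Runs under the rtnl lock (see
 * gve_handle_reset()).
 */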
1477 int gve_reset(struct gve_priv *priv, bool attempt_teardown)
1478 {
1479 bool was_up = netif_carrier_ok(priv->dev);
1480 int err;
1481
1482 dev_info(&priv->pdev->dev, "Performing reset\n");
1483 gve_clear_do_reset(priv);
1484 gve_set_reset_in_progress(priv);
1485 /* If we aren't attempting to teardown normally, just go turndown and
1486 * reset right away.
1487 */
1488 if (!attempt_teardown) {
1489 gve_turndown(priv);
1490 gve_reset_and_teardown(priv, was_up);
1491 } else {
1492 /* Otherwise attempt to close normally */
1493 if (was_up) {
1494 err = gve_close(priv->dev);
1495 /* If that fails reset as we did above */
1496 if (err)
1497 gve_reset_and_teardown(priv, was_up);
1498 }
1499 /* Clean up any remaining resources */
1500 gve_teardown_priv_resources(priv);
1501 }
1502
1503 /* Set it all back up */
1504 err = gve_reset_recovery(priv, was_up);
1505 gve_clear_reset_in_progress(priv);
1506 priv->reset_cnt++;
1507 priv->interface_up_cnt = 0;
1508 priv->interface_down_cnt = 0;
1509 priv->stats_report_trigger_cnt = 0;
1510 return err;
1511 }
1512
1513 static void gve_write_version(u8 __iomem *driver_version_register)
1514 {
1515 const char *c = gve_version_prefix;
1516
1517 while (*c) {
1518 writeb(*c, driver_version_register);
1519 c++;
1520 }
1521
1522 c = gve_version_str;
1523 while (*c) {
1524 writeb(*c, driver_version_register);
1525 c++;
1526 }
1527 writeb('\n', driver_version_register);
1528 }
1529
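/* PCI probe: map the register and doorbell BARs, advertise the driver version
 * to the device, allocate the netdev sized by the device's maximum queue
 * counts, initialize priv and register the netdev.
 */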
1530 static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1531 {
1532 int max_tx_queues, max_rx_queues;
1533 struct net_device *dev;
1534 __be32 __iomem *db_bar;
1535 struct gve_registers __iomem *reg_bar;
1536 struct gve_priv *priv;
1537 int err;
1538
1539 err = pci_enable_device(pdev);
1540 if (err)
1541 return err;
1542
1543 err = pci_request_regions(pdev, "gvnic-cfg");
1544 if (err)
1545 goto abort_with_enabled;
1546
1547 pci_set_master(pdev);
1548
1549 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1550 if (err) {
1551 dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
1552 goto abort_with_pci_region;
1553 }
1554
1555 reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
1556 if (!reg_bar) {
1557 dev_err(&pdev->dev, "Failed to map pci bar!\n");
1558 err = -ENOMEM;
1559 goto abort_with_pci_region;
1560 }
1561
1562 db_bar = pci_iomap(pdev, GVE_DOORBELL_BAR, 0);
1563 if (!db_bar) {
1564 dev_err(&pdev->dev, "Failed to map doorbell bar!\n");
1565 err = -ENOMEM;
1566 goto abort_with_reg_bar;
1567 }
1568
1569 gve_write_version(&reg_bar->driver_version);
1570 /* Get max queues to alloc etherdev */
1571 max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
1572 max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
1573 /* Alloc and setup the netdev and priv */
1574 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
1575 if (!dev) {
1576 dev_err(&pdev->dev, "could not allocate netdev\n");
1577 err = -ENOMEM;
1578 goto abort_with_db_bar;
1579 }
1580 SET_NETDEV_DEV(dev, &pdev->dev);
1581 pci_set_drvdata(pdev, dev);
1582 dev->ethtool_ops = &gve_ethtool_ops;
1583 dev->netdev_ops = &gve_netdev_ops;
1584
1585 /* Set default and supported features.
1586 *
1587 * Features might be set in other locations as well (such as
1588 * `gve_adminq_describe_device`).
1589 */
1590 dev->hw_features = NETIF_F_HIGHDMA;
1591 dev->hw_features |= NETIF_F_SG;
1592 dev->hw_features |= NETIF_F_HW_CSUM;
1593 dev->hw_features |= NETIF_F_TSO;
1594 dev->hw_features |= NETIF_F_TSO6;
1595 dev->hw_features |= NETIF_F_TSO_ECN;
1596 dev->hw_features |= NETIF_F_RXCSUM;
1597 dev->hw_features |= NETIF_F_RXHASH;
1598 dev->features = dev->hw_features;
1599 dev->watchdog_timeo = 5 * HZ;
1600 dev->min_mtu = ETH_MIN_MTU;
1601 netif_carrier_off(dev);
1602
1603 priv = netdev_priv(dev);
1604 priv->dev = dev;
1605 priv->pdev = pdev;
1606 priv->msg_enable = DEFAULT_MSG_LEVEL;
1607 priv->reg_bar0 = reg_bar;
1608 priv->db_bar2 = db_bar;
1609 priv->service_task_flags = 0x0;
1610 priv->state_flags = 0x0;
1611 priv->ethtool_flags = 0x0;
1612
1613 gve_set_probe_in_progress(priv);
1614 priv->gve_wq = alloc_ordered_workqueue("gve", 0);
1615 if (!priv->gve_wq) {
1616 dev_err(&pdev->dev, "Could not allocate workqueue");
1617 err = -ENOMEM;
1618 goto abort_with_netdev;
1619 }
1620 INIT_WORK(&priv->service_task, gve_service_task);
1621 INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
1622 priv->tx_cfg.max_queues = max_tx_queues;
1623 priv->rx_cfg.max_queues = max_rx_queues;
1624
1625 err = gve_init_priv(priv, false);
1626 if (err)
1627 goto abort_with_wq;
1628
1629 err = register_netdev(dev);
1630 if (err)
1631 goto abort_with_gve_init;
1632
1633 dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
1634 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
1635 gve_clear_probe_in_progress(priv);
1636 queue_work(priv->gve_wq, &priv->service_task);
1637 return 0;
1638
1639 abort_with_gve_init:
1640 gve_teardown_priv_resources(priv);
1641
1642 abort_with_wq:
1643 destroy_workqueue(priv->gve_wq);
1644
1645 abort_with_netdev:
1646 free_netdev(dev);
1647
1648 abort_with_db_bar:
1649 pci_iounmap(pdev, db_bar);
1650
1651 abort_with_reg_bar:
1652 pci_iounmap(pdev, reg_bar);
1653
1654 abort_with_pci_region:
1655 pci_release_regions(pdev);
1656
1657 abort_with_enabled:
1658 pci_disable_device(pdev);
1659 return err;
1660 }
1661
1662 static void gve_remove(struct pci_dev *pdev)
1663 {
1664 struct net_device *netdev = pci_get_drvdata(pdev);
1665 struct gve_priv *priv = netdev_priv(netdev);
1666 __be32 __iomem *db_bar = priv->db_bar2;
1667 void __iomem *reg_bar = priv->reg_bar0;
1668
1669 unregister_netdev(netdev);
1670 gve_teardown_priv_resources(priv);
1671 destroy_workqueue(priv->gve_wq);
1672 free_netdev(netdev);
1673 pci_iounmap(pdev, db_bar);
1674 pci_iounmap(pdev, reg_bar);
1675 pci_release_regions(pdev);
1676 pci_disable_device(pdev);
1677 }
1678
1679 static void gve_shutdown(struct pci_dev *pdev)
1680 {
1681 struct net_device *netdev = pci_get_drvdata(pdev);
1682 struct gve_priv *priv = netdev_priv(netdev);
1683 bool was_up = netif_carrier_ok(priv->dev);
1684
1685 rtnl_lock();
1686 if (was_up && gve_close(priv->dev)) {
1687 /* If the dev was up, attempt to close, if close fails, reset */
1688 gve_reset_and_teardown(priv, was_up);
1689 } else {
1690 /* If the dev wasn't up or close worked, finish tearing down */
1691 gve_teardown_priv_resources(priv);
1692 }
1693 rtnl_unlock();
1694 }
1695
1696 #ifdef CONFIG_PM
1697 static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
1698 {
1699 struct net_device *netdev = pci_get_drvdata(pdev);
1700 struct gve_priv *priv = netdev_priv(netdev);
1701 bool was_up = netif_carrier_ok(priv->dev);
1702
1703 priv->suspend_cnt++;
1704 rtnl_lock();
1705 if (was_up && gve_close(priv->dev)) {
1706 /* If the dev was up, attempt to close, if close fails, reset */
1707 gve_reset_and_teardown(priv, was_up);
1708 } else {
1709 /* If the dev wasn't up or close worked, finish tearing down */
1710 gve_teardown_priv_resources(priv);
1711 }
1712 priv->up_before_suspend = was_up;
1713 rtnl_unlock();
1714 return 0;
1715 }
1716
1717 static int gve_resume(struct pci_dev *pdev)
1718 {
1719 struct net_device *netdev = pci_get_drvdata(pdev);
1720 struct gve_priv *priv = netdev_priv(netdev);
1721 int err;
1722
1723 priv->resume_cnt++;
1724 rtnl_lock();
1725 err = gve_reset_recovery(priv, priv->up_before_suspend);
1726 rtnl_unlock();
1727 return err;
1728 }
1729 #endif /* CONFIG_PM */
1730
1731 static const struct pci_device_id gve_id_table[] = {
1732 { PCI_DEVICE(PCI_VENDOR_ID_GOOGLE, PCI_DEV_ID_GVNIC) },
1733 { }
1734 };
1735
1736 static struct pci_driver gvnic_driver = {
1737 .name = "gvnic",
1738 .id_table = gve_id_table,
1739 .probe = gve_probe,
1740 .remove = gve_remove,
1741 .shutdown = gve_shutdown,
1742 #ifdef CONFIG_PM
1743 .suspend = gve_suspend,
1744 .resume = gve_resume,
1745 #endif
1746 };
1747
1748 module_pci_driver(gvnic_driver);
1749
1750 MODULE_DEVICE_TABLE(pci, gve_id_table);
1751 MODULE_AUTHOR("Google, Inc.");
1752 MODULE_DESCRIPTION("gVNIC Driver");
1753 MODULE_LICENSE("Dual MIT/GPL");
1754 MODULE_VERSION(GVE_VERSION);
1755