Lines matching refs: priv — struct gve_priv usage in the gve driver. Only source lines containing "priv" are shown, which is why the source line numbers below are not contiguous.

35 struct gve_priv *priv = netdev_priv(dev); in gve_start_xmit() local
37 if (gve_is_gqi(priv)) in gve_start_xmit()
45 struct gve_priv *priv = netdev_priv(dev); in gve_get_stats() local
50 if (priv->rx) { in gve_get_stats()
51 for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) { in gve_get_stats()
54 u64_stats_fetch_begin_irq(&priv->rx[ring].statss); in gve_get_stats()
55 packets = priv->rx[ring].rpackets; in gve_get_stats()
56 bytes = priv->rx[ring].rbytes; in gve_get_stats()
57 } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss, in gve_get_stats()
63 if (priv->tx) { in gve_get_stats()
64 for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) { in gve_get_stats()
67 u64_stats_fetch_begin_irq(&priv->tx[ring].statss); in gve_get_stats()
68 packets = priv->tx[ring].pkt_done; in gve_get_stats()
69 bytes = priv->tx[ring].bytes_done; in gve_get_stats()
70 } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, in gve_get_stats()
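Because only lines containing "priv" survive the filter, the shape of the per-ring snapshot loop in gve_get_stats() is easy to miss. A minimal sketch of one tx iteration reconstructed from the fragments above; the accumulation into the rtnl_link_stats64 argument (called s here) is an assumption about the elided lines, and the *_irq u64_stats helpers match the kernel generation this listing comes from:

    unsigned int start;
    u64 packets, bytes;

    do {
            start = u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
            packets = priv->tx[ring].pkt_done;
            bytes = priv->tx[ring].bytes_done;
    } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss, start));

    s->tx_packets += packets;
    s->tx_bytes += bytes;
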
78 static int gve_alloc_counter_array(struct gve_priv *priv) in gve_alloc_counter_array() argument
80 priv->counter_array = in gve_alloc_counter_array()
81 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_counter_array()
82 priv->num_event_counters * in gve_alloc_counter_array()
83 sizeof(*priv->counter_array), in gve_alloc_counter_array()
84 &priv->counter_array_bus, GFP_KERNEL); in gve_alloc_counter_array()
85 if (!priv->counter_array) in gve_alloc_counter_array()
91 static void gve_free_counter_array(struct gve_priv *priv) in gve_free_counter_array() argument
93 if (!priv->counter_array) in gve_free_counter_array()
96 dma_free_coherent(&priv->pdev->dev, in gve_free_counter_array()
97 priv->num_event_counters * in gve_free_counter_array()
98 sizeof(*priv->counter_array), in gve_free_counter_array()
99 priv->counter_array, priv->counter_array_bus); in gve_free_counter_array()
100 priv->counter_array = NULL; in gve_free_counter_array()
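gve_alloc_counter_array() and gve_free_counter_array() follow the usual coherent-DMA pairing: the CPU pointer and bus address returned together by dma_alloc_coherent() are later handed back, with the same device and length, to dma_free_coherent(). A generic sketch of that contract (dev, num_counters and the __be32 element type are illustrative, not taken from the listing):

    dma_addr_t bus;
    __be32 *counters;
    size_t len = num_counters * sizeof(*counters);

    counters = dma_alloc_coherent(dev, len, &bus, GFP_KERNEL);
    if (!counters)
            return -ENOMEM;

    /* ... the NIC DMA-writes event counters via 'bus', the CPU reads counters[] ... */

    dma_free_coherent(dev, len, counters, bus);
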
106 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_stats_report_task() local
108 if (gve_get_do_report_stats(priv)) { in gve_stats_report_task()
109 gve_handle_report_stats(priv); in gve_stats_report_task()
110 gve_clear_do_report_stats(priv); in gve_stats_report_task()
114 static void gve_stats_report_schedule(struct gve_priv *priv) in gve_stats_report_schedule() argument
116 if (!gve_get_probe_in_progress(priv) && in gve_stats_report_schedule()
117 !gve_get_reset_in_progress(priv)) { in gve_stats_report_schedule()
118 gve_set_do_report_stats(priv); in gve_stats_report_schedule()
119 queue_work(priv->gve_wq, &priv->stats_report_task); in gve_stats_report_schedule()
125 struct gve_priv *priv = from_timer(priv, t, stats_report_timer); in gve_stats_report_timer() local
127 mod_timer(&priv->stats_report_timer, in gve_stats_report_timer()
129 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_stats_report_timer()
130 gve_stats_report_schedule(priv); in gve_stats_report_timer()
133 static int gve_alloc_stats_report(struct gve_priv *priv) in gve_alloc_stats_report() argument
138 priv->tx_cfg.num_queues; in gve_alloc_stats_report()
140 priv->rx_cfg.num_queues; in gve_alloc_stats_report()
141 priv->stats_report_len = struct_size(priv->stats_report, stats, in gve_alloc_stats_report()
143 priv->stats_report = in gve_alloc_stats_report()
144 dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_alloc_stats_report()
145 &priv->stats_report_bus, GFP_KERNEL); in gve_alloc_stats_report()
146 if (!priv->stats_report) in gve_alloc_stats_report()
149 timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0); in gve_alloc_stats_report()
150 priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD; in gve_alloc_stats_report()
154 static void gve_free_stats_report(struct gve_priv *priv) in gve_free_stats_report() argument
156 if (!priv->stats_report) in gve_free_stats_report()
159 del_timer_sync(&priv->stats_report_timer); in gve_free_stats_report()
160 dma_free_coherent(&priv->pdev->dev, priv->stats_report_len, in gve_free_stats_report()
161 priv->stats_report, priv->stats_report_bus); in gve_free_stats_report()
162 priv->stats_report = NULL; in gve_free_stats_report()
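The stats-report timer appears in three places above: set up in gve_alloc_stats_report(), re-armed from its own callback gve_stats_report_timer(), and stopped in gve_free_stats_report() before the report buffer is freed. Pieced together, with the round_jiffies(jiffies + ...) part of the mod_timer() expression filled in as an assumption (that line does not mention priv and so is not listed):

    /* gve_alloc_stats_report(): create the timer */
    timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
    priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;

    /* gve_stats_report_timer(): re-arm, then schedule the report work */
    mod_timer(&priv->stats_report_timer,
              round_jiffies(jiffies +
                            msecs_to_jiffies(priv->stats_report_timer_period)));
    gve_stats_report_schedule(priv);

    /* gve_free_stats_report(): stop the timer before freeing the DMA buffer */
    del_timer_sync(&priv->stats_report_timer);
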
167 struct gve_priv *priv = arg; in gve_mgmnt_intr() local
169 queue_work(priv->gve_wq, &priv->service_task); in gve_mgmnt_intr()
176 struct gve_priv *priv = block->priv; in gve_intr() local
178 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_intr()
197 struct gve_priv *priv; in gve_napi_poll() local
201 priv = block->priv; in gve_napi_poll()
215 irq_doorbell = gve_irq_doorbell(priv, block); in gve_napi_poll()
224 reschedule |= gve_tx_clean_pending(priv, block->tx); in gve_napi_poll()
238 struct gve_priv *priv = block->priv; in gve_napi_poll_dqo() local
252 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
275 gve_write_irq_doorbell_dqo(priv, block, in gve_napi_poll_dqo()
282 static int gve_alloc_notify_blocks(struct gve_priv *priv) in gve_alloc_notify_blocks() argument
284 int num_vecs_requested = priv->num_ntfy_blks + 1; in gve_alloc_notify_blocks()
285 char *name = priv->dev->name; in gve_alloc_notify_blocks()
291 priv->msix_vectors = kvcalloc(num_vecs_requested, in gve_alloc_notify_blocks()
292 sizeof(*priv->msix_vectors), GFP_KERNEL); in gve_alloc_notify_blocks()
293 if (!priv->msix_vectors) in gve_alloc_notify_blocks()
296 priv->msix_vectors[i].entry = i; in gve_alloc_notify_blocks()
297 vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors, in gve_alloc_notify_blocks()
300 dev_err(&priv->pdev->dev, "Could not enable min msix %d/%d\n", in gve_alloc_notify_blocks()
310 priv->num_ntfy_blks = new_num_ntfy_blks; in gve_alloc_notify_blocks()
311 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_alloc_notify_blocks()
312 priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
314 priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues, in gve_alloc_notify_blocks()
316 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
318 vecs_enabled, priv->tx_cfg.max_queues, in gve_alloc_notify_blocks()
319 priv->rx_cfg.max_queues); in gve_alloc_notify_blocks()
320 if (priv->tx_cfg.num_queues > priv->tx_cfg.max_queues) in gve_alloc_notify_blocks()
321 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_alloc_notify_blocks()
322 if (priv->rx_cfg.num_queues > priv->rx_cfg.max_queues) in gve_alloc_notify_blocks()
323 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_alloc_notify_blocks()
326 active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus()); in gve_alloc_notify_blocks()
329 snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt", in gve_alloc_notify_blocks()
331 err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, in gve_alloc_notify_blocks()
332 gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv); in gve_alloc_notify_blocks()
334 dev_err(&priv->pdev->dev, "Did not receive management vector.\n"); in gve_alloc_notify_blocks()
337 priv->irq_db_indices = in gve_alloc_notify_blocks()
338 dma_alloc_coherent(&priv->pdev->dev, in gve_alloc_notify_blocks()
339 priv->num_ntfy_blks * in gve_alloc_notify_blocks()
340 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
341 &priv->irq_db_indices_bus, GFP_KERNEL); in gve_alloc_notify_blocks()
342 if (!priv->irq_db_indices) { in gve_alloc_notify_blocks()
347 priv->ntfy_blocks = kvzalloc(priv->num_ntfy_blks * in gve_alloc_notify_blocks()
348 sizeof(*priv->ntfy_blocks), GFP_KERNEL); in gve_alloc_notify_blocks()
349 if (!priv->ntfy_blocks) { in gve_alloc_notify_blocks()
355 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_alloc_notify_blocks()
356 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_alloc_notify_blocks()
361 block->priv = priv; in gve_alloc_notify_blocks()
362 err = request_irq(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
363 gve_is_gqi(priv) ? gve_intr : gve_intr_dqo, in gve_alloc_notify_blocks()
366 dev_err(&priv->pdev->dev, in gve_alloc_notify_blocks()
370 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
372 block->irq_db_index = &priv->irq_db_indices[i].index; in gve_alloc_notify_blocks()
377 struct gve_notify_block *block = &priv->ntfy_blocks[j]; in gve_alloc_notify_blocks()
380 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_alloc_notify_blocks()
382 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_alloc_notify_blocks()
384 kvfree(priv->ntfy_blocks); in gve_alloc_notify_blocks()
385 priv->ntfy_blocks = NULL; in gve_alloc_notify_blocks()
387 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_alloc_notify_blocks()
388 sizeof(*priv->irq_db_indices), in gve_alloc_notify_blocks()
389 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_alloc_notify_blocks()
390 priv->irq_db_indices = NULL; in gve_alloc_notify_blocks()
392 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_alloc_notify_blocks()
394 pci_disable_msix(priv->pdev); in gve_alloc_notify_blocks()
396 kvfree(priv->msix_vectors); in gve_alloc_notify_blocks()
397 priv->msix_vectors = NULL; in gve_alloc_notify_blocks()
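gve_alloc_notify_blocks() is heavily fragmented above because nearly every one of its lines touches priv. Its control flow, minus the error unwinding, roughly follows the skeleton below; GVE_MIN_MSIX, block->name and the get_cpu_mask() affinity choice are assumptions about lines the listing omits:

    priv->msix_vectors = kvcalloc(num_vecs_requested,
                                  sizeof(*priv->msix_vectors), GFP_KERNEL);
    for (i = 0; i < num_vecs_requested; i++)
            priv->msix_vectors[i].entry = i;
    vecs_enabled = pci_enable_msix_range(priv->pdev, priv->msix_vectors,
                                         GVE_MIN_MSIX, num_vecs_requested);

    /* one vector is reserved for management interrupts ... */
    err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
                      gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);

    /* ... and one per notification block, spread across the active CPUs */
    for (i = 0; i < priv->num_ntfy_blks; i++) {
            struct gve_notify_block *block = &priv->ntfy_blocks[i];
            int msix_idx = i;

            block->priv = priv;
            err = request_irq(priv->msix_vectors[msix_idx].vector,
                              gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
                              0, block->name, block);
            irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
                                  get_cpu_mask(i % active_cpus));
            block->irq_db_index = &priv->irq_db_indices[i].index;
    }
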
401 static void gve_free_notify_blocks(struct gve_priv *priv) in gve_free_notify_blocks() argument
405 if (!priv->msix_vectors) in gve_free_notify_blocks()
409 for (i = 0; i < priv->num_ntfy_blks; i++) { in gve_free_notify_blocks()
410 struct gve_notify_block *block = &priv->ntfy_blocks[i]; in gve_free_notify_blocks()
413 irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector, in gve_free_notify_blocks()
415 free_irq(priv->msix_vectors[msix_idx].vector, block); in gve_free_notify_blocks()
417 free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv); in gve_free_notify_blocks()
418 kvfree(priv->ntfy_blocks); in gve_free_notify_blocks()
419 priv->ntfy_blocks = NULL; in gve_free_notify_blocks()
420 dma_free_coherent(&priv->pdev->dev, priv->num_ntfy_blks * in gve_free_notify_blocks()
421 sizeof(*priv->irq_db_indices), in gve_free_notify_blocks()
422 priv->irq_db_indices, priv->irq_db_indices_bus); in gve_free_notify_blocks()
423 priv->irq_db_indices = NULL; in gve_free_notify_blocks()
424 pci_disable_msix(priv->pdev); in gve_free_notify_blocks()
425 kvfree(priv->msix_vectors); in gve_free_notify_blocks()
426 priv->msix_vectors = NULL; in gve_free_notify_blocks()
429 static int gve_setup_device_resources(struct gve_priv *priv) in gve_setup_device_resources() argument
433 err = gve_alloc_counter_array(priv); in gve_setup_device_resources()
436 err = gve_alloc_notify_blocks(priv); in gve_setup_device_resources()
439 err = gve_alloc_stats_report(priv); in gve_setup_device_resources()
442 err = gve_adminq_configure_device_resources(priv, in gve_setup_device_resources()
443 priv->counter_array_bus, in gve_setup_device_resources()
444 priv->num_event_counters, in gve_setup_device_resources()
445 priv->irq_db_indices_bus, in gve_setup_device_resources()
446 priv->num_ntfy_blks); in gve_setup_device_resources()
448 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
454 if (priv->queue_format == GVE_DQO_RDA_FORMAT) { in gve_setup_device_resources()
455 priv->ptype_lut_dqo = kvzalloc(sizeof(*priv->ptype_lut_dqo), in gve_setup_device_resources()
457 if (!priv->ptype_lut_dqo) { in gve_setup_device_resources()
461 err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo); in gve_setup_device_resources()
463 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
469 err = gve_adminq_report_stats(priv, priv->stats_report_len, in gve_setup_device_resources()
470 priv->stats_report_bus, in gve_setup_device_resources()
473 dev_err(&priv->pdev->dev, in gve_setup_device_resources()
475 gve_set_device_resources_ok(priv); in gve_setup_device_resources()
479 kvfree(priv->ptype_lut_dqo); in gve_setup_device_resources()
480 priv->ptype_lut_dqo = NULL; in gve_setup_device_resources()
482 gve_free_stats_report(priv); in gve_setup_device_resources()
484 gve_free_notify_blocks(priv); in gve_setup_device_resources()
486 gve_free_counter_array(priv); in gve_setup_device_resources()
491 static void gve_trigger_reset(struct gve_priv *priv);
493 static void gve_teardown_device_resources(struct gve_priv *priv) in gve_teardown_device_resources() argument
498 if (gve_get_device_resources_ok(priv)) { in gve_teardown_device_resources()
500 err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); in gve_teardown_device_resources()
502 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
504 gve_trigger_reset(priv); in gve_teardown_device_resources()
506 err = gve_adminq_deconfigure_device_resources(priv); in gve_teardown_device_resources()
508 dev_err(&priv->pdev->dev, in gve_teardown_device_resources()
511 gve_trigger_reset(priv); in gve_teardown_device_resources()
515 kvfree(priv->ptype_lut_dqo); in gve_teardown_device_resources()
516 priv->ptype_lut_dqo = NULL; in gve_teardown_device_resources()
518 gve_free_counter_array(priv); in gve_teardown_device_resources()
519 gve_free_notify_blocks(priv); in gve_teardown_device_resources()
520 gve_free_stats_report(priv); in gve_teardown_device_resources()
521 gve_clear_device_resources_ok(priv); in gve_teardown_device_resources()
524 static void gve_add_napi(struct gve_priv *priv, int ntfy_idx, in gve_add_napi() argument
527 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_add_napi()
529 netif_napi_add(priv->dev, &block->napi, gve_poll); in gve_add_napi()
532 static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx) in gve_remove_napi() argument
534 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_remove_napi()
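The body of gve_remove_napi() is almost entirely filtered out above; the add/remove pair is presumably symmetric, with netif_napi_del() as the assumed counterpart of the netif_napi_add() call shown for gve_add_napi():

    static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
    {
            struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

            netif_napi_del(&block->napi);
    }
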
539 static int gve_register_qpls(struct gve_priv *priv) in gve_register_qpls() argument
541 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_register_qpls()
546 err = gve_adminq_register_page_list(priv, &priv->qpls[i]); in gve_register_qpls()
548 netif_err(priv, drv, priv->dev, in gve_register_qpls()
550 priv->qpls[i].id); in gve_register_qpls()
560 static int gve_unregister_qpls(struct gve_priv *priv) in gve_unregister_qpls() argument
562 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_unregister_qpls()
567 err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); in gve_unregister_qpls()
570 netif_err(priv, drv, priv->dev, in gve_unregister_qpls()
572 priv->qpls[i].id); in gve_unregister_qpls()
579 static int gve_create_rings(struct gve_priv *priv) in gve_create_rings() argument
584 err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues); in gve_create_rings()
586 netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n", in gve_create_rings()
587 priv->tx_cfg.num_queues); in gve_create_rings()
593 netif_dbg(priv, drv, priv->dev, "created %d tx queues\n", in gve_create_rings()
594 priv->tx_cfg.num_queues); in gve_create_rings()
596 err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues); in gve_create_rings()
598 netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n", in gve_create_rings()
599 priv->rx_cfg.num_queues); in gve_create_rings()
605 netif_dbg(priv, drv, priv->dev, "created %d rx queues\n", in gve_create_rings()
606 priv->rx_cfg.num_queues); in gve_create_rings()
608 if (gve_is_gqi(priv)) { in gve_create_rings()
615 for (i = 0; i < priv->rx_cfg.num_queues; i++) in gve_create_rings()
616 gve_rx_write_doorbell(priv, &priv->rx[i]); in gve_create_rings()
618 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_create_rings()
620 gve_rx_post_buffers_dqo(&priv->rx[i]); in gve_create_rings()
627 static void add_napi_init_sync_stats(struct gve_priv *priv, in add_napi_init_sync_stats() argument
634 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
635 int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
637 u64_stats_init(&priv->tx[i].statss); in add_napi_init_sync_stats()
638 priv->tx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
639 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
642 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in add_napi_init_sync_stats()
643 int ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in add_napi_init_sync_stats()
645 u64_stats_init(&priv->rx[i].statss); in add_napi_init_sync_stats()
646 priv->rx[i].ntfy_id = ntfy_idx; in add_napi_init_sync_stats()
647 gve_add_napi(priv, ntfy_idx, napi_poll); in add_napi_init_sync_stats()
651 static void gve_tx_free_rings(struct gve_priv *priv) in gve_tx_free_rings() argument
653 if (gve_is_gqi(priv)) { in gve_tx_free_rings()
654 gve_tx_free_rings_gqi(priv); in gve_tx_free_rings()
656 gve_tx_free_rings_dqo(priv); in gve_tx_free_rings()
660 static int gve_alloc_rings(struct gve_priv *priv) in gve_alloc_rings() argument
665 priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx), in gve_alloc_rings()
667 if (!priv->tx) in gve_alloc_rings()
670 if (gve_is_gqi(priv)) in gve_alloc_rings()
671 err = gve_tx_alloc_rings(priv); in gve_alloc_rings()
673 err = gve_tx_alloc_rings_dqo(priv); in gve_alloc_rings()
678 priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx), in gve_alloc_rings()
680 if (!priv->rx) { in gve_alloc_rings()
685 if (gve_is_gqi(priv)) in gve_alloc_rings()
686 err = gve_rx_alloc_rings(priv); in gve_alloc_rings()
688 err = gve_rx_alloc_rings_dqo(priv); in gve_alloc_rings()
692 if (gve_is_gqi(priv)) in gve_alloc_rings()
693 add_napi_init_sync_stats(priv, gve_napi_poll); in gve_alloc_rings()
695 add_napi_init_sync_stats(priv, gve_napi_poll_dqo); in gve_alloc_rings()
700 kvfree(priv->rx); in gve_alloc_rings()
701 priv->rx = NULL; in gve_alloc_rings()
703 gve_tx_free_rings(priv); in gve_alloc_rings()
705 kvfree(priv->tx); in gve_alloc_rings()
706 priv->tx = NULL; in gve_alloc_rings()
710 static int gve_destroy_rings(struct gve_priv *priv) in gve_destroy_rings() argument
714 err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues); in gve_destroy_rings()
716 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
721 netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n"); in gve_destroy_rings()
722 err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues); in gve_destroy_rings()
724 netif_err(priv, drv, priv->dev, in gve_destroy_rings()
729 netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n"); in gve_destroy_rings()
733 static void gve_rx_free_rings(struct gve_priv *priv) in gve_rx_free_rings() argument
735 if (gve_is_gqi(priv)) in gve_rx_free_rings()
736 gve_rx_free_rings_gqi(priv); in gve_rx_free_rings()
738 gve_rx_free_rings_dqo(priv); in gve_rx_free_rings()
741 static void gve_free_rings(struct gve_priv *priv) in gve_free_rings() argument
746 if (priv->tx) { in gve_free_rings()
747 for (i = 0; i < priv->tx_cfg.num_queues; i++) { in gve_free_rings()
748 ntfy_idx = gve_tx_idx_to_ntfy(priv, i); in gve_free_rings()
749 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
751 gve_tx_free_rings(priv); in gve_free_rings()
752 kvfree(priv->tx); in gve_free_rings()
753 priv->tx = NULL; in gve_free_rings()
755 if (priv->rx) { in gve_free_rings()
756 for (i = 0; i < priv->rx_cfg.num_queues; i++) { in gve_free_rings()
757 ntfy_idx = gve_rx_idx_to_ntfy(priv, i); in gve_free_rings()
758 gve_remove_napi(priv, ntfy_idx); in gve_free_rings()
760 gve_rx_free_rings(priv); in gve_free_rings()
761 kvfree(priv->rx); in gve_free_rings()
762 priv->rx = NULL; in gve_free_rings()
766 int gve_alloc_page(struct gve_priv *priv, struct device *dev, in gve_alloc_page() argument
772 priv->page_alloc_fail++; in gve_alloc_page()
777 priv->dma_mapping_error++; in gve_alloc_page()
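Only the two failure counters of gve_alloc_page() make it through the filter. The surrounding logic is presumably the standard allocate-then-map sequence, sketched here; the exact parameter names and gfp/direction handling are assumptions:

    *page = alloc_page(gfp_flags);
    if (!*page) {
            priv->page_alloc_fail++;
            return -ENOMEM;
    }
    *dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
    if (dma_mapping_error(dev, *dma)) {
            priv->dma_mapping_error++;
            __free_page(*page);
            return -ENOMEM;
    }
    return 0;
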
784 static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, in gve_alloc_queue_page_list() argument
787 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_alloc_queue_page_list()
791 if (pages + priv->num_registered_pages > priv->max_registered_pages) { in gve_alloc_queue_page_list()
792 netif_err(priv, drv, priv->dev, in gve_alloc_queue_page_list()
794 pages + priv->num_registered_pages, in gve_alloc_queue_page_list()
795 priv->max_registered_pages); in gve_alloc_queue_page_list()
811 err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i], in gve_alloc_queue_page_list()
813 gve_qpl_dma_dir(priv, id), GFP_KERNEL); in gve_alloc_queue_page_list()
819 priv->num_registered_pages += pages; in gve_alloc_queue_page_list()
833 static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) in gve_free_queue_page_list() argument
835 struct gve_queue_page_list *qpl = &priv->qpls[id]; in gve_free_queue_page_list()
844 gve_free_page(&priv->pdev->dev, qpl->pages[i], in gve_free_queue_page_list()
845 qpl->page_buses[i], gve_qpl_dma_dir(priv, id)); in gve_free_queue_page_list()
850 priv->num_registered_pages -= qpl->num_entries; in gve_free_queue_page_list()
853 static int gve_alloc_qpls(struct gve_priv *priv) in gve_alloc_qpls() argument
855 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_alloc_qpls()
862 priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL); in gve_alloc_qpls()
863 if (!priv->qpls) in gve_alloc_qpls()
866 for (i = 0; i < gve_num_tx_qpls(priv); i++) { in gve_alloc_qpls()
867 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
868 priv->tx_pages_per_qpl); in gve_alloc_qpls()
873 err = gve_alloc_queue_page_list(priv, i, in gve_alloc_qpls()
874 priv->rx_data_slot_cnt); in gve_alloc_qpls()
879 priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) * in gve_alloc_qpls()
881 priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls), in gve_alloc_qpls()
883 if (!priv->qpl_cfg.qpl_id_map) { in gve_alloc_qpls()
892 gve_free_queue_page_list(priv, j); in gve_alloc_qpls()
893 kvfree(priv->qpls); in gve_alloc_qpls()
897 static void gve_free_qpls(struct gve_priv *priv) in gve_free_qpls() argument
899 int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv); in gve_free_qpls()
905 kvfree(priv->qpl_cfg.qpl_id_map); in gve_free_qpls()
908 gve_free_queue_page_list(priv, i); in gve_free_qpls()
910 kvfree(priv->qpls); in gve_free_qpls()
917 void gve_schedule_reset(struct gve_priv *priv) in gve_schedule_reset() argument
919 gve_set_do_reset(priv); in gve_schedule_reset()
920 queue_work(priv->gve_wq, &priv->service_task); in gve_schedule_reset()
923 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up);
924 static int gve_reset_recovery(struct gve_priv *priv, bool was_up);
925 static void gve_turndown(struct gve_priv *priv);
926 static void gve_turnup(struct gve_priv *priv);
930 struct gve_priv *priv = netdev_priv(dev); in gve_open() local
933 err = gve_alloc_qpls(priv); in gve_open()
937 err = gve_alloc_rings(priv); in gve_open()
941 err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); in gve_open()
944 err = netif_set_real_num_rx_queues(dev, priv->rx_cfg.num_queues); in gve_open()
948 err = gve_register_qpls(priv); in gve_open()
952 if (!gve_is_gqi(priv)) { in gve_open()
956 priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO; in gve_open()
958 err = gve_create_rings(priv); in gve_open()
962 gve_set_device_rings_ok(priv); in gve_open()
964 if (gve_get_report_stats(priv)) in gve_open()
965 mod_timer(&priv->stats_report_timer, in gve_open()
967 msecs_to_jiffies(priv->stats_report_timer_period))); in gve_open()
969 gve_turnup(priv); in gve_open()
970 queue_work(priv->gve_wq, &priv->service_task); in gve_open()
971 priv->interface_up_cnt++; in gve_open()
975 gve_free_rings(priv); in gve_open()
977 gve_free_qpls(priv); in gve_open()
984 if (gve_get_reset_in_progress(priv)) in gve_open()
987 gve_reset_and_teardown(priv, true); in gve_open()
989 gve_reset_recovery(priv, false); in gve_open()
996 struct gve_priv *priv = netdev_priv(dev); in gve_close() local
1000 if (gve_get_device_rings_ok(priv)) { in gve_close()
1001 gve_turndown(priv); in gve_close()
1002 err = gve_destroy_rings(priv); in gve_close()
1005 err = gve_unregister_qpls(priv); in gve_close()
1008 gve_clear_device_rings_ok(priv); in gve_close()
1010 del_timer_sync(&priv->stats_report_timer); in gve_close()
1012 gve_free_rings(priv); in gve_close()
1013 gve_free_qpls(priv); in gve_close()
1014 priv->interface_down_cnt++; in gve_close()
1021 if (gve_get_reset_in_progress(priv)) in gve_close()
1024 gve_reset_and_teardown(priv, true); in gve_close()
1025 return gve_reset_recovery(priv, false); in gve_close()
1028 int gve_adjust_queues(struct gve_priv *priv, in gve_adjust_queues() argument
1034 if (netif_carrier_ok(priv->dev)) { in gve_adjust_queues()
1039 err = gve_close(priv->dev); in gve_adjust_queues()
1045 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1046 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1048 err = gve_open(priv->dev); in gve_adjust_queues()
1055 priv->tx_cfg = new_tx_config; in gve_adjust_queues()
1056 priv->rx_cfg = new_rx_config; in gve_adjust_queues()
1060 netif_err(priv, drv, priv->dev, in gve_adjust_queues()
1062 gve_turndown(priv); in gve_adjust_queues()
1066 static void gve_turndown(struct gve_priv *priv) in gve_turndown() argument
1070 if (netif_carrier_ok(priv->dev)) in gve_turndown()
1071 netif_carrier_off(priv->dev); in gve_turndown()
1073 if (!gve_get_napi_enabled(priv)) in gve_turndown()
1077 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turndown()
1078 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turndown()
1079 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1083 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turndown()
1084 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turndown()
1085 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turndown()
1091 netif_tx_disable(priv->dev); in gve_turndown()
1093 gve_clear_napi_enabled(priv); in gve_turndown()
1094 gve_clear_report_stats(priv); in gve_turndown()
1097 static void gve_turnup(struct gve_priv *priv) in gve_turnup() argument
1102 netif_tx_start_all_queues(priv->dev); in gve_turnup()
1105 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_turnup()
1106 int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx); in gve_turnup()
1107 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1110 if (gve_is_gqi(priv)) { in gve_turnup()
1111 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1113 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1114 priv->tx_coalesce_usecs); in gve_turnup()
1117 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_turnup()
1118 int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx); in gve_turnup()
1119 struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx]; in gve_turnup()
1122 if (gve_is_gqi(priv)) { in gve_turnup()
1123 iowrite32be(0, gve_irq_doorbell(priv, block)); in gve_turnup()
1125 gve_set_itr_coalesce_usecs_dqo(priv, block, in gve_turnup()
1126 priv->rx_coalesce_usecs); in gve_turnup()
1130 gve_set_napi_enabled(priv); in gve_turnup()
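The loops in gve_turndown() and gve_turnup() lose their napi calls to the filter. One iteration of each, with napi_disable()/napi_enable() filled in as an assumption based on the usual quiesce/resume pairing:

    /* gve_turndown(): quiesce one notification block */
    struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];

    napi_disable(&block->napi);

    /* gve_turnup(): re-enable it, then unmask (GQI) or rate-limit (DQO) its IRQ */
    napi_enable(&block->napi);
    if (gve_is_gqi(priv))
            iowrite32be(0, gve_irq_doorbell(priv, block));
    else
            gve_set_itr_coalesce_usecs_dqo(priv, block,
                                           priv->tx_coalesce_usecs);
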
1137 struct gve_priv *priv; in gve_tx_timeout() local
1143 priv = netdev_priv(dev); in gve_tx_timeout()
1144 if (txqueue > priv->tx_cfg.num_queues) in gve_tx_timeout()
1147 ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); in gve_tx_timeout()
1148 if (ntfy_idx >= priv->num_ntfy_blks) in gve_tx_timeout()
1151 block = &priv->ntfy_blocks[ntfy_idx]; in gve_tx_timeout()
1161 last_nic_done = gve_tx_load_event_counter(priv, tx); in gve_tx_timeout()
1164 iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); in gve_tx_timeout()
1171 gve_schedule_reset(priv); in gve_tx_timeout()
1176 priv->tx_timeo_cnt++; in gve_tx_timeout()
1183 struct gve_priv *priv = netdev_priv(netdev); in gve_set_features() local
1210 netif_err(priv, drv, netdev, in gve_set_features()
1224 static void gve_handle_status(struct gve_priv *priv, u32 status) in gve_handle_status() argument
1227 dev_info(&priv->pdev->dev, "Device requested reset.\n"); in gve_handle_status()
1228 gve_set_do_reset(priv); in gve_handle_status()
1231 priv->stats_report_trigger_cnt++; in gve_handle_status()
1232 gve_set_do_report_stats(priv); in gve_handle_status()
1236 static void gve_handle_reset(struct gve_priv *priv) in gve_handle_reset() argument
1242 if (gve_get_probe_in_progress(priv)) in gve_handle_reset()
1245 if (gve_get_do_reset(priv)) { in gve_handle_reset()
1247 gve_reset(priv, false); in gve_handle_reset()
1252 void gve_handle_report_stats(struct gve_priv *priv) in gve_handle_report_stats() argument
1254 struct stats *stats = priv->stats_report->stats; in gve_handle_report_stats()
1259 if (!gve_get_report_stats(priv)) in gve_handle_report_stats()
1262 be64_add_cpu(&priv->stats_report->written_count, 1); in gve_handle_report_stats()
1264 if (priv->tx) { in gve_handle_report_stats()
1265 for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1270 if (gve_is_gqi(priv)) { in gve_handle_report_stats()
1271 last_completion = priv->tx[idx].done; in gve_handle_report_stats()
1272 tx_frames = priv->tx[idx].req; in gve_handle_report_stats()
1276 start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss); in gve_handle_report_stats()
1277 tx_bytes = priv->tx[idx].bytes_done; in gve_handle_report_stats()
1278 } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start)); in gve_handle_report_stats()
1281 .value = cpu_to_be64(priv->tx[idx].wake_queue), in gve_handle_report_stats()
1286 .value = cpu_to_be64(priv->tx[idx].stop_queue), in gve_handle_report_stats()
1306 .value = cpu_to_be64(priv->tx[idx].queue_timeout), in gve_handle_report_stats()
1312 if (priv->rx) { in gve_handle_report_stats()
1313 for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) { in gve_handle_report_stats()
1316 .value = cpu_to_be64(priv->rx[idx].desc.seqno), in gve_handle_report_stats()
1321 .value = cpu_to_be64(priv->rx[0].fill_cnt), in gve_handle_report_stats()
1328 static void gve_handle_link_status(struct gve_priv *priv, bool link_status) in gve_handle_link_status() argument
1330 if (!gve_get_napi_enabled(priv)) in gve_handle_link_status()
1333 if (link_status == netif_carrier_ok(priv->dev)) in gve_handle_link_status()
1337 netdev_info(priv->dev, "Device link is up.\n"); in gve_handle_link_status()
1338 netif_carrier_on(priv->dev); in gve_handle_link_status()
1340 netdev_info(priv->dev, "Device link is down.\n"); in gve_handle_link_status()
1341 netif_carrier_off(priv->dev); in gve_handle_link_status()
1348 struct gve_priv *priv = container_of(work, struct gve_priv, in gve_service_task() local
1350 u32 status = ioread32be(&priv->reg_bar0->device_status); in gve_service_task()
1352 gve_handle_status(priv, status); in gve_service_task()
1354 gve_handle_reset(priv); in gve_service_task()
1355 gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); in gve_service_task()
1358 static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) in gve_init_priv() argument
1364 err = gve_adminq_alloc(&priv->pdev->dev, priv); in gve_init_priv()
1366 dev_err(&priv->pdev->dev, in gve_init_priv()
1374 priv->queue_format = GVE_QUEUE_FORMAT_UNSPECIFIED; in gve_init_priv()
1376 err = gve_adminq_describe_device(priv); in gve_init_priv()
1378 dev_err(&priv->pdev->dev, in gve_init_priv()
1382 priv->dev->mtu = priv->dev->max_mtu; in gve_init_priv()
1383 num_ntfy = pci_msix_vec_count(priv->pdev); in gve_init_priv()
1385 dev_err(&priv->pdev->dev, in gve_init_priv()
1390 dev_err(&priv->pdev->dev, "gve needs at least %d MSI-x vectors, but only has %d\n", in gve_init_priv()
1396 priv->num_registered_pages = 0; in gve_init_priv()
1397 priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; in gve_init_priv()
1401 priv->num_ntfy_blks = (num_ntfy - 1) & ~0x1; in gve_init_priv()
1402 priv->mgmt_msix_idx = priv->num_ntfy_blks; in gve_init_priv()
1404 priv->tx_cfg.max_queues = in gve_init_priv()
1405 min_t(int, priv->tx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1406 priv->rx_cfg.max_queues = in gve_init_priv()
1407 min_t(int, priv->rx_cfg.max_queues, priv->num_ntfy_blks / 2); in gve_init_priv()
1409 priv->tx_cfg.num_queues = priv->tx_cfg.max_queues; in gve_init_priv()
1410 priv->rx_cfg.num_queues = priv->rx_cfg.max_queues; in gve_init_priv()
1411 if (priv->default_num_queues > 0) { in gve_init_priv()
1412 priv->tx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1413 priv->tx_cfg.num_queues); in gve_init_priv()
1414 priv->rx_cfg.num_queues = min_t(int, priv->default_num_queues, in gve_init_priv()
1415 priv->rx_cfg.num_queues); in gve_init_priv()
1418 dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n", in gve_init_priv()
1419 priv->tx_cfg.num_queues, priv->rx_cfg.num_queues); in gve_init_priv()
1420 dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n", in gve_init_priv()
1421 priv->tx_cfg.max_queues, priv->rx_cfg.max_queues); in gve_init_priv()
1423 if (!gve_is_gqi(priv)) { in gve_init_priv()
1424 priv->tx_coalesce_usecs = GVE_TX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
1425 priv->rx_coalesce_usecs = GVE_RX_IRQ_RATELIMIT_US_DQO; in gve_init_priv()
1429 err = gve_setup_device_resources(priv); in gve_init_priv()
1433 gve_adminq_free(&priv->pdev->dev, priv); in gve_init_priv()
1437 static void gve_teardown_priv_resources(struct gve_priv *priv) in gve_teardown_priv_resources() argument
1439 gve_teardown_device_resources(priv); in gve_teardown_priv_resources()
1440 gve_adminq_free(&priv->pdev->dev, priv); in gve_teardown_priv_resources()
1443 static void gve_trigger_reset(struct gve_priv *priv) in gve_trigger_reset() argument
1446 gve_adminq_release(priv); in gve_trigger_reset()
1449 static void gve_reset_and_teardown(struct gve_priv *priv, bool was_up) in gve_reset_and_teardown() argument
1451 gve_trigger_reset(priv); in gve_reset_and_teardown()
1454 gve_close(priv->dev); in gve_reset_and_teardown()
1455 gve_teardown_priv_resources(priv); in gve_reset_and_teardown()
1458 static int gve_reset_recovery(struct gve_priv *priv, bool was_up) in gve_reset_recovery() argument
1462 err = gve_init_priv(priv, true); in gve_reset_recovery()
1466 err = gve_open(priv->dev); in gve_reset_recovery()
1472 dev_err(&priv->pdev->dev, "Reset failed! !!! DISABLING ALL QUEUES !!!\n"); in gve_reset_recovery()
1473 gve_turndown(priv); in gve_reset_recovery()
1477 int gve_reset(struct gve_priv *priv, bool attempt_teardown) in gve_reset() argument
1479 bool was_up = netif_carrier_ok(priv->dev); in gve_reset()
1482 dev_info(&priv->pdev->dev, "Performing reset\n"); in gve_reset()
1483 gve_clear_do_reset(priv); in gve_reset()
1484 gve_set_reset_in_progress(priv); in gve_reset()
1489 gve_turndown(priv); in gve_reset()
1490 gve_reset_and_teardown(priv, was_up); in gve_reset()
1494 err = gve_close(priv->dev); in gve_reset()
1497 gve_reset_and_teardown(priv, was_up); in gve_reset()
1500 gve_teardown_priv_resources(priv); in gve_reset()
1504 err = gve_reset_recovery(priv, was_up); in gve_reset()
1505 gve_clear_reset_in_progress(priv); in gve_reset()
1506 priv->reset_cnt++; in gve_reset()
1507 priv->interface_up_cnt = 0; in gve_reset()
1508 priv->interface_down_cnt = 0; in gve_reset()
1509 priv->stats_report_trigger_cnt = 0; in gve_reset()
1536 struct gve_priv *priv; in gve_probe() local
1574 dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); in gve_probe()
1603 priv = netdev_priv(dev); in gve_probe()
1604 priv->dev = dev; in gve_probe()
1605 priv->pdev = pdev; in gve_probe()
1606 priv->msg_enable = DEFAULT_MSG_LEVEL; in gve_probe()
1607 priv->reg_bar0 = reg_bar; in gve_probe()
1608 priv->db_bar2 = db_bar; in gve_probe()
1609 priv->service_task_flags = 0x0; in gve_probe()
1610 priv->state_flags = 0x0; in gve_probe()
1611 priv->ethtool_flags = 0x0; in gve_probe()
1613 gve_set_probe_in_progress(priv); in gve_probe()
1614 priv->gve_wq = alloc_ordered_workqueue("gve", 0); in gve_probe()
1615 if (!priv->gve_wq) { in gve_probe()
1620 INIT_WORK(&priv->service_task, gve_service_task); in gve_probe()
1621 INIT_WORK(&priv->stats_report_task, gve_stats_report_task); in gve_probe()
1622 priv->tx_cfg.max_queues = max_tx_queues; in gve_probe()
1623 priv->rx_cfg.max_queues = max_rx_queues; in gve_probe()
1625 err = gve_init_priv(priv, false); in gve_probe()
1634 dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); in gve_probe()
1635 gve_clear_probe_in_progress(priv); in gve_probe()
1636 queue_work(priv->gve_wq, &priv->service_task); in gve_probe()
1640 gve_teardown_priv_resources(priv); in gve_probe()
1643 destroy_workqueue(priv->gve_wq); in gve_probe()
1665 struct gve_priv *priv = netdev_priv(netdev); in gve_remove() local
1666 __be32 __iomem *db_bar = priv->db_bar2; in gve_remove()
1667 void __iomem *reg_bar = priv->reg_bar0; in gve_remove()
1670 gve_teardown_priv_resources(priv); in gve_remove()
1671 destroy_workqueue(priv->gve_wq); in gve_remove()
1682 struct gve_priv *priv = netdev_priv(netdev); in gve_shutdown() local
1683 bool was_up = netif_carrier_ok(priv->dev); in gve_shutdown()
1686 if (was_up && gve_close(priv->dev)) { in gve_shutdown()
1688 gve_reset_and_teardown(priv, was_up); in gve_shutdown()
1691 gve_teardown_priv_resources(priv); in gve_shutdown()
1700 struct gve_priv *priv = netdev_priv(netdev); in gve_suspend() local
1701 bool was_up = netif_carrier_ok(priv->dev); in gve_suspend()
1703 priv->suspend_cnt++; in gve_suspend()
1705 if (was_up && gve_close(priv->dev)) { in gve_suspend()
1707 gve_reset_and_teardown(priv, was_up); in gve_suspend()
1710 gve_teardown_priv_resources(priv); in gve_suspend()
1712 priv->up_before_suspend = was_up; in gve_suspend()
1720 struct gve_priv *priv = netdev_priv(netdev); in gve_resume() local
1723 priv->resume_cnt++; in gve_resume()
1725 err = gve_reset_recovery(priv, priv->up_before_suspend); in gve_resume()