/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

#ifndef HFI1_NETDEV_H
#define HFI1_NETDEV_H

#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/xarray.h>

/**
 * struct hfi1_netdev_rxq - Receive queue of the HFI1 dummy netdev.
 * Both the IPoIB and VNIC netdevices run on top of this device.
 * @napi: napi object
 * @priv: pointer to the owning hfi1_netdev_priv
 * @rcd:  pointer to the receive context data
 */
struct hfi1_netdev_rxq {
	struct napi_struct napi;
	struct hfi1_netdev_priv *priv;
	struct hfi1_ctxtdata *rcd;
};
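
/*
 * Illustrative sketch (hypothetical helper, not declared by this header):
 * because @napi is embedded in the queue, a NAPI poll callback that is
 * handed only the napi pointer can recover its hfi1_netdev_rxq with
 * container_of().
 */
static inline
struct hfi1_netdev_rxq *hfi1_netdev_rxq_from_napi_sketch(struct napi_struct *napi)
{
	return container_of(napi, struct hfi1_netdev_rxq, napi);
}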

/*
 * Number of netdev contexts used. Ensure it is less than or equal to
 * the max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_MAX_NETDEV_CTXTS   8

/* Number of NETDEV RSM entries */
#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS

/**
 * struct hfi1_netdev_priv - data required to set up and run the HFI1
 * dummy netdev.
 * @dd:		hfi1_devdata
 * @rxq:	pointer to the dummy netdev receive queues
 * @num_rx_q:	number of receive queues
 * @rmt_start:	first free index in the RMT array
 * @dev_tbl:	netdev table keyed by the unique identifiers of the VNIC
 *		and IPoIB VLANs
 * @enabled:	atomic counter of netdevs enabling receive queues;
 *		when it reaches 0, NAPI is disabled
 * @netdevs:	atomic counter of netdevs using the dummy netdev;
 *		when it reaches 0, the receive queues are freed
 */
struct hfi1_netdev_priv {
	struct hfi1_devdata *dd;
	struct hfi1_netdev_rxq *rxq;
	int num_rx_q;
	int rmt_start;
	struct xarray dev_tbl;
	/* count of enabled napi polls */
	atomic_t enabled;
	/* count of netdevs on top */
	atomic_t netdevs;
};

static inline
struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
{
	return (struct hfi1_netdev_priv *)&dev[1];
}
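
/*
 * A minimal allocation sketch (an assumption for illustration, not the
 * driver's actual allocator, which is hfi1_netdev_alloc()): the accessor
 * above relies on the private data being placed directly behind
 * struct net_device in a single allocation, so &dev[1] is the start of
 * the priv area.  kzalloc_node() and init_dummy_netdev() are assumed to
 * be available through hfi.h / <linux/netdevice.h>.
 */
static inline struct net_device *hfi1_netdev_alloc_sketch(int node)
{
	/* net_device followed immediately by the hfi1 private data */
	const size_t size = sizeof(struct net_device) +
			    sizeof(struct hfi1_netdev_priv);
	struct net_device *dev = kzalloc_node(size, GFP_KERNEL, node);

	if (!dev)
		return NULL;

	/* minimal init for a NAPI-only dummy netdev */
	init_dummy_netdev(dev);
	return dev;
}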

static inline
int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return priv->num_rx_q;
}

static inline
struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return priv->rxq[ctxt].rcd;
}
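
/*
 * Usage sketch (hypothetical helper): walking every netdev receive
 * context of a device with the two accessors above.
 */
static inline void hfi1_netdev_for_each_ctxt_sketch(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < hfi1_netdev_ctxt_count(dd); i++) {
		struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);

		/* a real caller would program or inspect @rcd here */
		(void)rcd;
	}
}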

static inline
int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return priv->rmt_start;
}

static inline
void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	priv->rmt_start = rmt_idx;
}
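
/*
 * Usage sketch (an assumption about how the two helpers above are meant
 * to pair up; the function name is hypothetical): reserve @nentries RMT
 * entries by taking the current free index and publishing the next one.
 */
static inline int hfi1_netdev_claim_rmt_sketch(struct hfi1_devdata *dd,
					       int nentries)
{
	int idx = hfi1_netdev_get_free_rmt_idx(dd);

	hfi1_netdev_set_free_rmt_idx(dd, idx + nentries);
	return idx;
}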

u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask);

void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
int hfi1_netdev_alloc(struct hfi1_devdata *dd);
void hfi1_netdev_free(struct hfi1_devdata *dd);
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
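
/*
 * Usage sketch (hypothetical client code): stashing per-client state in
 * the dev_tbl under a caller-chosen id and reading it back.  A
 * 0-on-success return from hfi1_netdev_add_data() is assumed here.
 */
static inline void *hfi1_netdev_tbl_sketch(struct hfi1_devdata *dd, int id,
					   void *client_state)
{
	if (hfi1_netdev_add_data(dd, id, client_state))
		return NULL;	/* assumed: id already taken or no memory */

	return hfi1_netdev_get_data(dd, id);
}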

/* chip.c */
int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
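
/*
 * Registration sketch (not the driver's actual setup path; the helper
 * name is hypothetical): tying a receive queue's napi instance to the
 * poll callback above via the four-argument netif_napi_add() used by
 * kernels of this vintage.
 */
static inline void hfi1_netdev_napi_sketch(struct net_device *dev,
					   struct hfi1_netdev_rxq *rxq)
{
	netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, NAPI_POLL_WEIGHT);
}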

#endif /* HFI1_NETDEV_H */